author    Jeff Darcy <jdarcy@redhat.com>  2017-01-31 14:49:45 -0500
committer Shyamsundar Ranganathan <srangana@redhat.com>  2017-02-01 19:54:58 -0500
commit    83803b4b2d70e9e6e16bb050d7ac8e49ba420893 (patch)
tree      9a6c1f3f9a723bf578f78c624d3ce9f44baac6db /tests/bitrot
parent    80b04666ec7019e132f76f734a88559457702f1b (diff)
core: run many bricks within one glusterfsd process
This patch adds support for multiple brick translator stacks running in a
single brick server process. This reduces our per-brick memory usage by
approximately 3x, and our appetite for TCP ports even more. It also creates
potential to avoid process/thread thrashing, and to improve QoS by scheduling
more carefully across the bricks, but realizing that potential will require
further work.

Multiplexing is controlled by the "cluster.brick-multiplex" global option. By
default it's off, and bricks are started in separate processes as before. If
multiplexing is enabled, then *compatible* bricks (mostly those with the same
transport options) will be started in the same process.

Backport of:
> Change-Id: I45059454e51d6f4cbb29a4953359c09a408695cb
> BUG: 1385758
> Reviewed-on: https://review.gluster.org/14763

Change-Id: I4bce9080f6c93d50171823298fdf920258317ee8
BUG: 1418091
Signed-off-by: Jeff Darcy <jdarcy@redhat.com>
Reviewed-on: https://review.gluster.org/16496
Smoke: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Shyamsundar Ranganathan <srangana@redhat.com>
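A minimal sketch of how an administrator might toggle the new option, assuming
the usual "gluster volume set all" form for cluster-wide options; the exact CLI
invocation is not documented by this patch and is an assumption here:

    # Sketch: enable brick multiplexing cluster-wide. The option name is taken
    # from the commit message; the "volume set all" form for global options is
    # an assumption, not part of this patch.
    gluster volume set all cluster.brick-multiplex on

    # Turning it back off restores the default one-process-per-brick behaviour
    # for bricks started afterwards.
    gluster volume set all cluster.brick-multiplex off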
Diffstat (limited to 'tests/bitrot')
-rw-r--r--  tests/bitrot/bug-1373520.t | 42
1 file changed, 31 insertions(+), 11 deletions(-)
diff --git a/tests/bitrot/bug-1373520.t b/tests/bitrot/bug-1373520.t
index 3a0ac5293e0..7b8e48dd083 100644
--- a/tests/bitrot/bug-1373520.t
+++ b/tests/bitrot/bug-1373520.t
@@ -17,7 +17,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status'
TEST $CLI volume set $V0 performance.stat-prefetch off
#Mount the volume
-TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+TEST $GFS -s $H0 --volfile-id $V0 $M0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
#Enable bitrot
@@ -46,18 +46,38 @@ TEST $CLI volume start $V0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
-#Trigger lookup so that bitrot xlator marks file as bad in its inode context.
-TEST stat $M0/FILE1
-
#Delete file and all links from backend
-TEST stat $B0/${V0}5/FILE1
-TEST `ls -li $B0/${V0}5/FILE1 | awk '{print $1}' | xargs find $B0/${V0}5/ -inum | xargs -r rm -rf`
+TEST rm -rf $(find $B0/${V0}5 -inum $(stat -c %i $B0/${V0}5/FILE1))
+
+# The test for each file below used to look like this:
+#
+# TEST stat $M0/FILE1
+# EXPECT_WITHIN $HEAL_TIMEOUT "$SIZE" stat $B0/${V0}5/FILE1
+#
+# That didn't really work, because EXPECT_WITHIN would bail immediately if
+# 'stat' returned an error - which it would if the file wasn't there yet.
+# Since changing this, I usually see at least a few retries, and sometimes more
+# than twenty, before the check for HL_FILE1 succeeds. The 'ls' is also
+# necessary, to force a name heal as well as data. With both that and the
+# 'stat' on $M0 being done here for every retry, there's no longer any need to
+# have them elsewhere.
+#
+# If we had EW_RETRIES support (https://review.gluster.org/#/c/16451/) we could
+# use it here to see how many retries are typical on the machines we use for
+# regression, and set an appropriate upper bound. As of right now, though,
+# that support does not exist yet.
+ugly_stat () {
+ local client_dir=$1
+ local brick_dir=$2
+ local bare_file=$3
+
+ ls $client_dir
+ stat -c %s $client_dir/$bare_file
+ stat -c %s $brick_dir/$bare_file 2> /dev/null || echo "UNKNOWN"
+}
#Access files
-TEST cat $M0/FILE1
-EXPECT_WITHIN $HEAL_TIMEOUT "$SIZE" stat -c %s $B0/${V0}5/FILE1
-
-TEST cat $M0/HL_FILE1
-EXPECT_WITHIN $HEAL_TIMEOUT "$SIZE" stat -c %s $B0/${V0}5/HL_FILE1
+EXPECT_WITHIN $HEAL_TIMEOUT "$SIZE" ugly_stat $M0 $B0/${V0}5 FILE1
+EXPECT_WITHIN $HEAL_TIMEOUT "$SIZE" ugly_stat $M0 $B0/${V0}5 HL_FILE1
cleanup;