author    Jeff Darcy <jdarcy@redhat.com>    2016-12-08 16:24:15 -0500
committer Vijay Bellur <vbellur@redhat.com>    2017-01-30 19:13:58 -0500
commit    1a95fc3036db51b82b6a80952f0908bc2019d24a (patch)
tree      b983ac196a8165d5cb5e860a5ef97d3e9a41b5c9 /tests/bugs/shard/zero-flag.t
parent    7f7d7a939e46b330a084d974451eee4757ba61b4 (diff)
core: run many bricks within one glusterfsd process
This patch adds support for multiple brick translator stacks running in a single brick server process. This reduces our per-brick memory usage by approximately 3x, and our appetite for TCP ports even more. It also creates potential to avoid process/thread thrashing, and to improve QoS by scheduling more carefully across the bricks, but realizing that potential will require further work.

Multiplexing is controlled by the "cluster.brick-multiplex" global option. By default it's off, and bricks are started in separate processes as before. If multiplexing is enabled, then *compatible* bricks (mostly those with the same transport options) will be started in the same process.

Change-Id: I45059454e51d6f4cbb29a4953359c09a408695cb
BUG: 1385758
Signed-off-by: Jeff Darcy <jdarcy@redhat.com>
Reviewed-on: https://review.gluster.org/14763
Smoke: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
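For context, a minimal sketch of how the new global option would be toggled and checked on a test cluster, assuming the usual "gluster volume set all" syntax for cluster-wide options; the verification step is illustrative and not part of this patch:

    # Enable brick multiplexing cluster-wide (off by default, as described above).
    gluster volume set all cluster.brick-multiplex on

    # With multiplexing on, compatible bricks share one glusterfsd process;
    # counting brick processes should show far fewer than one per brick.
    pgrep -c glusterfsd

    # Revert to the default one-process-per-brick behaviour.
    gluster volume set all cluster.brick-multiplex off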
Diffstat (limited to 'tests/bugs/shard/zero-flag.t')
-rw-r--r-- tests/bugs/shard/zero-flag.t | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/tests/bugs/shard/zero-flag.t b/tests/bugs/shard/zero-flag.t
index 6996150cd0e..84cb9635a1b 100644
--- a/tests/bugs/shard/zero-flag.t
+++ b/tests/bugs/shard/zero-flag.t
@@ -27,7 +27,7 @@ TEST touch $M0/file1
gfid_file1=$(get_gfid_string $M0/file1)
-TEST $(dirname $0)/zero-flag $H0 $V0 "0" "0" "6291456" /file1 `gluster --print-logdir`/glfs-$V0.log
+TEST $(dirname $0)/shard-fallocate $H0 $V0 "0" "0" "6291456" /file1 `gluster --print-logdir`/glfs-$V0.log
EXPECT '6291456' stat -c %s $M0/file1
@@ -47,7 +47,7 @@ TEST truncate -s 6M $M0/file2
TEST dd if=$M0/tmp of=$M0/file2 bs=1 seek=3145728 count=26 conv=notrunc
md5sum_file2=$(md5sum $M0/file2 | awk '{print $1}')
-TEST $(dirname $0)/zero-flag $H0 $V0 "0" "3145728" "26" /file2 `gluster --print-logdir`/glfs-$V0.log
+TEST $(dirname $0)/shard-fallocate $H0 $V0 "0" "3145728" "26" /file2 `gluster --print-logdir`/glfs-$V0.log
EXPECT '6291456' stat -c %s $M0/file2
EXPECT "$md5sum_file2" echo `md5sum $M0/file2 | awk '{print $1}'`
@@ -65,11 +65,11 @@ TEST stat $B0/$V0*/.shard/$gfid_file3.2
md5sum_file3=$(md5sum $M0/file3 | awk '{print $1}')
EXPECT "1048602" echo `find $B0 -name $gfid_file3.2 | xargs stat -c %s`
-TEST $(dirname $0)/zero-flag $H0 $V0 "0" "5242880" "1048576" /file3 `gluster --print-logdir`/glfs-$V0.log
+TEST $(dirname $0)/shard-fallocate $H0 $V0 "0" "5242880" "1048576" /file3 `gluster --print-logdir`/glfs-$V0.log
EXPECT "$md5sum_file3" echo `md5sum $M0/file3 | awk '{print $1}'`
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
TEST $CLI volume stop $V0
TEST $CLI volume delete $V0
-rm -f $(dirname $0)/zero-flag
+rm -f $(dirname $0)/shard-fallocate
cleanup