Diffstat (limited to 'tests/bugs/glusterd')
-rwxr-xr-x  tests/bugs/glusterd/859927/repl.t | 13
-rw-r--r--  tests/bugs/glusterd/add-brick-and-validate-replicated-volume-options.t | 110
-rw-r--r--  tests/bugs/glusterd/brick-mux-validation-in-cluster.t | 108
-rw-r--r--  tests/bugs/glusterd/brick-mux-validation.t | 104
-rw-r--r--  tests/bugs/glusterd/brick-mux.t | 81
-rw-r--r--  tests/bugs/glusterd/brick-order-check-add-brick.t | 61
-rwxr-xr-x  tests/bugs/glusterd/bug-000000.t | 9
-rwxr-xr-x  tests/bugs/glusterd/bug-1002556.t | 25
-rw-r--r--  tests/bugs/glusterd/bug-1004744.t | 46
-rwxr-xr-x  tests/bugs/glusterd/bug-1022055.t | 26
-rw-r--r--  tests/bugs/glusterd/bug-1027171.t | 53
-rw-r--r--  tests/bugs/glusterd/bug-1040408.t | 31
-rw-r--r--  tests/bugs/glusterd/bug-1046308.t | 19
-rw-r--r--  tests/bugs/glusterd/bug-1047955.t | 23
-rwxr-xr-x  tests/bugs/glusterd/bug-1070734.t | 10
-rw-r--r--  tests/bugs/glusterd/bug-1075087.t | 33
-rw-r--r-- [-rwxr-xr-x]  tests/bugs/glusterd/bug-1085330-and-bug-916549.t (renamed from tests/bugs/glusterd/bug-1085330.t) | 17
-rwxr-xr-x  tests/bugs/glusterd/bug-1089668.t | 27
-rw-r--r--  tests/bugs/glusterd/bug-1092841.t | 24
-rwxr-xr-x  tests/bugs/glusterd/bug-1095097.t | 21
-rw-r--r--  tests/bugs/glusterd/bug-1102656.t | 20
-rw-r--r--  tests/bugs/glusterd/bug-1104642.t | 47
-rw-r--r--  tests/bugs/glusterd/bug-1109741-auth-mgmt-handshake.t | 50
-rw-r--r--  tests/bugs/glusterd/bug-1111041.t | 36
-rw-r--r--  tests/bugs/glusterd/bug-1120647.t | 17
-rw-r--r--  tests/bugs/glusterd/bug-1163108-min-free-disk-option-validation.t | 37
-rwxr-xr-x  tests/bugs/glusterd/bug-1173414-mgmt-v3-remote-lock-failure.t | 34
-rw-r--r--  tests/bugs/glusterd/bug-1177132-quorum-validation.t | 64
-rw-r--r--  tests/bugs/glusterd/bug-1179175-uss-option-validation.t | 37
-rw-r--r--  tests/bugs/glusterd/bug-1238706-daemons-stop-on-peer-cleanup.t | 44
-rw-r--r--  tests/bugs/glusterd/bug-1242875-do-not-pass-volinfo-quota.t | 38
-rw-r--r--  tests/bugs/glusterd/bug-1482906-peer-file-blank-line.t | 29
-rw-r--r--  tests/bugs/glusterd/bug-1595320.t | 93
-rw-r--r--  tests/bugs/glusterd/bug-1696046.t | 113
-rw-r--r--  tests/bugs/glusterd/bug-1699339.t | 73
-rw-r--r--  tests/bugs/glusterd/bug-1720566.t | 50
-rwxr-xr-x  tests/bugs/glusterd/bug-765230-remove-quota-related-option-after-disabling-quota.t | 62
-rwxr-xr-x  tests/bugs/glusterd/bug-782095.t | 48
-rw-r--r--  tests/bugs/glusterd/bug-824753-file-locker.c | 16
-rwxr-xr-x  tests/bugs/glusterd/bug-824753.t | 2
-rw-r--r--  tests/bugs/glusterd/bug-839595.t | 31
-rw-r--r--  tests/bugs/glusterd/bug-857330/common.rc | 55
-rwxr-xr-x  tests/bugs/glusterd/bug-857330/normal.t | 79
-rwxr-xr-x  tests/bugs/glusterd/bug-857330/xml.t | 103
-rwxr-xr-x  tests/bugs/glusterd/bug-859927.t | 70
-rwxr-xr-x  tests/bugs/glusterd/bug-862834.t | 46
-rw-r--r--  tests/bugs/glusterd/bug-878004.t | 29
-rw-r--r--  tests/bugs/glusterd/bug-888752.t | 24
-rwxr-xr-x  tests/bugs/glusterd/bug-889630.t | 56
-rw-r--r--  tests/bugs/glusterd/bug-905307.t | 36
-rw-r--r--  tests/bugs/glusterd/bug-913487.t | 14
-rwxr-xr-x  tests/bugs/glusterd/bug-913555.t | 54
-rwxr-xr-x  tests/bugs/glusterd/bug-916549.t | 19
-rwxr-xr-x  tests/bugs/glusterd/bug-948686.t | 46
-rw-r--r--  tests/bugs/glusterd/bug-949930.t | 2
-rwxr-xr-x  tests/bugs/glusterd/bug-955588.t | 27
-rw-r--r--  tests/bugs/glusterd/bug-958790.t | 21
-rw-r--r--  tests/bugs/glusterd/bug-961669.t | 48
-rwxr-xr-x  tests/bugs/glusterd/bug-963541.t | 33
-rwxr-xr-x  tests/bugs/glusterd/bug-964059.t | 30
-rw-r--r--  tests/bugs/glusterd/check_elastic_server.t | 63
-rw-r--r--  tests/bugs/glusterd/daemon-log-level-option.t | 93
-rw-r--r--  tests/bugs/glusterd/df-results-post-replace-brick-operations.t | 61
-rw-r--r--  tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t | 71
-rw-r--r--  tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t | 115
-rw-r--r--  tests/bugs/glusterd/optimized-basic-testcases.t | 305
-rw-r--r--  tests/bugs/glusterd/quorum-validation.t | 122
-rw-r--r--  tests/bugs/glusterd/rebalance-in-cluster.t | 52
-rw-r--r--  tests/bugs/glusterd/rebalance-operations-in-single-node.t | 131
-rw-r--r--  tests/bugs/glusterd/remove-brick-in-cluster.t | 60
-rw-r--r--  tests/bugs/glusterd/remove-brick-testcases.t | 119
-rw-r--r--  tests/bugs/glusterd/remove-brick-validation.t | 68
-rw-r--r--  tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t (renamed from tests/bugs/glusterd/bug-974007.t) | 40
-rw-r--r--  tests/bugs/glusterd/replace-brick-operations.t | 48
-rw-r--r--  tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t | 63
-rw-r--r--  tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t | 54
-rw-r--r--  tests/bugs/glusterd/snapshot-operations.t | 50
-rw-r--r--  tests/bugs/glusterd/sync-post-glusterd-restart.t | 54
-rw-r--r--  tests/bugs/glusterd/validating-options-for-replicated-volume.t | 142
-rw-r--r--  tests/bugs/glusterd/validating-server-quorum.t | 125
80 files changed, 2779 insertions, 1631 deletions
diff --git a/tests/bugs/glusterd/859927/repl.t b/tests/bugs/glusterd/859927/repl.t
index a500961165c..6e7c23b5b1d 100755
--- a/tests/bugs/glusterd/859927/repl.t
+++ b/tests/bugs/glusterd/859927/repl.t
@@ -23,7 +23,9 @@ TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
TEST $CLI volume set $V0 cluster.self-heal-daemon off
TEST $CLI volume set $V0 performance.stat-prefetch off
TEST $CLI volume set $V0 client-log-level DEBUG
-TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
TEST $CLI volume start $V0
TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0;
@@ -33,20 +35,23 @@ TEST $CLI volume set $V0 cluster.data-self-heal-algorithm full
EXPECT full volume_option $V0 cluster.data-self-heal-algorithm
create_setup_for_self_heal $M0/a
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
-cat $file 2>&1 > /dev/null
+cat $file > /dev/null 2>&1
+EXPECT_WITHIN $HEAL_TIMEOUT 0 get_pending_heal_count $V0
TEST cmp $B0/${V0}1/a $B0/${V0}2/a
TEST $CLI volume set $V0 cluster.data-self-heal-algorithm diff
EXPECT diff volume_option $V0 cluster.data-self-heal-algorithm
create_setup_for_self_heal $M0/a
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
-cat $file 2>&1 > /dev/null
+cat $file > /dev/null 2>&1
+EXPECT_WITHIN $HEAL_TIMEOUT 0 get_pending_heal_count $V0
TEST cmp $B0/${V0}1/a $B0/${V0}2/a
TEST $CLI volume reset $V0 cluster.data-self-heal-algorithm
create_setup_for_self_heal $M0/a
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
-cat $file 2>&1 > /dev/null
+cat $file > /dev/null 2>&1
+EXPECT_WITHIN $HEAL_TIMEOUT 0 get_pending_heal_count $V0
TEST cmp $B0/${V0}1/a $B0/${V0}2/a
TEST ! $CLI volume set $V0 cluster.data-self-heal-algorithm ""
diff --git a/tests/bugs/glusterd/add-brick-and-validate-replicated-volume-options.t b/tests/bugs/glusterd/add-brick-and-validate-replicated-volume-options.t
new file mode 100644
index 00000000000..95d0eb69ac1
--- /dev/null
+++ b/tests/bugs/glusterd/add-brick-and-validate-replicated-volume-options.t
@@ -0,0 +1,110 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status';
+
+#bug-1102656 - validating volume top command
+
+TEST $CLI volume top $V0 open
+TEST ! $CLI volume top $V0 open brick $H0:/tmp/brick
+TEST $CLI volume top $V0 read
+
+TEST $CLI volume status
+
+#bug- 1002556
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
+
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}3
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks';
+
+TEST $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}3 force
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
+
+TEST killall glusterd
+TEST glusterd
+
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
+
+#bug-1406411- fail-add-brick-when-replica-count-changes
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+#add-brick should fail
+TEST ! $CLI_NO_FORCE volume add-brick $V0 replica 3 $H0:$B0/${V0}3
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}3
+
+TEST $CLI volume create $V1 $H0:$B0/${V1}{1,2};
+TEST $CLI volume start $V1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}2
+TEST kill_brick $V1 $H0 $B0/${V1}1
+
+#add-brick should fail
+TEST ! $CLI_NO_FORCE volume add-brick $V1 replica 2 $H0:$B0/${V1}{3,4}
+
+TEST $CLI volume start $V1 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}2
+
+TEST $CLI volume add-brick $V1 replica 2 $H0:$B0/${V1}{3,4}
+
+#bug-905307 - validate cluster.post-op-delay-secs option
+
+#Strings should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs abc
+
+#-ve ints should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs -1
+
+#INT_MAX+1 should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 2147483648
+
+#floats should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 1.25
+
+#min val 0 should be accepted
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 0
+EXPECT "0" volume_option $V0 cluster.post-op-delay-secs
+
+#max val 2147483647 should be accepted
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147483647
+EXPECT "2147483647" volume_option $V0 cluster.post-op-delay-secs
+
+#some middle val in range 2147 should be accepted
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147
+EXPECT "2147" volume_option $V0 cluster.post-op-delay-secs
+
+#bug-1265479 - validate-replica-volume-options
+
+#Setting data-self-heal option on for distribute-replicate volume
+TEST $CLI volume set $V1 data-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.data-self-heal';
+TEST $CLI volume set $V1 cluster.data-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.data-self-heal';
+
+#Setting metadata-self-heal option on for distribute-replicate volume
+TEST $CLI volume set $V1 metadata-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.metadata-self-heal';
+TEST $CLI volume set $V1 cluster.metadata-self-heal on
+
+#Setting entry-self-heal option on for distribute-replicate volume
+TEST $CLI volume set $V1 entry-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.entry-self-heal';
+TEST $CLI volume set $V1 cluster.entry-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.entry-self-heal';
+
+cleanup
diff --git a/tests/bugs/glusterd/brick-mux-validation-in-cluster.t b/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
new file mode 100644
index 00000000000..b6af487a791
--- /dev/null
+++ b/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
@@ -0,0 +1,108 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
+function count_brick_pids {
+ $CLI_1 --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
+ | grep -v "N/A" | sort | uniq | wc -l
+}
+
+function count_N/A_brick_pids {
+ $CLI_1 --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
+ | grep -- '\-1' | sort | uniq | wc -l
+}
+
+function check_peers {
+ $CLI_2 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+TEST launch_cluster 3
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+TEST $CLI_1 volume set all cluster.brick-multiplex on
+#bug-1609163 - bricks of normal volume should not attach to bricks of gluster_shared_storage volume
+
+##Create, start and mount meta_volume i.e., shared_storage
+TEST $CLI_1 volume create $META_VOL replica 3 $H1:$B1/${META_VOL}1 $H2:$B2/${META_VOL}1 $H3:$B3/${META_VOL}1
+TEST $CLI_1 volume start $META_VOL
+TEST mkdir -p $META_MNT
+TEST glusterfs -s $H1 --volfile-id $META_VOL $META_MNT
+
+TEST $CLI_1 volume info gluster_shared_storage
+
+EXPECT 3 count_brick_processes
+
+#create and start a new volume
+TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/${V0}{1..3} $H2:$B2/${V0}{1..3}
+TEST $CLI_1 volume start $V0
+
+# bricks of normal volume should not attach to bricks of gluster_shared_storage volume
+EXPECT 5 count_brick_processes
+
+#bug-1549996 - stale brick processes on the nodes after volume deletion
+
+TEST $CLI_1 volume create $V1 replica 3 $H1:$B1/${V1}{1..3} $H2:$B2/${V1}{1..3}
+TEST $CLI_1 volume start $V1
+
+EXPECT 5 count_brick_processes
+
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume stop $V1
+
+EXPECT 3 count_brick_processes
+
+TEST $CLI_1 volume stop $META_VOL
+
+TEST $CLI_1 volume delete $META_VOL
+TEST $CLI_1 volume delete $V0
+TEST $CLI_1 volume delete $V1
+
+#bug-1773856 - Brick process fails to come up with brickmux on
+
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1 $H3:$B3/${V0}1 force
+TEST $CLI_1 volume start $V0
+
+
+EXPECT 3 count_brick_processes
+
+#create and start a new volume
+TEST $CLI_1 volume create $V1 $H1:$B1/${V1}2 $H2:$B2/${V1}2 $H3:$B3/${V1}2 force
+TEST $CLI_1 volume start $V1
+
+EXPECT 3 count_brick_processes
+
+V2=patchy2
+TEST $CLI_1 volume create $V2 $H1:$B1/${V2}3 $H2:$B2/${V2}3 $H3:$B3/${V2}3 force
+TEST $CLI_1 volume start $V2
+
+EXPECT 3 count_brick_processes
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_brick_pids
+
+TEST kill_node 1
+
+sleep 10
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+
+$CLI_2 volume set $V0 performance.readdir-ahead on
+$CLI_2 volume set $V1 performance.readdir-ahead on
+
+TEST $glusterd_1;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 count_N/A_brick_pids
+
+cleanup;
diff --git a/tests/bugs/glusterd/brick-mux-validation.t b/tests/bugs/glusterd/brick-mux-validation.t
new file mode 100644
index 00000000000..61b0455f9a8
--- /dev/null
+++ b/tests/bugs/glusterd/brick-mux-validation.t
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../traps.rc
+. $(dirname $0)/../../volume.rc
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
+function count_brick_pids {
+ $CLI --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
+ | grep -v "N/A" | sort | uniq | wc -l
+}
+
+cleanup;
+
+#bug-1451248 - validate brick mux after glusterd reboot
+
+TEST glusterd
+TEST $CLI volume set all cluster.brick-multiplex on
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3}
+TEST $CLI volume start $V0
+
+EXPECT 1 count_brick_processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count
+
+pkill gluster
+TEST glusterd
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count
+
+TEST $CLI volume create $V1 $H0:$B0/${V1}{1..3}
+TEST $CLI volume start $V1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
+
+#bug-1560957 - brick status goes offline after remove-brick followed by add-brick
+
+pkill glusterd
+TEST glusterd
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 force
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}1_new force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
+
+#bug-1446172 - reset brick with brick multiplexing enabled
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+# Create files
+for i in {1..5}
+do
+ echo $i > $M0/file$i.txt
+done
+
+TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}1_new start
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 5 online_brick_count
+EXPECT 1 count_brick_processes
+
+# Negative case with brick killed but volume-id xattr present
+TEST ! $CLI volume reset-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1 commit
+
+# reset-brick commit force should work and should bring up the brick
+TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}1_new $H0:$B0/${V0}1_new commit force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
+EXPECT 1 count_brick_processes
+TEST glusterfs --volfile-id=$V1 --volfile-server=$H0 $M1;
+# Create files
+for i in {1..5}
+do
+ echo $i > $M1/file$i.txt
+done
+
+TEST $CLI volume reset-brick $V1 $H0:$B0/${V1}1 start
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 5 online_brick_count
+EXPECT 1 count_brick_processes
+
+# Simulate reset disk
+for i in {1..5}
+do
+ rm -rf $B0/${V1}1/file$i.txt
+done
+
+setfattr -x trusted.glusterfs.volume-id $B0/${V1}1
+setfattr -x trusted.gfid $B0/${V1}1
+
+# Test reset-brick commit. Using CLI_IGNORE_PARTITION since normal CLI uses
+# the --wignore flag that essentially makes the command act like "commit force"
+TEST $CLI_IGNORE_PARTITION volume reset-brick $V1 $H0:$B0/${V1}1 $H0:$B0/${V1}1 commit
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
+EXPECT 1 count_brick_processes
+
+cleanup;
diff --git a/tests/bugs/glusterd/brick-mux.t b/tests/bugs/glusterd/brick-mux.t
new file mode 100644
index 00000000000..927940534c1
--- /dev/null
+++ b/tests/bugs/glusterd/brick-mux.t
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
+cleanup
+
+#bug-1444596 - validating brick mux
+
+TEST glusterd -LDEBUG
+TEST $CLI volume create $V0 $H0:$B0/brick{0,1}
+TEST $CLI volume create $V1 $H0:$B0/brick{2,3}
+
+TEST $CLI volume set all cluster.brick-multiplex on
+
+TEST $CLI volume start $V0
+TEST $CLI volume start $V1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+EXPECT 1 count_brick_processes
+
+#bug-1499509 - stop all the bricks when a brick process is killed
+kill -9 $(pgrep glusterfsd)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 online_brick_count
+
+TEST $CLI volume start $V0 force
+TEST $CLI volume start $V1 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+
+
+pkill glusterd
+TEST glusterd
+
+#Check brick status after restart glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+EXPECT 1 count_brick_processes
+
+TEST $CLI volume set $V1 performance.io-cache-size 32MB
+TEST $CLI volume stop $V1
+TEST $CLI volume start $V1
+
+#Check No. of brick processes after change option
+EXPECT 2 count_brick_processes
+
+pkill glusterd
+TEST glusterd
+
+#Check brick status after restart glusterd should not be NA
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+EXPECT 2 count_brick_processes
+
+pkill glusterd
+TEST glusterd
+
+#Check brick status after restart glusterd should not be NA
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+EXPECT 2 count_brick_processes
+
+#bug-1444596_brick_mux_posix_hlth_chk_status
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+TEST rm -rf $H0:$B0/brick{0,1}
+
+#Check No. of brick processes after remove brick from back-end
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 online_brick_count
+
+TEST glusterfs -s $H0 --volfile-id $V1 $M0
+TEST touch $M0/file{1..10}
+
+pkill glusterd
+TEST glusterd -LDEBUG
+sleep 5
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 online_brick_count
+
+cleanup
+
diff --git a/tests/bugs/glusterd/brick-order-check-add-brick.t b/tests/bugs/glusterd/brick-order-check-add-brick.t
new file mode 100644
index 00000000000..0be31dac768
--- /dev/null
+++ b/tests/bugs/glusterd/brick-order-check-add-brick.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+TEST verify_lvm_version;
+#Create cluster with 3 nodes
+TEST launch_cluster 3 -NO_DEBUG -NO_FORCE
+TEST setup_lvm 3
+
+TEST $CLI_1 peer probe $H2
+TEST $CLI_1 peer probe $H3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+TEST $CLI_1 volume create $V0 replica 3 $H1:$L1/$V0 $H2:$L2/$V0 $H3:$L3/$V0
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+#add-brick with or without mentioning the replica count should not fail
+TEST $CLI_1 volume add-brick $V0 replica 3 $H1:$L1/${V0}_1 $H2:$L2/${V0}_1 $H3:$L3/${V0}_1
+EXPECT '2 x 3 = 6' volinfo_field $V0 'Number of Bricks'
+
+TEST $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_2 $H2:$L2/${V0}_2 $H3:$L3/${V0}_2
+EXPECT '3 x 3 = 9' volinfo_field $V0 'Number of Bricks'
+
+#adding bricks from same host should fail the brick order check
+TEST ! $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_3 $H1:$L1/${V0}_4 $H1:$L1/${V0}_5
+EXPECT '3 x 3 = 9' volinfo_field $V0 'Number of Bricks'
+
+#adding bricks from same host with force should succeed
+TEST $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_3 $H1:$L1/${V0}_4 $H1:$L1/${V0}_5 force
+EXPECT '4 x 3 = 12' volinfo_field $V0 'Number of Bricks'
+
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume delete $V0
+
+TEST $CLI_1 volume create $V0 replica 2 $H1:$L1/${V0}1 $H2:$L2/${V0}1
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks'
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+#Add-brick with Increasing replica count
+TEST $CLI_1 volume add-brick $V0 replica 3 $H3:$L3/${V0}1
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
+
+#Add-brick with Increasing replica count from same host should fail
+TEST ! $CLI_1 volume add-brick $V0 replica 5 $H1:$L1/${V0}2 $H1:$L1/${V0}3
+
+#adding multiple bricks from same host should fail the brick order check
+TEST ! $CLI_1 volume add-brick $V0 replica 3 $H1:$L1/${V0}{4..6} $H2:$L2/${V0}{7..9}
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-000000.t b/tests/bugs/glusterd/bug-000000.t
deleted file mode 100755
index 55f7b11f598..00000000000
--- a/tests/bugs/glusterd/bug-000000.t
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-
-cleanup;
-
-TEST glusterd
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1002556.t b/tests/bugs/glusterd/bug-1002556.t
deleted file mode 100755
index ac71d06d533..00000000000
--- a/tests/bugs/glusterd/bug-1002556.t
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
-TEST $CLI volume start $V0
-EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
-
-TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}2
-EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks';
-
-TEST $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}1 force
-EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
-
-TEST killall glusterd
-TEST glusterd
-
-EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
-cleanup
diff --git a/tests/bugs/glusterd/bug-1004744.t b/tests/bugs/glusterd/bug-1004744.t
deleted file mode 100644
index b48ed97fb52..00000000000
--- a/tests/bugs/glusterd/bug-1004744.t
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-
-#Test case: After a rebalance fix-layout, check if the rebalance status command
-#displays the appropriate message at the CLI.
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-#Basic checks
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info
-
-#Create a 2x1 distributed volume
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
-TEST $CLI volume start $V0
-
-# Mount FUSE and create file/directory
-TEST glusterfs -s $H0 --volfile-id $V0 $M0
-for i in `seq 1 10`;
-do
- mkdir $M0/dir_$i
- echo file>$M0/dir_$i/file_$i
- for j in `seq 1 100`;
- do
- mkdir $M0/dir_$i/dir_$j
- echo file>$M0/dir_$i/dir_$j/file_$j
- done
-done
-
-#add 2 bricks
-TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{3,4};
-
-#perform rebalance fix-layout
-TEST $CLI volume rebalance $V0 fix-layout start
-
-EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" rebalance_status_field $V0;
-
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-TEST $CLI volume stop $V0
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1022055.t b/tests/bugs/glusterd/bug-1022055.t
deleted file mode 100755
index 9f39c80b6b6..00000000000
--- a/tests/bugs/glusterd/bug-1022055.t
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-
-function check_peers {
- $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-
-cleanup;
-
-TEST launch_cluster 2;
-
-TEST $CLI_1 peer probe $H2;
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
-
-TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0;
-
-TEST $CLI_1 volume start $V0;
-
-TEST $CLI_1 volume log rotate $V0;
-
-TEST $CLI_1 volume status;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1027171.t b/tests/bugs/glusterd/bug-1027171.t
deleted file mode 100644
index 1b457d8f660..00000000000
--- a/tests/bugs/glusterd/bug-1027171.t
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/bin/bash
-
-#Test case: Do not allow commit if the bricks are not decommissioned
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-#Basic checks
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info
-
-#Create a Distributed volume
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2};
-TEST $CLI volume start $V0
-
-#Remove bricks and commit without starting
-function remove_brick_commit_status {
- $CLI volume remove-brick $V0 \
- $H0:$B0/${V0}2 commit 2>&1 |grep -oE "success|decommissioned"
-}
-EXPECT "decommissioned" remove_brick_commit_status;
-
-TEST $CLI volume stop $V0
-TEST $CLI volume delete $V0
-TEST ! $CLI volume info $V0
-
-#Create a Distributed-Replicate volume
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..4};
-TEST $CLI volume start $V0
-
-#Try to reduce replica count with start option
-function remove_brick_start_status {
- $CLI volume remove-brick $V0 replica 1 \
- $H0:$B0/${V0}1 $H0:$B0/${V0}3 start 2>&1 |grep -oE "success|failed"
-}
-EXPECT "failed" remove_brick_start_status;
-
-#Remove bricks with commit option
-function remove_brick_commit_status2 {
- $CLI volume remove-brick $V0 replica 1 \
- $H0:$B0/${V0}1 $H0:$B0/${V0}3 commit 2>&1 |
- grep -oE "success|decommissioned"
-}
-EXPECT "decommissioned" remove_brick_commit_status2;
-
-TEST $CLI volume stop $V0
-TEST $CLI volume delete $V0
-TEST ! $CLI volume info $V0
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1040408.t b/tests/bugs/glusterd/bug-1040408.t
deleted file mode 100644
index c378000630b..00000000000
--- a/tests/bugs/glusterd/bug-1040408.t
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-#Test case: Create a distributed replicate volume, and reduce
-#replica count
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-#Basic checks
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info
-
-#Create a 2X3 distributed-replicate volume
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..6};
-TEST $CLI volume start $V0
-
-# Reduce to 2x2 volume by specifying bricks in reverse order
-function remove_brick_status {
- $CLI volume remove-brick $V0 replica 2 \
- $H0:$B0/${V0}6 $H0:$B0/${V0}3 force 2>&1 |grep -oE "success|failed"
-}
-EXPECT "success" remove_brick_status;
-
-TEST $CLI volume stop $V0
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1046308.t b/tests/bugs/glusterd/bug-1046308.t
deleted file mode 100644
index 9c827c4a492..00000000000
--- a/tests/bugs/glusterd/bug-1046308.t
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-
-cleanup;
-
-volname="StartMigrationDuringRebalanceTest"
-TEST glusterd
-TEST pidof glusterd;
-
-TEST $CLI volume info;
-TEST $CLI volume create $volname $H0:$B0/${volname}{1,2};
-TEST $CLI volume start $volname;
-TEST $CLI volume rebalance $volname start;
-
-cleanup;
-
-
-
diff --git a/tests/bugs/glusterd/bug-1047955.t b/tests/bugs/glusterd/bug-1047955.t
deleted file mode 100644
index a409d9f7195..00000000000
--- a/tests/bugs/glusterd/bug-1047955.t
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-
-function check_peers {
- $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-
-cleanup;
-
-# Create a 2x2 dist-rep volume; peer probe a new node.
-# Performing remove-brick from this new node must succeed
-# without crashing it's glusterd
-
-TEST launch_cluster 2;
-TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/${V0}{1,2,3,4}
-TEST $CLI_1 volume start $V0;
-TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
-TEST $CLI_2 volume remove-brick $V0 $H1:$B1/${V0}{3,4} start;
-TEST $CLI_2 volume info
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1070734.t b/tests/bugs/glusterd/bug-1070734.t
index b5a53c24cab..0afcb3b37b3 100755
--- a/tests/bugs/glusterd/bug-1070734.t
+++ b/tests/bugs/glusterd/bug-1070734.t
@@ -4,6 +4,8 @@
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../nfs.rc
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
cleanup;
## Start glusterd
@@ -17,6 +19,7 @@ TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
## Verify volume is created
EXPECT "$V0" volinfo_field $V0 'Volume Name';
EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 nfs.disable false
## Start volume and verify
TEST $CLI volume start $V0;
@@ -65,8 +68,11 @@ TEST [ -f ${OTHERBRICK}/DIR/file ]
#Check the DIR on HASHED should have got zeroed layout and the \
#OTHERBRICK should have got full layout
-EXPECT "0x00000001000000000000000000000000" dht_get_layout $HASHED/DIR ;
-EXPECT "0x000000010000000000000000ffffffff" dht_get_layout $OTHERBRICK/DIR;
+shorter_layout () {
+ dht_get_layout $1 | cut -c 19-
+}
+EXPECT "0000000000000000" shorter_layout $HASHED/DIR ;
+EXPECT "00000000ffffffff" shorter_layout $OTHERBRICK/DIR;
## Before killing daemon to avoid deadlocks
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
diff --git a/tests/bugs/glusterd/bug-1075087.t b/tests/bugs/glusterd/bug-1075087.t
deleted file mode 100644
index 35155a0b8c9..00000000000
--- a/tests/bugs/glusterd/bug-1075087.t
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 \
- $H0:$B0/${V0}2 $H0:$B0/${V0}3
-TEST $CLI volume start $V0
-
-## Mount FUSE
-TEST glusterfs -s $H0 --volfile-id=$V0 $M0;
-
-TEST mkdir $M0/dir{1..10};
-TEST touch $M0/dir{1..10}/files{1..10};
-
-TEST $CLI volume add-brick $V0 $H0:$B0/${V0}4 $H0:/$B0/${V0}5
-
-TEST $CLI volume rebalance $V0 start force
-EXPECT_WITHIN 60 "completed" rebalance_status_field $V0
-
-TEST pkill gluster
-TEST glusterd
-TEST pidof glusterd
-
-# status should be "completed" immediate after glusterd has respawned.
-EXPECT_WITHIN 5 "completed" rebalance_status_field $V0
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1085330.t b/tests/bugs/glusterd/bug-1085330-and-bug-916549.t
index ffcfe9274eb..892a30d74ea 100755..100644
--- a/tests/bugs/glusterd/bug-1085330.t
+++ b/tests/bugs/glusterd/bug-1085330-and-bug-916549.t
@@ -11,6 +11,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume info;
+#testcase: bug-1085330
# Construct volname string such that its more than 256 characters
for i in {1..30}
@@ -73,8 +74,20 @@ TEST ! $CLI volume create $volname $H0:$B0/$brick;
TEST $CLI volume info;
# Positive test case
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume create $V0 $H0:$B0/${V0}1;
TEST $CLI volume info;
-cleanup;
+TEST $CLI volume start $V0;
+
+#testcase: bug-916549
+
+pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/);
+brick_pid=$(cat $GLUSTERD_PIDFILEDIR/vols/$V0/$pid_file);
+
+kill -SIGKILL $brick_pid;
+TEST $CLI volume start $V0 force;
+TEST process_leak_count $(pidof glusterd);
+
+cleanup
+
diff --git a/tests/bugs/glusterd/bug-1089668.t b/tests/bugs/glusterd/bug-1089668.t
deleted file mode 100755
index f2b99bf6051..00000000000
--- a/tests/bugs/glusterd/bug-1089668.t
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../dht.rc
-
-cleanup
-
-#This script checks command "gluster volume rebalance <volname> status will not
-#show any output when user have done only remove-brick start and command
-#'gluster volume remove-brick <volname> <brick_name> status' will not show
-#any output when user have triggered only rebalance start.
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}
-TEST $CLI volume start $V0
-
-TEST $CLI volume rebalance $V0 start
-TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}1 status
-
-TEST $CLI volume rebalance $V0 stop
-
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
-TEST ! $CLI volume rebalance $V0 status
-
-cleanup
diff --git a/tests/bugs/glusterd/bug-1092841.t b/tests/bugs/glusterd/bug-1092841.t
deleted file mode 100644
index d3dcf07fd02..00000000000
--- a/tests/bugs/glusterd/bug-1092841.t
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-TEST glusterd;
-TEST pidof glusterd;
-TEST $CLI volume info;
-
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
-
-TEST $CLI volume start $V0;
-
-TEST $CLI volume barrier $V0 enable;
-
-TEST ! $CLI volume barrier $V0 enable;
-
-TEST $CLI volume barrier $V0 disable;
-
-TEST ! $CLI volume barrier $V0 disable;
-
-cleanup
diff --git a/tests/bugs/glusterd/bug-1095097.t b/tests/bugs/glusterd/bug-1095097.t
deleted file mode 100755
index 0fe29f06630..00000000000
--- a/tests/bugs/glusterd/bug-1095097.t
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-
-cleanup;
-TEST glusterd;
-TEST pidof glusterd;
-TEST $CLI volume info;
-
-TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B1/brick1;
-EXPECT 'Created' volinfo_field $V0 'Status';
-
-TEST $CLI volume start $V0;
-EXPECT 'Started' volinfo_field $V0 'Status';
-
-TEST $CLI volume profile $V0 start
-TEST $CLI volume profile $V0 info
-TEST $CLI volume replace-brick $V0 $H0:$B0/brick1 $H0:$B0/brick2 start
-TEST $CLI volume replace-brick $V0 $H0:$B0/brick1 $H0:$B0/brick2 status
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1102656.t b/tests/bugs/glusterd/bug-1102656.t
deleted file mode 100644
index e80f4930a63..00000000000
--- a/tests/bugs/glusterd/bug-1102656.t
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1
-TEST $CLI volume start $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status';
-
-TEST $CLI volume top $V0 open
-TEST ! $CLI volume top $V0 open brick $H0:/tmp/brick
-TEST $CLI volume top $V0 read
-
-TEST $CLI volume status
-TEST $CLI volume stop $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Stopped' volinfo_field $V0 'Status';
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1104642.t b/tests/bugs/glusterd/bug-1104642.t
deleted file mode 100644
index 000093a8ae2..00000000000
--- a/tests/bugs/glusterd/bug-1104642.t
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-
-function get_value()
-{
- local key=$1
- local var="CLI_$2"
-
- eval cli_index=\$$var
-
- $cli_index volume info | grep "^$key"\
- | sed 's/.*: //'
-}
-
-cleanup
-
-TEST launch_cluster 2
-
-TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
-EXPECT "$V0" get_value 'Volume Name' 1
-EXPECT "Created" get_value 'Status' 1
-
-TEST $CLI_1 volume start $V0
-EXPECT "Started" get_value 'Status' 1
-
-#Bring down 2nd glusterd
-TEST kill_glusterd 2
-
-#set the volume all options from the 1st glusterd
-TEST $CLI_1 volume set all cluster.server-quorum-ratio 80
-
-#Bring back the 2nd glusterd
-TEST $glusterd_2
-
-#Verify whether the value has been synced
-EXPECT '80' get_value 'cluster.server-quorum-ratio' 1
-EXPECT_WITHIN $PROBE_TIMEOUT '1' peer_count
-EXPECT_WITHIN $PROBE_TIMEOUT '80' get_value 'cluster.server-quorum-ratio' 2
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1109741-auth-mgmt-handshake.t b/tests/bugs/glusterd/bug-1109741-auth-mgmt-handshake.t
deleted file mode 100644
index 561b90740fa..00000000000
--- a/tests/bugs/glusterd/bug-1109741-auth-mgmt-handshake.t
+++ /dev/null
@@ -1,50 +0,0 @@
-#! /bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-
-# The test will attempt to verify that management handshake requests to
-# GlusterD are authenticated before being allowed to change a GlusterD's
-# op-version
-#
-# 1. Launch 3 glusterds
-# 2. Probe 2 of them to form a cluster. This should succeed.
-# 3. Probe either of the first two GlusterD's from the 3rd GlusterD. This should fail.
-# 4. a. Reduce the op-version of 3rd GlusterD and restart it.
-# b. Probe either of the first two GlusterD's from the 3rd GlusterD. This should fail.
-# 5. Check current op-version of first two GlusterDs. It shouldn't have changed.
-# 6. Probe third GlusterD from the cluster. This should succeed.
-
-
-cleanup
-
-TEST launch_cluster 3
-
-TEST $CLI_1 peer probe $H2
-
-TEST ! $CLI_3 peer probe $H1
-
-GD1_WD=$($CLI_1 system getwd)
-OP_VERS_ORIG=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2)
-
-TEST $CLI_3 system uuid get # Needed for glusterd.info to be created
-
-GD3_WD=$($CLI_3 system getwd)
-TEST sed -rnie "'s/(operating-version=)\w+/\130600/gip'" ${GD3_WD}/glusterd.info
-
-TEST kill_glusterd 3
-TEST start_glusterd 3
-
-TEST ! $CLI_3 peer probe $H1
-
-OP_VERS_NEW=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2)
-TEST [[ $OP_VERS_ORIG == $OP_VERS_NEW ]]
-
-TEST $CLI_1 peer probe $H3
-
-kill_node 1
-kill_node 2
-kill_node 3
-
-cleanup;
-
diff --git a/tests/bugs/glusterd/bug-1111041.t b/tests/bugs/glusterd/bug-1111041.t
deleted file mode 100644
index caaece0fcbb..00000000000
--- a/tests/bugs/glusterd/bug-1111041.t
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../fileio.rc
-. $(dirname $0)/../../nfs.rc
-
-cleanup;
-
-function is_snapd_running {
- $CLI volume status $1 | grep "Snapshot Daemon" | wc -l;
-}
-
-TEST glusterd;
-
-TEST pidof glusterd;
-
-TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
-
-TEST $CLI volume start $V0;
-
-EXPECT "0" is_snapd_running $v0
-
-TEST $CLI volume set $V0 features.uss enable;
-
-EXPECT "1" is_snapd_running $V0
-
-SNAPD_PID=$(ps auxww | grep snapd | grep -v grep | awk '{print $2}');
-
-TEST [ $SNAPD_PID -gt 0 ];
-
-SNAPD_PID2=$($CLI volume status $V0 | grep "Snapshot Daemon" | awk {'print $8'});
-
-TEST [ $SNAPD_PID -eq $SNAPD_PID2 ]
-
-cleanup ;
diff --git a/tests/bugs/glusterd/bug-1120647.t b/tests/bugs/glusterd/bug-1120647.t
deleted file mode 100644
index 0223f460398..00000000000
--- a/tests/bugs/glusterd/bug-1120647.t
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{1..4}
-TEST $CLI volume start $V0
-TEST $CLI volume remove-brick $V0 $H0:$B0/brick{3..4} start
-EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0 $H0:$B0/brick{3..4}"
-TEST $CLI volume remove-brick $V0 $H0:$B0/brick{3..4} commit
-TEST $CLI volume remove-brick $V0 replica 1 $H0:$B0/brick2 force
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1163108-min-free-disk-option-validation.t b/tests/bugs/glusterd/bug-1163108-min-free-disk-option-validation.t
deleted file mode 100644
index 9fc7ac3b845..00000000000
--- a/tests/bugs/glusterd/bug-1163108-min-free-disk-option-validation.t
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-
-## Test case for cluster.min-free-disk option validation.
-
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-## Start glusterd
-TEST glusterd
-TEST pidof glusterd
-
-## Lets create and start volume
-TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2
-TEST $CLI volume start $V0
-
-## Setting invalid value for option cluster.min-free-disk should fail
-TEST ! $CLI volume set $V0 min-free-disk ""
-TEST ! $CLI volume set $V0 min-free-disk 143.!/12
-TEST ! $CLI volume set $V0 min-free-disk 123%
-TEST ! $CLI volume set $V0 min-free-disk 194.34%
-
-## Setting fractional value as a size (unit is byte) for option
-## cluster.min-free-disk should fail
-TEST ! $CLI volume set $V0 min-free-disk 199.051
-TEST ! $CLI volume set $V0 min-free-disk 111.999
-
-## Setting valid value for option cluster.min-free-disk should pass
-TEST $CLI volume set $V0 min-free-disk 12%
-TEST $CLI volume set $V0 min-free-disk 56.7%
-TEST $CLI volume set $V0 min-free-disk 120
-TEST $CLI volume set $V0 min-free-disk 369.0000
-
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1173414-mgmt-v3-remote-lock-failure.t b/tests/bugs/glusterd/bug-1173414-mgmt-v3-remote-lock-failure.t
deleted file mode 100755
index 5a6cf81fd53..00000000000
--- a/tests/bugs/glusterd/bug-1173414-mgmt-v3-remote-lock-failure.t
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-
-function check_peers {
- $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-
-cleanup;
-
-TEST launch_cluster 2;
-TEST $CLI_1 peer probe $H2;
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
-
-TEST $CLI_1 volume create $V0 $H1:$B1/$V0
-TEST $CLI_1 volume create $V1 $H1:$B1/$V1
-TEST $CLI_1 volume start $V0
-TEST $CLI_1 volume start $V1
-
-for i in {1..20}
-do
- $CLI_1 volume set $V0 diagnostics.client-log-level DEBUG &
- $CLI_1 volume set $V1 barrier on
- $CLI_2 volume set $V0 diagnostics.client-log-level DEBUG &
- $CLI_2 volume set $V1 barrier on
-done
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
-TEST $CLI_1 volume status
-TEST $CLI_2 volume status
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1177132-quorum-validation.t b/tests/bugs/glusterd/bug-1177132-quorum-validation.t
deleted file mode 100644
index 57aec5ccf57..00000000000
--- a/tests/bugs/glusterd/bug-1177132-quorum-validation.t
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/bash
-
-# Test case for quorum validation in glusterd for syncop framework
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-
-cleanup;
-
-TEST launch_cluster 2
-
-TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-# Lets create the volume and set quorum type as a server
-TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
-TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
-
-# Start the volume
-TEST $CLI_1 volume start $V0
-
-# Set quorum ratio 52. means 52 % or more than 52% nodes of total available node
-# should be available for performing volume operation.
-# i.e. Server-side quorum is met if the number of nodes that are available is
-# greater than or equal to 'quorum-ratio' times the number of nodes in the
-# cluster
-
-TEST $CLI_1 volume set all cluster.server-quorum-ratio 52
-
-# Bring down 2nd glusterd
-TEST kill_glusterd 2
-
-# Now quorum is not meet. Add-brick, Remove-brick, volume-set command
-#(Command based on syncop framework)should fail
-TEST ! $CLI_1 volume add-brick $V0 $H1:$B1/${V0}1
-TEST ! $CLI_1 volume remove-brick $V0 $H1:$B1/${V0}0 start
-TEST ! $CLI_1 volume set $V0 barrier enable
-
-# Now execute a command which goes through op state machine and it should fail
-TEST ! $CLI_1 volume profile $V0 start
-
-# Volume set all command and volume reset all command should be successful
-TEST $CLI_1 volume set all cluster.server-quorum-ratio 80
-TEST $CLI_1 volume reset all
-
-# Bring back 2nd glusterd
-TEST $glusterd_2
-
-# After 2nd glusterd come back, there will be 2 nodes in a clusater
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
-
-# Now quorum is meet.
-# Add-brick, Remove-brick, volume-set command should success
-TEST $CLI_1 volume add-brick $V0 $H2:$B2/${V0}2
-TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0}2 start
-TEST $CLI_1 volume set $V0 barrier enable
-TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0}2 stop
-
-# Now re-execute the same profile command and this time it should succeed
-TEST $CLI_1 volume profile $V0 start
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1179175-uss-option-validation.t b/tests/bugs/glusterd/bug-1179175-uss-option-validation.t
deleted file mode 100644
index 6bbe3c9336f..00000000000
--- a/tests/bugs/glusterd/bug-1179175-uss-option-validation.t
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-
-## Test case for option features.uss validation.
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-## Start glusterd
-TEST glusterd;
-TEST pidof glusterd;
-
-## Lets create and start volume
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
-TEST $CLI volume start $V0
-
-## Set features.uss option with non-boolean value. These non-boolean value
-## for features.uss option should fail.
-TEST ! $CLI volume set $V0 features.uss abcd
-TEST ! $CLI volume set $V0 features.uss #$#$
-TEST ! $CLI volume set $V0 features.uss 2324
-
-## Setting other options with valid value. These options should succeed.
-TEST $CLI volume set $V0 barrier enable
-TEST $CLI volume set $V0 ping-timeout 60
-
-## Set features.uss option with valid boolean value. It should succeed.
-TEST $CLI volume set $V0 features.uss enable
-TEST $CLI volume set $V0 features.uss disable
-
-
-## Setting other options with valid value. These options should succeed.
-TEST $CLI volume set $V0 barrier enable
-TEST $CLI volume set $V0 ping-timeout 60
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-1238706-daemons-stop-on-peer-cleanup.t b/tests/bugs/glusterd/bug-1238706-daemons-stop-on-peer-cleanup.t
new file mode 100644
index 00000000000..7be076caaf3
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1238706-daemons-stop-on-peer-cleanup.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+## Test case for stopping all running daemons service on peer detach.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+
+## Start a 2 node virtual cluster
+TEST launch_cluster 2;
+
+## Peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+
+## Creating and starting volume
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H1:$B1/${V0}1
+TEST $CLI_1 volume start $V0
+
+TEST $CLI_1 volume set $V0 nfs.disable off
+
+## To Do: Add test case for quota and snapshot daemon. Currently quota
+## Daemon is not working in cluster framework. And sanpd daemon
+## Start only in one node in cluster framework. Add test case
+## once patch http://review.gluster.org/#/c/11666/ merged,
+
+## We are having 2 node "nfs" daemon should run on both node.
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_nfs_count
+
+## Detach 2nd node from the cluster.
+TEST $CLI_1 peer detach $H2;
+
+
+## After detaching 2nd node we will have only 1 nfs and quota daemon running.
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_nfs_count
+
+cleanup;
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
diff --git a/tests/bugs/glusterd/bug-1242875-do-not-pass-volinfo-quota.t b/tests/bugs/glusterd/bug-1242875-do-not-pass-volinfo-quota.t
new file mode 100644
index 00000000000..c229d4371b6
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1242875-do-not-pass-volinfo-quota.t
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## Lets create volume V0 and start the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+
+## Lets create volume V1 and start the volume
+TEST $CLI volume create $V1 $H0:$B0/${V0}2 $H0:$B0/${V0}3
+TEST $CLI volume start $V1
+
+## Enable quota on 2nd volume
+TEST $CLI volume quota $V1 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_quotad_count
+
+## Killing all gluster process
+pkill gluster;
+
+## there should not be any quota daemon running after killing quota process
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_quotad_count
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## Quotad daemon should start on restarting the glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_quotad_count
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1482906-peer-file-blank-line.t b/tests/bugs/glusterd/bug-1482906-peer-file-blank-line.t
new file mode 100644
index 00000000000..967595e4dbb
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1482906-peer-file-blank-line.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+#Tests for add new line in peers file
+function add_new_line_to_peer_file {
+ UUID_NAME=$($CLI_1 peer status | grep Uuid)
+ PEER_ID=$(echo $UUID_NAME | cut -c 7-)
+ GD_WD=$($CLI_1 system getwd)
+ GD_WD+=/peers/
+ PATH_TO_PEER_FILE=$GD_WD$PEER_ID
+ sed -i '1s/^/\n/gm; $s/$/\n/gm' $PATH_TO_PEER_FILE
+}
+
+cleanup;
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+add_new_line_to_peer_file
+
+TEST kill_glusterd 1
+TEST $glusterd_1
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1595320.t b/tests/bugs/glusterd/bug-1595320.t
new file mode 100644
index 00000000000..c10e11821a1
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1595320.t
@@ -0,0 +1,93 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup
+
+function count_up_bricks {
+ $CLI --xml volume status $V0 | grep '<status>1' | wc -l
+}
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
+# Setup 3 LVMS
+LVM_PREFIX="test"
+TEST init_n_bricks 3
+TEST setup_lvm 3
+
+# Start glusterd
+TEST glusterd
+TEST pidof glusterd
+
+# Create volume and enable brick multiplexing
+TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3
+TEST $CLI v set all cluster.brick-multiplex on
+
+# Start the volume
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_up_bricks
+EXPECT 1 count_brick_processes
+
+# Kill volume ungracefully
+brick_pid=`pgrep glusterfsd`
+
+# Make sure every brick root should be consumed by a brick process
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L1 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L2 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L3 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+
+b1_pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/*d-backends-1*.pid)
+b2_pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/*d-backends-2*.pid)
+b3_pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/*d-backends-3*.pid)
+
+kill -9 $brick_pid
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 count_brick_processes
+
+# Unmount 3rd brick root from node
+brick_root=$L3
+_umount_lv 3
+
+# Start the volume only 2 brick should be start
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_up_bricks
+EXPECT 1 count_brick_processes
+
+brick_pid=`pgrep glusterfsd`
+
+# Make sure only two brick root should be consumed by a brick process
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L1 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L2 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L3 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 0 ]
+
+# Mount the brick root
+TEST mkdir -p $brick_root
+TEST mount -t xfs -o nouuid /dev/test_vg_3/brick_lvm $brick_root
+
+# Replace brick_pid file to test brick_attach code
+TEST cp $b1_pid_file $b3_pid_file
+
+# Start the volume all brick should be up
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_up_bricks
+EXPECT 1 count_brick_processes
+
+# Make sure every brick root should be consumed by a brick process
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L1 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L2 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L3 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1696046.t b/tests/bugs/glusterd/bug-1696046.t
new file mode 100644
index 00000000000..e1c1eb2ceb9
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1696046.t
@@ -0,0 +1,113 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+function count_up_bricks {
+ $CLI --xml volume status $1 | grep '<status>1' | wc -l
+}
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
+logdir=`gluster --print-logdir`
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume set all cluster.brick-multiplex on
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3};
+TEST $CLI volume create $V1 replica 3 $H0:$B0/${V1}{1,2,3};
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $CLI volume start $V1;
+EXPECT 'Started' volinfo_field $V1 'Status';
+
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_up_bricks $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_up_bricks $V1
+
+EXPECT 1 count_brick_processes
+
+# Mount V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+function client-log-file-name()
+{
+ logfilename=$M0".log"
+ echo ${logfilename:1} | tr / -
+}
+
+function brick-log-file-name()
+{
+ logfilename=$B0"/"$V0"1.log"
+ echo ${logfilename:1} | tr / -
+}
+
+log_file=$logdir"/"`client-log-file-name`
+nofdlog=$(cat $log_file | grep " D " | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+brick_log_file=$logdir"/bricks/"`brick-log-file-name`
+nofdlog=$(cat $brick_log_file | grep " D " | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+## Set brick-log-level to DEBUG
+TEST $CLI volume set $V0 diagnostics.brick-log-level DEBUG
+
+# Do some operation
+touch $M0/file1
+
+# Check debug message debug message should be exist only for V0
+# Server xlator is common in brick_mux so after enabling DEBUG log
+# some debug message should be available for other xlators like posix
+
+brick_log_file=$logdir"/bricks/"`brick-log-file-name`
+nofdlog=$(cat $brick_log_file | grep file1 | grep -v server | wc -l)
+TEST [ $((nofdlog)) -ne 0 ]
+
+#Check if any debug log exist in client-log file
+nofdlog=$(cat $log_file | grep " D " | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+## Set brick-log-level to INFO
+TEST $CLI volume set $V0 diagnostics.brick-log-level INFO
+
+## Set client-log-level to DEBUG
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+
+# Do some operation
+touch $M0/file2
+
+nofdlog=$(cat $brick_log_file | grep " D " | grep file2 | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+nofdlog=$(cat $log_file | grep " D " | wc -l)
+TEST [ $((nofdlog)) -ne 0 ]
+
+# Unmount V0
+TEST umount $M0
+
+#Mount V1
+TEST glusterfs --volfile-id=$V1 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+#do some operation
+touch $M0/file3
+
+
+# DEBUG log level is enabled only for V0 so no debug message should be available
+# in log specific to file2 creation except for server xlator, server xlator is
+# common xlator in brick mulitplex
+nofdlog=$(cat $brick_log_file | grep file3 | grep -v server | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+# Unmount V1
+TEST umount $M0
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1699339.t b/tests/bugs/glusterd/bug-1699339.t
new file mode 100644
index 00000000000..bb8d4f46eb8
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1699339.t
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+NUM_VOLS=15
+
+
+get_brick_base () {
+ printf "%s/vol%02d" $B0 $1
+}
+
+function count_up_bricks {
+ vol=$1;
+ $CLI_1 --xml volume status $vol | grep '<status>1' | wc -l
+}
+
+create_volume () {
+
+ local vol_name=$(printf "%s-vol%02d" $V0 $1)
+
+ TEST $CLI_1 volume create $vol_name replica 3 $H1:$B1/${vol_name} $H2:$B2/${vol_name} $H3:$B3/${vol_name}
+ TEST $CLI_1 volume start $vol_name
+}
+
+TEST launch_cluster 3
+TEST $CLI_1 volume set all cluster.brick-multiplex on
+
+# The option accepts the value in the range from 5 to 200
+TEST ! $CLI_1 volume set all glusterd.vol_count_per_thread 210
+TEST ! $CLI_1 volume set all glusterd.vol_count_per_thread 4
+
+TEST $CLI_1 volume set all glusterd.vol_count_per_thread 5
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+# Our infrastructure can't handle an arithmetic expression here. The formula
+# is (NUM_VOLS-1)*2 because it sees each TEST/EXPECT once but needs the other
+# NUM_VOLS-1, and there are 2 such statements in each iteration.
+TESTS_EXPECTED_IN_LOOP=28
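+# e.g. 15 iterations x 2 TEST statements = 30 runs, of which 2 are counted
+# statically, leaving (15-1)*2 = 28 extra checks.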
+for i in $(seq 1 $NUM_VOLS); do
+ starttime="$(date +%s)";
+ create_volume $i
+done
+
+TEST kill_glusterd 1
+
+TESTS_EXPECTED_IN_LOOP=4
+for i in `seq 1 3 15`
+do
+vol1=$(printf "%s-vol%02d" $V0 $i)
+TEST $CLI_2 volume set $vol1 performance.readdir-ahead on
+done
+
+# Bring back 1st glusterd
+TEST $glusterd_1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+TESTS_EXPECTED_IN_LOOP=4
+for i in `seq 1 3 15`
+do
+vol1=$(printf "%s-vol%02d" $V0 $i)
+EXPECT_WITHIN $PROBE_TIMEOUT "on" volinfo_field_1 $vol1 performance.readdir-ahead
+done
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1720566.t b/tests/bugs/glusterd/bug-1720566.t
new file mode 100644
index 00000000000..99bcf6ff785
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1720566.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+
+cleanup;
+V0="TestLongVolnamec363b7b536700ff06eedeae0dd9037fec363b7b536700ff06eedeae0dd9037fec363b7b536700ff06eedeae0dd9abcd"
+V1="TestLongVolname3102bd28a16c49440bd5210e4ec4d5d93102bd28a16c49440bd5210e4ec4d5d933102bd28a16c49440bd5210e4ebbcd"
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+$CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
+$CLI_1 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1
+EXPECT 'Created' cluster_volinfo_field 1 $V1 'Status';
+
+$CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+$CLI_1 volume start $V1
+EXPECT 'Started' cluster_volinfo_field 1 $V1 'Status';
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
+
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V1 $M1;
+
+TEST mkdir $M0/dir{1..4};
+TEST touch $M0/dir{1..4}/files{1..4};
+
+TEST mkdir $M1/dir{1..4};
+TEST touch $M1/dir{1..4}/files{1..4};
+
+TEST $CLI_1 volume add-brick $V0 $H1:$B1/${V0}_1 $H2:$B2/${V0}_1
+TEST $CLI_1 volume add-brick $V1 $H1:$B1/${V1}_1 $H2:$B2/${V1}_1
+
+
+TEST $CLI_1 volume rebalance $V0 start
+TEST $CLI_1 volume rebalance $V1 start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V0
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V1
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-765230-remove-quota-related-option-after-disabling-quota.t b/tests/bugs/glusterd/bug-765230-remove-quota-related-option-after-disabling-quota.t
deleted file mode 100755
index ce26c60696e..00000000000
--- a/tests/bugs/glusterd/bug-765230-remove-quota-related-option-after-disabling-quota.t
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-## Start and create a volume
-TEST glusterd;
-TEST pidof glusterd;
-TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
-
-## Verify volume is created
-EXPECT "$V0" volinfo_field $V0 'Volume Name';
-EXPECT 'Created' volinfo_field $V0 'Status';
-
-## Start volume and verify
-TEST $CLI volume start $V0;
-EXPECT 'Started' volinfo_field $V0 'Status';
-
-## Setting quota-timeout as 20
-TEST ! $CLI volume set $V0 features.quota-timeout 20
-EXPECT '' volinfo_field $V0 'features.quota-timeout';
-
-## Enabling features.quota-deem-statfs
-TEST ! $CLI volume set $V0 features.quota-deem-statfs on
-EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
-
-## Enabling quota
-TEST $CLI volume quota $V0 enable
-EXPECT 'on' volinfo_field $V0 'features.quota'
-
-## Setting quota-timeout as 20
-TEST $CLI volume set $V0 features.quota-timeout 20
-EXPECT '20' volinfo_field $V0 'features.quota-timeout';
-
-## Enabling features.quota-deem-statfs
-TEST $CLI volume set $V0 features.quota-deem-statfs on
-EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs'
-
-## Disabling quota
-TEST $CLI volume quota $V0 disable
-EXPECT 'off' volinfo_field $V0 'features.quota'
-EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
-EXPECT '' volinfo_field $V0 'features.quota-timeout'
-
-## Setting quota-timeout as 30
-TEST ! $CLI volume set $V0 features.quota-timeout 30
-EXPECT '' volinfo_field $V0 'features.quota-timeout';
-
-## Disabling features.quota-deem-statfs
-TEST ! $CLI volume set $V0 features.quota-deem-statfs off
-EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
-
-## Finish up
-TEST $CLI volume stop $V0;
-EXPECT 'Stopped' volinfo_field $V0 'Status';
-
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-782095.t b/tests/bugs/glusterd/bug-782095.t
deleted file mode 100755
index dd8a8dc3026..00000000000
--- a/tests/bugs/glusterd/bug-782095.t
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-## Start and create a volume
-TEST glusterd;
-TEST pidof glusterd;
-TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
-
-## Verify volume is is created
-EXPECT "$V0" volinfo_field $V0 'Volume Name';
-EXPECT 'Created' volinfo_field $V0 'Status';
-
-## Start volume and verify
-TEST $CLI volume start $V0;
-EXPECT 'Started' volinfo_field $V0 'Status';
-
-## Setting performance cache min size as 2MB
-TEST $CLI volume set $V0 performance.cache-min-file-size 2MB
-EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
-
-## Setting performance cache max size as 20MB
-TEST $CLI volume set $V0 performance.cache-max-file-size 20MB
-EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
-
-## Trying to set performance cache min size as 25MB
-TEST ! $CLI volume set $V0 performance.cache-min-file-size 25MB
-EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
-
-## Able to set performance cache min size as long as its lesser than max size
-TEST $CLI volume set $V0 performance.cache-min-file-size 15MB
-EXPECT '15MB' volinfo_field $V0 'performance.cache-min-file-size';
-
-## Trying it out with only cache-max-file-size in CLI as 10MB
-TEST ! $CLI volume set $V0 cache-max-file-size 10MB
-EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
-
-## Finish up
-TEST $CLI volume stop $V0;
-EXPECT 'Stopped' volinfo_field $V0 'Status';
-
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-824753-file-locker.c b/tests/bugs/glusterd/bug-824753-file-locker.c
index ea8a7630e81..f5dababad30 100644
--- a/tests/bugs/glusterd/bug-824753-file-locker.c
+++ b/tests/bugs/glusterd/bug-824753-file-locker.c
@@ -1,13 +1,17 @@
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdlib.h>
-int main (int argc, char *argv[])
+int
+main(int argc, char *argv[])
{
- int fd = -1;
- int ret = -1;
- char command[2048] = "";
- char filepath[255] = "";
+ int fd = -1;
+ int ret = -1;
+ char command[2048] = "";
+ char filepath[255] = "";
struct flock fl;
fl.l_type = F_WRLCK;
@@ -32,7 +36,7 @@ int main (int argc, char *argv[])
" grep %s | awk -F'..: ' '{print $1}' | grep %s:%s/%s",
argv[1], argv[5], argv[2], argv[2], argv[3], argv[1]);
- ret = system (command);
+ ret = system(command);
close(fd);
if (ret)
diff --git a/tests/bugs/glusterd/bug-824753.t b/tests/bugs/glusterd/bug-824753.t
index 2ce4a07c5bd..b969e28f35e 100755
--- a/tests/bugs/glusterd/bug-824753.t
+++ b/tests/bugs/glusterd/bug-824753.t
@@ -9,7 +9,7 @@ TEST glusterd;
TEST pidof glusterd;
TEST $CLI volume info;
-TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
function volinfo_field()
{
diff --git a/tests/bugs/glusterd/bug-839595.t b/tests/bugs/glusterd/bug-839595.t
deleted file mode 100644
index b2fe9789a8c..00000000000
--- a/tests/bugs/glusterd/bug-839595.t
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-
-TEST $CLI volume create $V0 $H0:$B0/${V0}1
-TEST $CLI volume set $V0 cluster.server-quorum-type server
-EXPECT "server" volume_option $V0 cluster.server-quorum-type
-TEST $CLI volume set $V0 cluster.server-quorum-type none
-EXPECT "none" volume_option $V0 cluster.server-quorum-type
-TEST $CLI volume reset $V0 cluster.server-quorum-type
-TEST ! $CLI volume set $V0 cluster.server-quorum-type abc
-TEST ! $CLI volume set all cluster.server-quorum-type none
-TEST ! $CLI volume set $V0 cluster.server-quorum-ratio 100
-
-TEST ! $CLI volume set all cluster.server-quorum-ratio abc
-TEST ! $CLI volume set all cluster.server-quorum-ratio -1
-TEST ! $CLI volume set all cluster.server-quorum-ratio 100.0000005
-TEST $CLI volume set all cluster.server-quorum-ratio 0
-EXPECT "0" volume_option $V0 cluster.server-quorum-ratio
-TEST $CLI volume set all cluster.server-quorum-ratio 100
-EXPECT "100" volume_option $V0 cluster.server-quorum-ratio
-TEST $CLI volume set all cluster.server-quorum-ratio 0.0000005
-EXPECT "0.0000005" volume_option $V0 cluster.server-quorum-ratio
-TEST $CLI volume set all cluster.server-quorum-ratio 100%
-EXPECT "100%" volume_option $V0 cluster.server-quorum-ratio
-cleanup;
diff --git a/tests/bugs/glusterd/bug-857330/common.rc b/tests/bugs/glusterd/bug-857330/common.rc
deleted file mode 100644
index 8342dccb442..00000000000
--- a/tests/bugs/glusterd/bug-857330/common.rc
+++ /dev/null
@@ -1,55 +0,0 @@
-. $(dirname $0)/../../../include.rc
-
-UUID_REGEX='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
-
-TASK_ID=""
-COMMAND=""
-PATTERN=""
-
-function check-and-store-task-id()
-{
- TASK_ID=""
-
- local task_id=$($CLI $COMMAND | grep $PATTERN | grep -o -E "$UUID_REGEX")
-
- if [ -z "$task_id" ] && [ "${task_id+asdf}" = "asdf" ]; then
- return 1
- fi
-
- TASK_ID=$task_id
- return 0;
-}
-
-function get-task-id()
-{
- $CLI $COMMAND | grep $PATTERN | grep -o -E "$UUID_REGEX" | tail -n1
-
-}
-
-function check-and-store-task-id-xml()
-{
- TASK_ID=""
-
- local task_id=$($CLI $COMMAND --xml | xmllint --format - | grep $PATTERN | grep -o -E "$UUID_REGEX")
-
- if [ -z "$task_id" ] && [ "${task_id+asdf}" = "asdf" ]; then
- return 1
- fi
-
- TASK_ID=$task_id
- return 0;
-}
-
-function get-task-id-xml()
-{
- $CLI $COMMAND --xml | xmllint --format - | grep $PATTERN | grep -o -E "$UUID_REGEX"
-}
-
-function get-task-status()
-{
- $CLI $COMMAND | grep -o $PATTERN
- if [ ${PIPESTATUS[0]} -ne 0 ]; then
- return 1
- fi
- return 0
-}
diff --git a/tests/bugs/glusterd/bug-857330/normal.t b/tests/bugs/glusterd/bug-857330/normal.t
deleted file mode 100755
index 02018f244a8..00000000000
--- a/tests/bugs/glusterd/bug-857330/normal.t
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/common.rc
-. $(dirname $0)/../../../volume.rc
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info;
-
-TEST $CLI volume create $V0 $H0:$B0/${V0}1;
-TEST $CLI volume info $V0;
-TEST $CLI volume start $V0;
-
-TEST glusterfs -s $H0 --volfile-id=$V0 $M0;
-
-TEST $PYTHON $(dirname $0)/../../../utils/create-files.py \
- --multi -b 10 -d 10 -n 10 $M0;
-
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-
-###############
-## Rebalance ##
-###############
-TEST $CLI volume add-brick $V0 $H0:$B0/${V0}2;
-
-COMMAND="volume rebalance $V0 start"
-PATTERN="ID:"
-TEST check-and-store-task-id
-
-COMMAND="volume status $V0"
-PATTERN="ID"
-EXPECT $TASK_ID get-task-id
-
-COMMAND="volume rebalance $V0 status"
-PATTERN="completed"
-EXPECT_WITHIN 300 $PATTERN get-task-status
-
-###################
-## Replace-brick ##
-###################
-REP_BRICK_PAIR="$H0:$B0/${V0}2 $H0:$B0/${V0}3"
-
-COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR start"
-PATTERN="ID:"
-TEST check-and-store-task-id
-
-COMMAND="volume status $V0"
-PATTERN="ID"
-EXPECT $TASK_ID get-task-id
-
-COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR status"
-PATTERN="complete"
-EXPECT_WITHIN 300 $PATTERN get-task-status
-
-TEST $CLI volume replace-brick $V0 $REP_BRICK_PAIR commit;
-
-##################
-## Remove-brick ##
-##################
-COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 start"
-PATTERN="ID:"
-TEST check-and-store-task-id
-
-COMMAND="volume status $V0"
-PATTERN="ID"
-EXPECT $TASK_ID get-task-id
-
-COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 status"
-PATTERN="completed"
-EXPECT_WITHIN 300 $PATTERN get-task-status
-
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 commit
-
-TEST $CLI volume stop $V0;
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-857330/xml.t b/tests/bugs/glusterd/bug-857330/xml.t
deleted file mode 100755
index 3aec3b89bbe..00000000000
--- a/tests/bugs/glusterd/bug-857330/xml.t
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/common.rc
-. $(dirname $0)/../../../volume.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info;
-
-TEST $CLI volume create $V0 $H0:$B0/${V0}1;
-TEST $CLI volume info $V0;
-TEST $CLI volume start $V0;
-
-TEST glusterfs -s $H0 --volfile-id=$V0 $M0;
-
-TEST $PYTHON $(dirname $0)/../../../utils/create-files.py \
- --multi -b 10 -d 10 -n 10 $M0;
-
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-
-
-###############
-## Rebalance ##
-###############
-TEST $CLI volume add-brick $V0 $H0:$B0/${V0}2;
-
-COMMAND="volume rebalance $V0 start"
-PATTERN="task-id"
-TEST check-and-store-task-id-xml
-
-COMMAND="volume status $V0"
-PATTERN="id"
-EXPECT $TASK_ID get-task-id-xml
-
-COMMAND="volume rebalance $V0 status"
-PATTERN="task-id"
-EXPECT $TASK_ID get-task-id-xml
-
-## TODO: Add tests for rebalance stop
-
-COMMAND="volume rebalance $V0 status"
-PATTERN="completed"
-EXPECT_WITHIN 300 $PATTERN get-task-status
-
-###################
-## Replace-brick ##
-###################
-REP_BRICK_PAIR="$H0:$B0/${V0}2 $H0:$B0/${V0}3"
-
-COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR start"
-PATTERN="task-id"
-TEST check-and-store-task-id-xml
-
-COMMAND="volume status $V0"
-PATTERN="id"
-EXPECT $TASK_ID get-task-id-xml
-
-COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR status"
-PATTERN="task-id"
-EXPECT $TASK_ID get-task-id-xml
-
-## TODO: Add more tests for replace-brick pause|abort
-
-COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR status"
-PATTERN="complete"
-EXPECT_WITHIN 300 $PATTERN get-task-status
-
-COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR commit"
-PATTERN="task-id"
-EXPECT $TASK_ID get-task-id-xml
-
-##################
-## Remove-brick ##
-##################
-COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 start"
-PATTERN="task-id"
-TEST check-and-store-task-id-xml
-
-COMMAND="volume status $V0"
-PATTERN="id"
-EXPECT $TASK_ID get-task-id-xml
-
-COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 status"
-PATTERN="task-id"
-EXPECT $TASK_ID get-task-id-xml
-
-COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 status"
-PATTERN="completed"
-EXPECT_WITHIN 300 $PATTERN get-task-status
-
-## TODO: Add tests for remove-brick stop
-
-COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 commit"
-PATTERN="task-id"
-EXPECT $TASK_ID get-task-id-xml
-
-TEST $CLI volume stop $V0;
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-859927.t b/tests/bugs/glusterd/bug-859927.t
deleted file mode 100755
index c30d2b852d4..00000000000
--- a/tests/bugs/glusterd/bug-859927.t
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-cleanup;
-
-glusterd;
-
-TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
-
-TEST ! $CLI volume set $V0 statedump-path ""
-TEST ! $CLI volume set $V0 statedump-path " "
-TEST $CLI volume set $V0 statedump-path "/home/"
-EXPECT "/home/" volume_option $V0 server.statedump-path
-
-TEST ! $CLI volume set $V0 background-self-heal-count ""
-TEST ! $CLI volume set $V0 background-self-heal-count " "
-TEST $CLI volume set $V0 background-self-heal-count 10
-EXPECT "10" volume_option $V0 cluster.background-self-heal-count
-
-TEST ! $CLI volume set $V0 cache-size ""
-TEST ! $CLI volume set $V0 cache-size " "
-TEST $CLI volume set $V0 cache-size 512MB
-EXPECT "512MB" volume_option $V0 performance.cache-size
-
-TEST ! $CLI volume set $V0 self-heal-daemon ""
-TEST ! $CLI volume set $V0 self-heal-daemon " "
-TEST $CLI volume set $V0 self-heal-daemon on
-EXPECT "on" volume_option $V0 cluster.self-heal-daemon
-
-TEST ! $CLI volume set $V0 read-subvolume ""
-TEST ! $CLI volume set $V0 read-subvolume " "
-TEST $CLI volume set $V0 read-subvolume $V0-client-0
-EXPECT "$V0-client-0" volume_option $V0 cluster.read-subvolume
-
-TEST ! $CLI volume set $V0 data-self-heal-algorithm ""
-TEST ! $CLI volume set $V0 data-self-heal-algorithm " "
-TEST ! $CLI volume set $V0 data-self-heal-algorithm on
-TEST $CLI volume set $V0 data-self-heal-algorithm full
-EXPECT "full" volume_option $V0 cluster.data-self-heal-algorithm
-
-TEST ! $CLI volume set $V0 min-free-inodes ""
-TEST ! $CLI volume set $V0 min-free-inodes " "
-TEST $CLI volume set $V0 min-free-inodes 60%
-EXPECT "60%" volume_option $V0 cluster.min-free-inodes
-
-TEST ! $CLI volume set $V0 min-free-disk ""
-TEST ! $CLI volume set $V0 min-free-disk " "
-TEST $CLI volume set $V0 min-free-disk 60%
-EXPECT "60%" volume_option $V0 cluster.min-free-disk
-
-TEST $CLI volume set $V0 min-free-disk 120
-EXPECT "120" volume_option $V0 cluster.min-free-disk
-
-TEST ! $CLI volume set $V0 frame-timeout ""
-TEST ! $CLI volume set $V0 frame-timeout " "
-TEST $CLI volume set $V0 frame-timeout 0
-EXPECT "0" volume_option $V0 network.frame-timeout
-
-TEST ! $CLI volume set $V0 auth.allow ""
-TEST ! $CLI volume set $V0 auth.allow " "
-TEST $CLI volume set $V0 auth.allow 192.168.122.1
-EXPECT "192.168.122.1" volume_option $V0 auth.allow
-
-TEST ! $CLI volume set $V0 stripe-block-size ""
-TEST ! $CLI volume set $V0 stripe-block-size " "
-TEST $CLI volume set $V0 stripe-block-size 512MB
-EXPECT "512MB" volume_option $V0 cluster.stripe-block-size
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-862834.t b/tests/bugs/glusterd/bug-862834.t
deleted file mode 100755
index ac2f956a1ed..00000000000
--- a/tests/bugs/glusterd/bug-862834.t
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-
-cleanup;
-
-V1="patchy2"
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info;
-
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
-
-function check_brick()
-{
- vol=$1;
- num=$2
- $CLI volume info $V0 | grep "Brick$num" | awk '{print $2}';
-}
-
-function volinfo_field()
-{
- local vol=$1;
- local field=$2;
-
- $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
-}
-
-function brick_count()
-{
- local vol=$1;
-
- $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
-}
-
-EXPECT "$V0" volinfo_field $V0 'Volume Name';
-EXPECT 'Created' volinfo_field $V0 'Status';
-EXPECT '2' brick_count $V0
-
-
-EXPECT "$H0:$B0/${V0}1" check_brick $V0 '1';
-EXPECT "$H0:$B0/${V0}2" check_brick $V0 '2';
-
-TEST ! $CLI volume create $V1 $H0:$B0/${V1}0 $H0:$B0/${V0}1;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-878004.t b/tests/bugs/glusterd/bug-878004.t
deleted file mode 100644
index 8abada3c3b3..00000000000
--- a/tests/bugs/glusterd/bug-878004.t
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info;
-
-TEST $CLI volume create $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}3;
-
-function brick_count()
-{
- local vol=$1;
-
- $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
-}
-
-
-TEST $CLI volume start $V0
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 force;
-EXPECT '2' brick_count $V0
-
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 force;
-EXPECT '1' brick_count $V0
-
-cleanup;
-
diff --git a/tests/bugs/glusterd/bug-888752.t b/tests/bugs/glusterd/bug-888752.t
deleted file mode 100644
index ed0602e34e2..00000000000
--- a/tests/bugs/glusterd/bug-888752.t
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-
-# Check if xml output is generated correctly for volume status for a single brick
-# present on another peer and no async tasks are running.
-
-function get_peer_count {
- $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-cleanup
-
-TEST launch_cluster 2;
-TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN $PROBE_TIMEOUT 1 get_peer_count
-TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
-TEST $CLI_1 volume start $V0
-
-TEST $CLI_1 volume status $V0 $H2:$B2/$V0 --xml
-
-TEST $CLI_1 volume stop $V0
-
-cleanup
diff --git a/tests/bugs/glusterd/bug-889630.t b/tests/bugs/glusterd/bug-889630.t
deleted file mode 100755
index 4fefd94d66f..00000000000
--- a/tests/bugs/glusterd/bug-889630.t
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-
-function check_peers {
- $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-
-function volume_count {
- local cli=$1;
- if [ $cli -eq '1' ] ; then
- $CLI_1 volume info | grep 'Volume Name' | wc -l;
- else
- $CLI_2 volume info | grep 'Volume Name' | wc -l;
- fi
-}
-
-cleanup;
-
-TEST launch_cluster 2;
-TEST $CLI_1 peer probe $H2;
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
-
-TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
-TEST $CLI_1 volume start $V0
-
-b="B1";
-
-#Create an extra file in the originator's volume store
-touch ${!b}/glusterd/vols/$V0/run/file
-
-TEST $CLI_1 volume stop $V0
-#Test for self-commit failure
-TEST $CLI_1 volume delete $V0
-
-#Check whether delete succeeded on both the nodes
-EXPECT "0" volume_count '1'
-EXPECT "0" volume_count '2'
-
-#Check whether the volume name can be reused after deletion
-TEST $CLI_1 volume create $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1
-TEST $CLI_1 volume start $V0
-
-#Create an extra file in the peer's volume store
-touch ${!b}/glusterd/vols/$V0/run/file
-
-TEST $CLI_1 volume stop $V0
-#Test for commit failure on the other node
-TEST $CLI_2 volume delete $V0
-
-EXPECT "0" volume_count '1';
-EXPECT "0" volume_count '2';
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-905307.t b/tests/bugs/glusterd/bug-905307.t
deleted file mode 100644
index dd1c1bc0795..00000000000
--- a/tests/bugs/glusterd/bug-905307.t
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-TEST glusterd
-TEST pidof glusterd
-
-#test functionality of post-op-delay-secs
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
-
-#Strings should not be accepted.
-TEST ! $CLI volume set $V0 cluster.post-op-delay-secs abc
-
-#-ve ints should not be accepted.
-TEST ! $CLI volume set $V0 cluster.post-op-delay-secs -1
-
-#INT_MAX+1 should not be accepted.
-TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 2147483648
-
-#floats should not be accepted.
-TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 1.25
-
-#min val 0 should be accepted
-TEST $CLI volume set $V0 cluster.post-op-delay-secs 0
-EXPECT "0" volume_option $V0 cluster.post-op-delay-secs
-
-#max val 2147483647 should be accepted
-TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147483647
-EXPECT "2147483647" volume_option $V0 cluster.post-op-delay-secs
-
-#some middle val in range 2147 should be accepted
-TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147
-EXPECT "2147" volume_option $V0 cluster.post-op-delay-secs
-cleanup;
diff --git a/tests/bugs/glusterd/bug-913487.t b/tests/bugs/glusterd/bug-913487.t
deleted file mode 100644
index 9c616ea28fb..00000000000
--- a/tests/bugs/glusterd/bug-913487.t
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-
-cleanup;
-
-TEST glusterd;
-TEST pidof glusterd;
-
-TEST ! $CLI volume set $V0 performance.open-behind off;
-
-TEST pidof glusterd;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-913555.t b/tests/bugs/glusterd/bug-913555.t
deleted file mode 100755
index 4f9e004a654..00000000000
--- a/tests/bugs/glusterd/bug-913555.t
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash
-
-# Test that a volume becomes unwritable when the cluster loses quorum.
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-
-function check_fs {
- df $1 &> /dev/null
- echo $?
-}
-
-function check_peers {
- $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-
-function glusterfsd_count {
- pidof glusterfsd | wc -w;
-}
-
-cleanup;
-
-TEST launch_cluster 3; # start 3-node virtual cluster
-TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
-TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
-
-EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
-
-TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0
-TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
-TEST $CLI_1 volume start $V0
-TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
-
-# Kill one pseudo-node, make sure the others survive and volume stays up.
-TEST kill_node 3;
-EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
-EXPECT 0 check_fs $M0;
-EXPECT 2 glusterfsd_count;
-
-# Kill another pseudo-node, make sure the last one dies and volume goes down.
-TEST kill_node 2;
-EXPECT_WITHIN $PROBE_TIMEOUT 0 check_peers
-EXPECT 1 check_fs $M0;
-EXPECT 0 glusterfsd_count; # the two glusterfsds of the other two glusterds
- # must be dead
-
-TEST $glusterd_2;
-TEST $glusterd_3;
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 glusterfsd_count; # restore quorum, all ok
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
-
-cleanup
diff --git a/tests/bugs/glusterd/bug-916549.t b/tests/bugs/glusterd/bug-916549.t
deleted file mode 100755
index bedbdd60bb6..00000000000
--- a/tests/bugs/glusterd/bug-916549.t
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-
-cleanup;
-
-TEST glusterd;
-TEST $CLI volume create $V0 $H0:$B0/${V0}1;
-TEST $CLI volume start $V0;
-
-pid_file=$(ls $GLUSTERD_WORKDIR/vols/$V0/run);
-brick_pid=$(cat $GLUSTERD_WORKDIR/vols/$V0/run/$pid_file);
-
-
-kill -SIGKILL $brick_pid;
-TEST $CLI volume start $V0 force;
-TEST process_leak_count $(pidof glusterd);
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-948686.t b/tests/bugs/glusterd/bug-948686.t
deleted file mode 100755
index dfe11ff153f..00000000000
--- a/tests/bugs/glusterd/bug-948686.t
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-function check_peers {
- $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-cleanup;
-#setup cluster and test volume
-TEST launch_cluster 3; # start 3-node virtual cluster
-TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
-TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
-
-EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
-
-TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/$V0 $H1:$B1/${V0}_1 $H2:$B2/$V0 $H3:$B3/$V0
-TEST $CLI_1 volume start $V0
-TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
-
-#kill a node
-TEST kill_node 3
-
-#modify volume config to see change in volume-sync
-TEST $CLI_1 volume set $V0 write-behind off
-#add some files to the volume to see effect of volume-heal cmd
-TEST touch $M0/{1..100};
-TEST $CLI_1 volume stop $V0;
-TEST $glusterd_3;
-EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
-TEST $CLI_3 volume start $V0;
-TEST $CLI_2 volume stop $V0;
-TEST $CLI_2 volume delete $V0;
-
-cleanup;
-
-TEST glusterd;
-TEST $CLI volume create $V0 $H0:$B0/$V0
-TEST $CLI volume start $V0
-pkill glusterd;
-pkill glusterfsd;
-TEST glusterd
-TEST $CLI volume status $V0
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-949930.t b/tests/bugs/glusterd/bug-949930.t
index 774802a66b2..9a6d38fa37f 100644
--- a/tests/bugs/glusterd/bug-949930.t
+++ b/tests/bugs/glusterd/bug-949930.t
@@ -10,9 +10,11 @@ TEST glusterd;
TEST pidof glusterd;
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 nfs.disable off
TEST $CLI volume start $V0;
TEST $CLI volume create $V1 $H0:$B0/${V1}{1,2};
+TEST $CLI volume set $V1 nfs.disable off
TEST $CLI volume start $V1;
TEST ! $CLI volume set $V0 performance.nfs.read-ahead blah
diff --git a/tests/bugs/glusterd/bug-955588.t b/tests/bugs/glusterd/bug-955588.t
deleted file mode 100755
index 028a34edd7d..00000000000
--- a/tests/bugs/glusterd/bug-955588.t
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-TEST glusterd
-TEST pidof glusterd
-
-function get_brick_host_uuid()
-{
- local vol=$1;
- local uuid_regex='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
- local host_uuid_list=$($CLI volume info $vol --xml | grep "brick.uuid" | grep -o -E "$uuid_regex");
-
- echo $host_uuid_list | awk '{print $1}'
-}
-
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
-
-uuid=`grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=`
-EXPECT $uuid get_brick_host_uuid $V0
-
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-958790.t b/tests/bugs/glusterd/bug-958790.t
deleted file mode 100644
index 39be0a19137..00000000000
--- a/tests/bugs/glusterd/bug-958790.t
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-TEST glusterd;
-TEST pidof glusterd;
-TEST $CLI volume info;
-
-touch $GLUSTERD_WORKDIR/groups/test
-echo "read-ahead=off" > $GLUSTERD_WORKDIR/groups/test
-echo "open-behind=off" >> $GLUSTERD_WORKDIR/groups/test
-
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
-TEST $CLI volume set $V0 group test
-EXPECT "off" volume_option $V0 performance.read-ahead
-EXPECT "off" volume_option $V0 performance.open-behind
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-961669.t b/tests/bugs/glusterd/bug-961669.t
deleted file mode 100644
index b02f2f50af1..00000000000
--- a/tests/bugs/glusterd/bug-961669.t
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-
-#Test case: Fail remove-brick 'start' variant when reducing the replica count of a volume.
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-#Basic checks
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info
-
-#Create a 3x3 dist-rep volume
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5,6,7,8};
-TEST $CLI volume start $V0
-
-# Mount FUSE and create file/directory
-TEST glusterfs -s $H0 --volfile-id $V0 $M0
-TEST touch $M0/zerobytefile.txt
-TEST mkdir $M0/test_dir
-TEST dd if=/dev/zero of=$M0/file bs=1024 count=1024
-
-function remove_brick_start {
- $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}{1,4,7} start 2>&1|grep -oE 'success|failed'
-}
-
-function remove_brick {
- $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}{1,4,7} force 2>&1|grep -oE 'success|failed'
-}
-
-#remove-brick start variant
-#Actual message displayed at cli is:
-#"volume remove-brick start: failed: Rebalancing not needed when reducing replica count. Try without the 'start' option"
-EXPECT "failed" remove_brick_start;
-
-#remove-brick commit-force
-#Actual message displayed at cli is:
-#"volume remove-brick commit force: success"
-EXPECT "success" remove_brick
-
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-TEST $CLI volume stop $V0
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-cleanup;
diff --git a/tests/bugs/glusterd/bug-963541.t b/tests/bugs/glusterd/bug-963541.t
deleted file mode 100755
index 611626a0d10..00000000000
--- a/tests/bugs/glusterd/bug-963541.t
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3};
-TEST $CLI volume start $V0;
-
-# Start a remove-brick and try to start a rebalance/remove-brick without committing
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
-
-TEST ! $CLI volume rebalance $V0 start
-TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start
-
-#Try to start rebalance/remove-brick again after commit
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 commit
-
-gluster volume status
-
-TEST $CLI volume rebalance $V0 start
-TEST $CLI volume rebalance $V0 stop
-
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 stop
-
-TEST $CLI volume stop $V0
-
-cleanup;
-
diff --git a/tests/bugs/glusterd/bug-964059.t b/tests/bugs/glusterd/bug-964059.t
deleted file mode 100755
index 7b4f60454b8..00000000000
--- a/tests/bugs/glusterd/bug-964059.t
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-
-function check_peers {
- $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-
-function volume_count {
- local cli=$1;
- if [ $cli -eq '1' ] ; then
- $CLI_1 volume info | grep 'Volume Name' | wc -l;
- else
- $CLI_2 volume info | grep 'Volume Name' | wc -l;
- fi
-}
-
-cleanup;
-
-TEST launch_cluster 2;
-TEST $CLI_1 peer probe $H2;
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
-
-TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
-TEST $CLI_1 volume start $V0
-TEST $CLI_1 volume remove-brick $V0 $H2:$B2/$V0 start
-TEST $CLI_1 volume status
-cleanup;
diff --git a/tests/bugs/glusterd/check_elastic_server.t b/tests/bugs/glusterd/check_elastic_server.t
new file mode 100644
index 00000000000..41d2140aa2b
--- /dev/null
+++ b/tests/bugs/glusterd/check_elastic_server.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+function cluster_rebalance_status {
+ local vol=$1
+ $CLI_2 volume status | grep -iw "Rebalance" -A 5 | grep "Status" | sed 's/.*: //'
+}
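+# e.g. this extracts the "Status" line from the Rebalance section of
+# "volume status", yielding "completed" once the rebalance has finished.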
+
+cleanup;
+TEST launch_cluster 4;
+TEST $CLI_1 peer probe $H2;
+TEST $CLI_1 peer probe $H3;
+TEST $CLI_1 peer probe $H4;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 3 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
+
+$CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+#Mount invalid volume
+TEST ! glusterfs -s $H1 --volfile-id=${V0}_NA $M0;
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
+
+TEST mkdir $M0/dir{1..4};
+TEST touch $M0/dir{1..4}/files{1..4};
+
+TEST $CLI_1 volume remove-brick $V0 $H1:$B1/$V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_remove_brick_status_completed_field "$V0 $H1:$B1/$V0"
+
+TEST $CLI_1 volume remove-brick $V0 $H1:$B1/$V0 commit
+
+kill_glusterd 1
+
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+TEST $CLI_2 volume add-brick $V0 $H3:$B3/$V0
+
+TEST $CLI_2 volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status $V0
+
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+TEST $CLI_2 volume add-brick $V0 $H4:$B4/$V0
+
+TEST $CLI_2 volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status $V0
+kill_glusterd 2
+
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/daemon-log-level-option.t b/tests/bugs/glusterd/daemon-log-level-option.t
new file mode 100644
index 00000000000..66e55e3d758
--- /dev/null
+++ b/tests/bugs/glusterd/daemon-log-level-option.t
@@ -0,0 +1,93 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+function Info_messages_count() {
+ local shd_log=$1
+ cat $shd_log | grep " I " | wc -l
+}
+
+function Warning_messages_count() {
+ local shd_log=$1
+ cat $shd_log | grep " W " | wc -l
+}
+
+function Debug_messages_count() {
+ local shd_log=$1
+ cat $shd_log | grep " D " | wc -l
+}
+
+function Trace_messages_count() {
+ local shd_log=$1
+ cat $shd_log | grep " T " | wc -l
+}
+
+cleanup;
+
+# Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+# set cluster.daemon-log-level option to DEBUG
+TEST $CLI volume set all cluster.daemon-log-level DEBUG
+
+#Create a 3X2 distributed-replicate volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..6};
+TEST $CLI volume start $V0
+
+# log should not have any trace messages
+EXPECT 0 Trace_messages_count "/var/log/glusterfs/glustershd.log"
+
+# stop the volume and remove glustershd log
+TEST $CLI volume stop $V0
+rm -f /var/log/glusterfs/glustershd.log
+
+# set cluster.daemon-log-level option to INFO and start the volume
+TEST $CLI volume set all cluster.daemon-log-level INFO
+TEST $CLI volume start $V0
+
+# log should not have any debug messages
+EXPECT 0 Debug_messages_count "/var/log/glusterfs/glustershd.log"
+
+# log should not have any trace messages
+EXPECT 0 Trace_messages_count "/var/log/glusterfs/glustershd.log"
+
+# stop the volume and remove glustershd log
+TEST $CLI volume stop $V0
+rm -f /var/log/glusterfs/glustershd.log
+
+# set cluster.daemon-log-level option to WARNING and start the volume
+TEST $CLI volume set all cluster.daemon-log-level WARNING
+TEST $CLI volume start $V0
+
+# log should not have any info messages
+EXPECT 0 Info_messages_count "/var/log/glusterfs/glustershd.log"
+
+# log should not have any debug messages
+EXPECT 0 Debug_messages_count "/var/log/glusterfs/glustershd.log"
+
+# log should not have any trace messages
+EXPECT 0 Trace_messages_count "/var/log/glusterfs/glustershd.log"
+
+# stop the volume and remove glustershd log
+TEST $CLI volume stop $V0
+rm -f /var/log/glusterfs/glustershd.log
+
+# set cluster.daemon-log-level option to ERROR and start the volume
+TEST $CLI volume set all cluster.daemon-log-level ERROR
+TEST $CLI volume start $V0
+
+# log should not have any info messages
+EXPECT 0 Info_messages_count "/var/log/glusterfs/glustershd.log"
+
+# log should not have any warning messages
+EXPECT 0 Warning_messages_count "/var/log/glusterfs/glustershd.log"
+
+# log should not have any debug messages
+EXPECT 0 Debug_messages_count "/var/log/glusterfs/glustershd.log"
+
+# log should not have any trace messages
+EXPECT 0 Trace_messages_count "/var/log/glusterfs/glustershd.log"
+
+cleanup
diff --git a/tests/bugs/glusterd/df-results-post-replace-brick-operations.t b/tests/bugs/glusterd/df-results-post-replace-brick-operations.t
new file mode 100644
index 00000000000..04f75889388
--- /dev/null
+++ b/tests/bugs/glusterd/df-results-post-replace-brick-operations.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+TEST glusterd
+
+#Create brick partitions
+TEST truncate -s 100M $B0/brick1
+TEST truncate -s 100M $B0/brick2
+TEST truncate -s 100M $B0/brick3
+TEST truncate -s 100M $B0/brick4
+TEST truncate -s 100M $B0/brick5
+
+LO1=`SETUP_LOOP $B0/brick1`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO1
+
+LO2=`SETUP_LOOP $B0/brick2`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO2
+
+LO3=`SETUP_LOOP $B0/brick3`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO3
+
+LO4=`SETUP_LOOP $B0/brick4`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO4
+
+LO5=`SETUP_LOOP $B0/brick5`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO5
+
+TEST mkdir -p $B0/${V0}1 $B0/${V0}2 $B0/${V0}3 $B0/${V0}4 $B0/${V0}5
+TEST MOUNT_LOOP $LO1 $B0/${V0}1
+TEST MOUNT_LOOP $LO2 $B0/${V0}2
+TEST MOUNT_LOOP $LO3 $B0/${V0}3
+TEST MOUNT_LOOP $LO4 $B0/${V0}4
+TEST MOUNT_LOOP $LO5 $B0/${V0}5
+
+# create a subdirectory in mount point and use it for volume creation
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}1/brick1 $H0:$B0/${V0}2/brick1 $H0:$B0/${V0}3/brick1
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" online_brick_count
+
+# mount the volume and check the size at mount point
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+total_space=$(df -P $M0 | tail -1 | awk '{ print $2}')
+
+# perform replace brick operations
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1/brick1 $H0:$B0/${V0}4/brick1 commit force
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}2/brick1 $H0:$B0/${V0}5/brick1 commit force
+
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+# check the size at the mount point; it should be the same as before
+total_space_new=$(df -P $M0 | tail -1 | awk '{ print $2}')
+TEST [ $total_space -eq $total_space_new ]
diff --git a/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t b/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
new file mode 100644
index 00000000000..8001359e6b3
--- /dev/null
+++ b/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
@@ -0,0 +1,71 @@
+#! /bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup
+
+TEST launch_cluster 3
+
+TEST $CLI_1 peer probe $H2
+
+#bug-1109741 - validate mgmt handshake
+
+TEST ! $CLI_3 peer probe $H1
+
+GD1_WD=$($CLI_1 system getwd)
+OP_VERS_ORIG=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2)
+
+TEST $CLI_3 system uuid get # Needed for glusterd.info to be created
+
+GD3_WD=$($CLI_3 system getwd)
+TEST sed -rnie "'s/(operating-version=)\w+/\130600/gip'" ${GD3_WD}/glusterd.info
+
+TEST kill_glusterd 3
+TEST start_glusterd 3
+
+TEST ! $CLI_3 peer probe $H1
+
+OP_VERS_NEW=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2)
+TEST [[ $OP_VERS_ORIG == $OP_VERS_NEW ]]
+
+#bug-948686 - volume sync after bringing up the killed node
+
+TEST $CLI_1 peer probe $H3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 2
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 3
+
+TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/$V0 $H1:$B1/${V0}_1 $H2:$B2/$V0 $H3:$B3/$V0
+TEST $CLI_1 volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V0 'Status'
+TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
+
+#kill a node
+TEST kill_node 3
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers 2
+
+#modify volume config to see change in volume-sync
+TEST $CLI_1 volume set $V0 write-behind off
+#add some files to the volume to see effect of volume-heal cmd
+TEST touch $M0/{1..100};
+TEST $CLI_1 volume stop $V0;
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 'Stopped' volinfo_field_1 $V0 'Status'
+
+TEST $glusterd_3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 2
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 3
+
+sleep 5
+TEST $CLI_3 volume start $V0;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V0 'Status'
+TEST $CLI_2 volume stop $V0;
+TEST $CLI_2 volume delete $V0;
+
+cleanup
diff --git a/tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t b/tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t
new file mode 100644
index 00000000000..99272e14245
--- /dev/null
+++ b/tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t
@@ -0,0 +1,115 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+function peer_count {
+eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
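+# e.g. "peer_count 2" expands to "$CLI_2 peer status" via the eval above and
+# counts the peers that node sees in the 'Peer in Cluster (Connected)' state.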
+
+cleanup;
+
+#bug-1454418 - Setting Port number in specific range
+sysctl net.ipv4.ip_local_reserved_ports="24007-24008,32765-32768,49152-49156"
+
+TEST launch_cluster 4;
+
+#bug-1223213
+
+# Fool the cluster into operating with the 3.5 op-version even though the
+# binary's op-version is > 3.5. This ensures the 3.5 code path is hit, to test
+# that volume status works when a node is upgraded from 3.5 to 3.7 or higher,
+# as the mgmt_v3 lock was introduced from version 3.6 onwards.
+
+GD1_WD=$($CLI_1 system getwd)
+$CLI_1 system uuid get
+Old_op_version=$(cat ${GD1_WD}/glusterd.info | grep operating-version | cut -d '=' -f 2)
+
+TEST sed -rnie "'s/(operating-version=)\w+/\130500/gip'" ${GD1_WD}/glusterd.info
+
+TEST kill_glusterd 1
+TEST start_glusterd 1
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+
+TEST `sed -i "s/"30500"/${Old_op_version}/g" ${GD1_WD}/glusterd.info`
+
+TEST kill_glusterd 1
+TEST start_glusterd 1
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 2
+
+#bug-1454418
+sysctl net.ipv4.ip_local_reserved_ports="
+"
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume start $V0
+
+#bug-888752 - volume status --xml from peer in the cluster
+
+TEST $CLI_1 volume status $V0 $H2:$B2/$V0 --xml
+
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume delete $V0
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume create $V1 $H1:$B1/$V1
+
+# bug - 1635820
+# After rebooting a node which doesn't host bricks for any volume,
+# the peer should not go into the rejected state
+TEST kill_glusterd 2
+TEST start_glusterd 2
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 2
+
+TEST $CLI_1 volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V0 'Status'
+
+TEST $CLI_1 volume start $V1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V1 'Status'
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+
+TEST $CLI_1 peer probe $H4;
+EXPECT_WITHIN $PROBE_TIMEOUT 3 peer_count 1
+
+#bug-1173414 - validate mgmt-v3-remote-lock-failure
+
+for i in {1..20}
+do
+$CLI_1 volume set $V0 diagnostics.client-log-level DEBUG &
+$CLI_1 volume set $V1 barrier on
+$CLI_2 volume set $V0 diagnostics.client-log-level DEBUG &
+$CLI_2 volume set $V1 barrier on
+done
+
+EXPECT_WITHIN $PROBE_TIMEOUT 3 peer_count 1
+TEST $CLI_1 volume status
+TEST $CLI_2 volume status
+
+#bug-1293414 - validate peer detach
+
+# peers hosting bricks cannot be detached
+TEST ! $CLI_4 peer detach $H1
+EXPECT_WITHIN $PROBE_TIMEOUT 3 peer_count 1
+
+# peer not hosting bricks should be detachable
+TEST $CLI_4 peer detach $H3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+
+#bug-1344407 - deleting a volume when peer is down should fail
+
+#volume should be stopped before deletion
+TEST $CLI_1 volume stop $V0
+
+TEST kill_glusterd 2
+TEST ! $CLI_1 volume delete $V0
+
+cleanup
diff --git a/tests/bugs/glusterd/optimized-basic-testcases.t b/tests/bugs/glusterd/optimized-basic-testcases.t
new file mode 100644
index 00000000000..b89ca22415e
--- /dev/null
+++ b/tests/bugs/glusterd/optimized-basic-testcases.t
@@ -0,0 +1,305 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+function get_opret_value () {
+ local VOL=$1
+ $CLI volume info $VOL --xml | sed -ne 's/.*<opRet>\([-0-9]*\)<\/opRet>/\1/p'
+}
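+# e.g. an "<opRet>-1</opRet>" element in the --xml output yields "-1".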
+
+function check_brick()
+{
+ vol=$1;
+ num=$2
+ $CLI volume info $V0 | grep "Brick$num" | awk '{print $2}';
+}
+
+function brick_count()
+{
+ local vol=$1;
+
+ $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
+}
+
+function get_brick_host_uuid()
+{
+ local vol=$1;
+ local uuid_regex='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
+ local host_uuid_list=$($CLI volume info $vol --xml | grep "brick.uuid" | grep -o -E "$uuid_regex");
+
+ echo $host_uuid_list | awk '{print $1}'
+}
+
+function generate_statedump_and_check_for_glusterd_info {
+ pid=`pidof glusterd`
+ #remove old stale statedumps
+ cleanup_statedump $pid
+ kill -USR1 $pid
+ #Wait till the statedump is generated
+ sleep 1
+ fname=$(ls $statedumpdir | grep -E "\.$pid\.dump\.")
+ cat $statedumpdir/$fname | grep "xlator.glusterd.priv" | wc -l
+}
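+# The statedump file name is assumed to follow the usual glusterfs pattern,
+# e.g. glusterdump.<pid>.dump.<timestamp> under $statedumpdir, which is what
+# the ".$pid.dump." pattern above matches.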
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+#bug-1238135-lazy-daemon-initialization-on-demand
+
+GDWD=$($CLI system getwd)
+
+
+# The glusterd.info file is created on either the first peer probe or volume
+# creation, hence we expect the file to be absent at this point
+
+#bug-913487 - setting volume options before creation of volume should fail
+
+TEST ! $CLI volume set $V0 performance.open-behind off;
+TEST pidof glusterd;
+
+#bug-1433578 - glusterd should not crash after probing an invalid peer
+
+TEST ! $CLI peer probe invalid-peer
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+#bug-1786478 - default volume option after volume reset
+addr_family=`volinfo_field $V0 'transport.address-family'`
+TEST $CLI volume reset $V0
+EXPECT $addr_family volinfo_field $V0 'transport.address-family'
+
+#bug-955588 - uuid validation
+
+uuid=`grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=`
+EXPECT $uuid get_brick_host_uuid $V0
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+#bug-958790 - set options from file
+
+touch $GLUSTERD_WORKDIR/groups/test
+echo "read-ahead=off" > $GLUSTERD_WORKDIR/groups/test
+echo "open-behind=off" >> $GLUSTERD_WORKDIR/groups/test
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 group test
+EXPECT "off" volume_option $V0 performance.read-ahead
+EXPECT "off" volume_option $V0 performance.open-behind
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+#bug-1321836 - validate opret value for a non-existent volume
+
+EXPECT 0 get_opret_value $V0
+EXPECT -1 get_opret_value "novol"
+
+EXPECT '2' brick_count $V0
+
+#bug-862834 - validate brick status
+
+EXPECT "$H0:$B0/${V0}1" check_brick $V0 '1';
+EXPECT "$H0:$B0/${V0}2" check_brick $V0 '2';
+
+TEST ! $CLI volume create $V1 $H0:$B0/${V1}0 $H0:$B0/${V0}1;
+
+#bug-1482344 - setting volume-option-at-cluster-level should not result in glusterd crash
+
+TEST ! $CLI volume set all transport.listen-backlog 128
+
+# Check the volume info output; if glusterd had crashed then this command
+# would fail
+TEST $CLI volume info $V0;
+
+#bug-1002556 and bug-1199451 - command should retrieve current op-version of the node
+TEST $CLI volume get all cluster.op-version
+
+#bug-1315186 - reject-lowering-down-op-version
+
+OP_VERS_ORIG=$(grep 'operating-version' ${GDWD}/glusterd.info | cut -d '=' -f 2)
+OP_VERS_NEW=`expr $OP_VERS_ORIG - 1`
+
+TEST ! $CLI volume set all cluster.op-version $OP_VERS_NEW
+
+#bug-1022055 - validate log rotate command
+
+TEST ! $CLI volume log rotate $V0;
+TEST $CLI volume log $V0 rotate;
+
+#bug-1092841 - validating barrier enable/disable
+
+TEST $CLI volume barrier $V0 enable;
+TEST ! $CLI volume barrier $V0 enable;
+
+TEST $CLI volume barrier $V0 disable;
+TEST ! $CLI volume barrier $V0 disable;
+
+#bug-1095097 - validate volume profile command
+
+TEST $CLI volume profile $V0 start
+TEST $CLI volume profile $V0 info
+
+#bug-839595 - validate server-quorum options
+
+TEST $CLI volume set $V0 cluster.server-quorum-type server
+EXPECT "server" volume_option $V0 cluster.server-quorum-type
+TEST $CLI volume set $V0 cluster.server-quorum-type none
+EXPECT "none" volume_option $V0 cluster.server-quorum-type
+TEST $CLI volume reset $V0 cluster.server-quorum-type
+TEST ! $CLI volume set $V0 cluster.server-quorum-type abc
+TEST ! $CLI volume set all cluster.server-quorum-type none
+TEST ! $CLI volume set $V0 cluster.server-quorum-ratio 100
+
+TEST ! $CLI volume set all cluster.server-quorum-ratio abc
+TEST ! $CLI volume set all cluster.server-quorum-ratio -1
+TEST ! $CLI volume set all cluster.server-quorum-ratio 100.0000005
+TEST $CLI volume set all cluster.server-quorum-ratio 0
+EXPECT "0" volume_option $V0 cluster.server-quorum-ratio
+TEST $CLI volume set all cluster.server-quorum-ratio 100
+EXPECT "100" volume_option $V0 cluster.server-quorum-ratio
+TEST $CLI volume set all cluster.server-quorum-ratio 0.0000005
+EXPECT "0.0000005" volume_option $V0 cluster.server-quorum-ratio
+TEST $CLI volume set all cluster.server-quorum-ratio 100%
+EXPECT "100%" volume_option $V0 cluster.server-quorum-ratio
+
+#bug-1265479 - validate-distributed-volume-options
+
+#Setting data-self-heal option on for distribute volume
+TEST ! $CLI volume set $V0 data-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.data-self-heal';
+TEST ! $CLI volume set $V0 cluster.data-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.data-self-heal';
+
+#Setting metadata-self-heal option on for distribute volume
+TEST ! $CLI volume set $V0 metadata-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.metadata-self-heal';
+TEST ! $CLI volume set $V0 cluster.metadata-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.metadata-self-heal';
+
+#Setting entry-self-heal option on for distribute volume
+TEST ! $CLI volume set $V0 entry-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.entrydata-self-heal';
+TEST ! $CLI volume set $V0 cluster.entry-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.entrydata-self-heal';
+
+#bug-1163108 - validate min-free-disk-option
+
+## Setting invalid value for option cluster.min-free-disk should fail
+TEST ! $CLI volume set $V0 min-free-disk ""
+TEST ! $CLI volume set $V0 min-free-disk 143.!/12
+TEST ! $CLI volume set $V0 min-free-disk 123%
+TEST ! $CLI volume set $V0 min-free-disk 194.34%
+
+## Setting fractional value as a size (unit is byte) for option
+## cluster.min-free-disk should fail
+TEST ! $CLI volume set $V0 min-free-disk 199.051
+TEST ! $CLI volume set $V0 min-free-disk 111.999
+
+## Setting valid value for option cluster.min-free-disk should pass
+TEST $CLI volume set $V0 min-free-disk 12%
+TEST $CLI volume set $V0 min-free-disk 56.7%
+TEST $CLI volume set $V0 min-free-disk 120
+TEST $CLI volume set $V0 min-free-disk 369.0000
+
+#bug-1179175-uss-option-validation
+
+## Set features.uss option with non-boolean value. These non-boolean value
+## for features.uss option should fail.
+TEST ! $CLI volume set $V0 features.uss abcd
+TEST ! $CLI volume set $V0 features.uss #$#$
+TEST ! $CLI volume set $V0 features.uss 2324
+
+## Setting other options with valid value. These options should succeed.
+TEST $CLI volume set $V0 barrier enable
+TEST $CLI volume set $V0 ping-timeout 60
+
+## Set features.uss option with valid boolean value. It should succeed.
+TEST $CLI volume set $V0 features.uss enable
+TEST $CLI volume set $V0 features.uss disable
+
+
+## Setting other options with valid value. These options should succeed.
+TEST $CLI volume set $V0 barrier enable
+TEST $CLI volume set $V0 ping-timeout 60
+
+#bug-1209329 - daemon-svcs-on-reset-volume
+
+##enable the bitrot and verify bitd is running or not
+TEST $CLI volume bitrot $V0 enable
+EXPECT 'on' volinfo_field $V0 'features.bitrot'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
+
+##Do reset force which sets the bitrot options to default
+TEST $CLI volume reset $V0 force;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_bitd_count
+
+##enable the uss option and verify snapd is running or not
+TEST $CLI volume set $V0 features.uss on
+EXPECT 'on' volinfo_field $V0 'features.uss'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_snapd_count
+
+##Do reset force which sets the uss options to default
+TEST $CLI volume reset $V0 force;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_snapd_count
+
+##Verify that nfs is disabled by default initially
+EXPECT "0" get_nfs_count
+
+##enable nfs and verify
+TEST $CLI volume set $V0 nfs.disable off
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
+EXPECT "1" get_nfs_count
+
+##Do reset force which sets the nfs options to default
+TEST $CLI volume reset $V0 force;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_nfs_count
+
+##enable the uss option and verify snapd is running or not
+TEST $CLI volume set $V0 features.uss on
+EXPECT 'on' volinfo_field $V0 'features.uss'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_snapd_count
+
+##Disable the uss option using set command and verify snapd
+TEST $CLI volume set $V0 features.uss off
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_snapd_count
+
+##enable nfs.disable and verify
+TEST $CLI volume set $V0 nfs.disable on
+EXPECT 'on' volinfo_field $V0 'nfs.disable'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_nfs_count
+
+## disable nfs.disable option using set command
+TEST $CLI volume set $V0 nfs.disable off
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_nfs_count
+
+TEST $CLI volume info;
+TEST $CLI volume create $V1 $H0:$B0/${V1}1
+TEST $CLI volume start $V1
+pkill glusterd;
+pkill glusterfsd;
+TEST glusterd
+TEST $CLI volume status $V1
+
+#bug-853601 - Avoid using /var/lib/glusterd as a brick
+TEST ! $CLI volume create "test" $H0:/var/lib/glusterd
+TEST ! $CLI volume create "test" $H0:/var/lib/glusterd force
+TEST ! $CLI volume create "test" $H0:/var/lib/glusterd/abc
+TEST ! $CLI volume create "test" $H0:/var/lib/glusterd/abc force
+mkdir -p /xyz/var/lib/glusterd/abc
+
+#bug 1716812 - volfile should be created with transport type both
+TEST $CLI volume create "test" transport tcp,rdma $H0:/xyz/var/lib/glusterd/abc
+EXPECT 'Created' volinfo_field "test" 'Status';
+
+#While taking a statedump, there is a TRY_LOCK on call_frame, which might
+#cause failure. So adding an EXPECT_WITHIN
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" generate_statedump_and_check_for_glusterd_info
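+# Illustrative sketch (an assumption, not the actual helper definition): a
+# check like the one above would typically trigger a statedump by sending
+# SIGUSR1 to glusterd and then grep the freshly written dump under
+# /var/run/gluster for glusterd information, roughly:
+#   kill -USR1 $(pidof glusterd)
+#   grep -c "glusterd" /var/run/gluster/glusterdump.$(pidof glusterd).dump.*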
+
+cleanup_statedump `pidof glusterd`
+cleanup
diff --git a/tests/bugs/glusterd/quorum-validation.t b/tests/bugs/glusterd/quorum-validation.t
new file mode 100644
index 00000000000..3cc3351b43b
--- /dev/null
+++ b/tests/bugs/glusterd/quorum-validation.t
@@ -0,0 +1,122 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+TEST launch_cluster 2
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+TEST $CLI_1 volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}1
+
+#bug-1177132 - sync server quorum options when a node is brought up
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 52
+
+#Bring down 2nd glusterd
+TEST kill_glusterd 2
+EXPECT_WITHIN $PROBE_TIMEOUT 0 peer_count
+
+#bug-1104642 - sync server quorum options when a node is brought up
+#Set the cluster-wide ('all') option from the 1st glusterd
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 80
+
+# Now quorum is not met. Add-brick, remove-brick and volume-set commands
+# (commands based on the syncop framework) should fail
+TEST ! $CLI_1 volume add-brick $V0 $H1:$B1/${V0}2
+TEST ! $CLI_1 volume remove-brick $V0 $H1:$B1/${V0}0 start
+TEST ! $CLI_1 volume set $V0 barrier enable
+
+#quorum is not met, rebalance/profile start should fail
+TEST ! $CLI_1 volume rebalance $V0 start
+TEST ! $CLI_1 volume profile $V0 start
+
+#bug-1690753 - volume stop should fail when quorum is not met
+TEST ! $CLI_1 volume stop $V0
+
+#Bring back the 2nd glusterd
+TEST $glusterd_2
+
+#verify whether the value has been synced
+EXPECT_WITHIN $PROBE_TIMEOUT "80" volinfo_field_1 all cluster.server-quorum-ratio
+EXPECT_WITHIN $PROBE_TIMEOUT '1' peer_count
+EXPECT_WITHIN $PROBE_TIMEOUT "80" volinfo_field_2 all cluster.server-quorum-ratio
+
+# Now quorum is met.
+# Add-brick, remove-brick and volume-set commands should succeed
+TEST $CLI_1 volume add-brick $V0 $H2:$B2/${V0}2
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0}2 start
+TEST $CLI_1 volume set $V0 barrier enable
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0}2 stop
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}1
+
+## Stop the volume
+TEST $CLI_1 volume stop $V0
+
+## Bring down 2nd glusterd
+TEST kill_glusterd 2
+
+## Now quorum is not met. Starting the volume on the 1st node should not succeed
+TEST ! $CLI_1 volume start $V0
+
+## Bring back 2nd glusterd
+TEST $glusterd_2
+
+# After the 2nd glusterd comes back, there will be 2 nodes in the cluster
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
+
+## Now quorum is met. Starting the volume on the 1st node should succeed.
+TEST $CLI_1 volume start $V0
+
+# Now re-execute the same profile command and this time it should succeed
+TEST $CLI_1 volume profile $V0 start
+
+#bug-1352277
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}1
+
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type none
+
+# Bring down all the gluster processes
+TEST killall_gluster
+
+#bring back 1st glusterd and check whether the brick process comes back
+TEST $glusterd_1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+
+#enabling quorum should bring down the brick
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+
+TEST $glusterd_2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}1
+
+#bug-1367478 - brick processes should not be up when quorum is not met
+TEST $CLI_1 volume create $V1 $H1:$B1/${V1}1 $H2:$B2/${V1}2
+TEST $CLI_1 volume start $V1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V1 $H1 $B1/${V1}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V1 $H2 $B2/${V1}2
+
+# Restart 2nd glusterd
+TEST kill_glusterd 2
+TEST $glusterd_2
+
+# Check if all bricks are up
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V1 $H1 $B1/${V1}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V1 $H2 $B2/${V1}2
+
+cleanup
diff --git a/tests/bugs/glusterd/rebalance-in-cluster.t b/tests/bugs/glusterd/rebalance-in-cluster.t
new file mode 100644
index 00000000000..469ec6cd48e
--- /dev/null
+++ b/tests/bugs/glusterd/rebalance-in-cluster.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+function rebalance_status_field_1 {
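+    # assumption about the 'rebalance status' CLI layout: the 3rd output line
+    # is the first data row and its 7th column holds the status field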
+ $CLI_1 volume rebalance $1 status | awk '{print $7}' | sed -n 3p
+}
+
+cleanup;
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+$CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
+
+$CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+#bug-1231437
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
+
+TEST mkdir $M0/dir{1..4};
+TEST touch $M0/dir{1..4}/files{1..4};
+
+TEST $CLI_1 volume add-brick $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1
+
+TEST $CLI_1 volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V0
+
+#bug-1764119 - rebalance status should display detailed info when any of the nodes is down
+TEST kill_glusterd 2
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field_1 $V0
+
+TEST start_glusterd 2
+#bug-1245142
+
+$CLI_1 volume rebalance $V0 start &
+#Kill glusterd on the 2nd node after the request is sent, so that the callback
+#is called with rpc->status failed; roughly a 1 sec delay is introduced to hit this scenario.
+sleep 1
+kill_glusterd 2
+#check glusterd commands are working after rebalance start command
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/rebalance-operations-in-single-node.t b/tests/bugs/glusterd/rebalance-operations-in-single-node.t
new file mode 100644
index 00000000000..ef85887f440
--- /dev/null
+++ b/tests/bugs/glusterd/rebalance-operations-in-single-node.t
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+function get_rebalanced_info()
+{
+ local rebal_info_key=$2
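+    # assumption about the 'rebalance status' CLI layout: on the first data
+    # row (3rd line), columns 2-6 are rebalanced files, size, scanned,
+    # failures and skipped, which is why callers pass those indexes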
+ $CLI volume rebalance $1 status | awk '{print $'$rebal_info_key'}' |sed -n 3p| sed 's/ *$//g'
+}
+
+volname="StartMigrationDuringRebalanceTest"
+TEST glusterd
+TEST pidof glusterd;
+
+TEST $CLI volume info;
+TEST $CLI volume create $volname $H0:$B0/${volname}{1..4};
+TEST $CLI volume start $volname;
+
+#bug-1046308 - validate rebalance on a specified volume name
+TEST $CLI volume rebalance $volname start;
+
+#bug-1089668 - validation of rebalance status and remove-brick status
+#bug-963541 - after remove-brick start, rebalance/remove-brick start without committing should fail
+
+TEST ! $CLI volume remove-brick $volname $H0:$B0/${volname}1 status
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $volname
+
+TEST $CLI volume remove-brick $volname $H0:$B0/${volname}1 start
+TEST ! $CLI volume rebalance $volname start
+TEST ! $CLI volume rebalance $volname status
+TEST ! $CLI volume remove-brick $volname $H0:$B0/${volname}2 start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field \
+"$volname" "$H0:$B0/${volname}1"
+TEST $CLI volume remove-brick $volname $H0:$B0/${volname}1 commit
+
+TEST $CLI volume rebalance $volname start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $volname
+TEST $CLI volume rebalance $volname stop
+
+TEST $CLI volume remove-brick $volname $H0:$B0/${volname}2 start
+TEST $CLI volume remove-brick $volname $H0:$B0/${volname}2 stop
+
+#bug-1351021-rebalance-info-post-glusterd-restart
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3};
+TEST $CLI volume start $V0;
+
+#Mount volume and create data
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+TEST mkdir $M0/dir{1..10}
+TEST touch $M0/dir{1..10}/file{1..10}
+
+# Add-brick and start rebalance
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}4
+TEST $CLI volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+
+#Rebalance info before glusterd restart
+OLD_REBAL_FILES=$(get_rebalanced_info $V0 2)
+OLD_SIZE=$(get_rebalanced_info $V0 3)
+OLD_SCANNED=$(get_rebalanced_info $V0 4)
+OLD_FAILURES=$(get_rebalanced_info $V0 5)
+OLD_SKIPPED=$(get_rebalanced_info $V0 6)
+
+
+pkill glusterd;
+pkill glusterfsd;
+TEST glusterd
+
+#Rebalance info after glusterd restart
+NEW_REBAL_FILES=$(get_rebalanced_info $V0 2)
+NEW_SIZE=$(get_rebalanced_info $V0 3)
+NEW_SCANNED=$(get_rebalanced_info $V0 4)
+NEW_FAILURES=$(get_rebalanced_info $V0 5)
+NEW_SKIPPED=$(get_rebalanced_info $V0 6)
+#Check rebalance info before and after glusterd restart
+TEST [ $OLD_REBAL_FILES == $NEW_REBAL_FILES ]
+TEST [ $OLD_SIZE == $NEW_SIZE ]
+TEST [ $OLD_SCANNED == $NEW_SCANNED ]
+TEST [ $OLD_FAILURES == $NEW_FAILURES ]
+TEST [ $OLD_SKIPPED == $NEW_SKIPPED ]
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#bug-1004744 - validation of rebalance fix layout
+
+TEST $CLI volume start $V0 force
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+for i in `seq 11 20`;
+do
+ mkdir $M0/dir_$i
+ echo file>$M0/dir_$i/file_$i
+ for j in `seq 1 100`;
+ do
+ mkdir $M0/dir_$i/dir_$j
+ echo file>$M0/dir_$i/dir_$j/file_$j
+ done
+done
+
+#add 2 bricks
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{5,6};
+
+#perform rebalance fix-layout
+TEST $CLI volume rebalance $V0 fix-layout start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" fix-layout_status_field $V0;
+
+#bug-1075087 - rebalance post add brick
+TEST mkdir $M0/dir{21..30};
+TEST touch $M0/dir{21..30}/files{1..10};
+
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{7,8}
+
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN 180 "completed" rebalance_status_field $V0
+
+TEST pkill gluster
+TEST glusterd
+TEST pidof glusterd
+
+# status should be "completed" immediately after glusterd has respawned.
+EXPECT_WITHIN 20 "completed" rebalance_status_field $V0
+
+cleanup
diff --git a/tests/bugs/glusterd/remove-brick-in-cluster.t b/tests/bugs/glusterd/remove-brick-in-cluster.t
new file mode 100644
index 00000000000..de94220a906
--- /dev/null
+++ b/tests/bugs/glusterd/remove-brick-in-cluster.t
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+TEST launch_cluster 2;
+
+#bug-1047955 - remove brick from new peer in cluster
+TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/${V0}{1,2,3,4}
+TEST $CLI_1 volume start $V0;
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_2 volume remove-brick $V0 $H1:$B1/${V0}{3,4} start;
+TEST $CLI_2 volume info
+
+#bug-964059 - volume status post remove brick start
+TEST $CLI_1 volume create $V1 $H1:$B1/${V1}0 $H2:$B2/${V1}1
+TEST $CLI_1 volume start $V1
+TEST $CLI_1 volume remove-brick $V1 $H2:$B2/${V1}1 start
+TEST $CLI_1 volume status
+
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume delete $V0
+
+#bug-1230121 - decrease replica count by remove-brick and increase it by add-brick
+## Creating a 2x3 replicate volume
+TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/brick1 $H2:$B2/brick2 \
+ $H1:$B1/brick3 $H2:$B2/brick4 \
+ $H1:$B1/brick5 $H2:$B2/brick6
+
+## Start the volume
+TEST $CLI_1 volume start $V0
+
+## Shrinking volume replica 2x3 to 2x2 by performing remove-brick operation.
+TEST $CLI_1 volume remove-brick $V0 replica 2 $H1:$B1/brick1 $H2:$B2/brick6 force
+
+## Shrinking volume replica 2x2 to 1x2 by performing remove-brick operation
+TEST $CLI_1 volume remove-brick $V0 replica 2 $H1:$B1/brick3 $H2:$B2/brick2 force
+
+## Shrinking volume replica from 1x2 to 1x1 by performing remove-brick operation
+TEST $CLI_1 volume remove-brick $V0 replica 1 $H1:$B1/brick5 force
+
+
+### Expanding volume replica by performing add-brick operation.
+
+## Expand volume replica from 1x1 to 1x2 by performing add-brick operation
+TEST $CLI_1 volume add-brick $V0 replica 2 $H1:$B1/brick5 force
+
+## Expand volume replica from 1x2 to 2x2 by performing add-brick operation
+TEST $CLI_1 volume add-brick $V0 replica 2 $H1:$B1/brick3 $H2:$B2/brick2 force
+
+## Expand volume replica from 2x2 to 2x3 by performing add-brick operation
+TEST $CLI_1 volume add-brick $V0 replica 3 $H1:$B1/brick1 $H2:$B2/brick6 force
+
+cleanup
+
diff --git a/tests/bugs/glusterd/remove-brick-testcases.t b/tests/bugs/glusterd/remove-brick-testcases.t
new file mode 100644
index 00000000000..2f982d5266f
--- /dev/null
+++ b/tests/bugs/glusterd/remove-brick-testcases.t
@@ -0,0 +1,119 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+function brick_count()
+{
+ local vol=$1;
+
+ $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..5}
+TEST $CLI volume start $V0
+
+#bug-1225716 - remove-brick on a brick which is down should fail
+#kill a brick process
+kill_brick $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status $V0 $H0 $B0/${V0}1
+
+#remove-brick start should fail as the brick is down
+TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+#remove-brick start should succeed as the brick is up
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}1"
+
+#kill a brick process
+kill_brick $V0 $H0 $B0/${V0}1
+
+#remove-brick commit should pass even if the brick is down
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 commit
+
+#bug-1121584 - brick-existing-validation-for-remove-brick-status-stop
+## Start remove-brick operation on the volume
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start
+
+## Giving a non-existing brick to the remove-brick status/stop command should
+## give an error.
+TEST ! $CLI volume remove-brick $V0 $H0:$B0/ABCD status
+TEST ! $CLI volume remove-brick $V0 $H0:$B0/ABCD stop
+
+## Giving a brick which is part of the volume to the remove-brick status/stop
+## command should print statistics of the remove-brick operation or stop the
+## remove-brick operation.
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 status
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 stop
+
+#bug-878004 - validate remove brick force
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 force;
+EXPECT '3' brick_count $V0
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 force;
+EXPECT '2' brick_count $V0
+
+#bug-1027171 - Do not allow commit if the bricks are not decommissioned
+#Remove bricks and commit without starting
+function remove_brick_commit_status {
+ $CLI volume remove-brick $V0 \
+ $H0:$B0/${V0}4 commit 2>&1 |grep -oE "success|decommissioned"
+}
+EXPECT "decommissioned" remove_brick_commit_status;
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0;
+
+#Create a 2X3 distributed-replicate volume
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..6};
+TEST $CLI volume start $V0
+
+#Try to reduce replica count with start option
+function remove_brick_start_status {
+ $CLI volume remove-brick $V0 replica 2 \
+ $H0:$B0/${V0}3 $H0:$B0/${V0}6 start 2>&1 |grep -oE "success|failed"
+}
+EXPECT "failed" remove_brick_start_status;
+
+#Remove bricks with commit option
+function remove_brick_commit_status2 {
+ $CLI volume remove-brick $V0 replica 2 \
+ $H0:$B0/${V0}3 $H0:$B0/${V0}6 commit 2>&1 |
+ grep -oE "success|decommissioned"
+}
+EXPECT "decommissioned" remove_brick_commit_status2;
+TEST $CLI volume info $V0
+
+#bug-1040408 - reduce replica count of distributed replicate volume
+
+# Reduce to 2x2 volume by specifying bricks in reverse order
+function remove_brick_status {
+ $CLI volume remove-brick $V0 replica 2 \
+ $H0:$B0/${V0}6 $H0:$B0/${V0}3 force 2>&1 |grep -oE "success|failed"
+}
+EXPECT "success" remove_brick_status;
+TEST $CLI volume info $V0
+
+#bug-1120647 - remove brick validation
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}{4..5} start
+EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}5"
+EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}4"
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}{4..5} commit
+TEST $CLI volume remove-brick $V0 replica 1 $H0:$B0/${V0}2 force
+
+cleanup
diff --git a/tests/bugs/glusterd/remove-brick-validation.t b/tests/bugs/glusterd/remove-brick-validation.t
new file mode 100644
index 00000000000..a0ff4ff6a24
--- /dev/null
+++ b/tests/bugs/glusterd/remove-brick-validation.t
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+function peer_count {
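+# $1 selects which node's CLI to use: eval expands \$CLI_$1 into $CLI_1, $CLI_2, ...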
+eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+## start a 3 node virtual cluster
+TEST launch_cluster 3;
+
+## peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+
+#testcase: bug-1245045-remove-brick-validation
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume start $V0
+
+kill_glusterd 2
+
+#remove-brick should fail as the peer hosting the brick is down
+TEST ! $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
+
+TEST $glusterd_2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+
+#volume status should work
+TEST $CLI_2 volume status
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 3
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
+kill_glusterd 2
+
+#remove-brick commit should fail as the peer hosting the brick is down
+TEST ! $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} commit
+
+TEST $glusterd_2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+
+#volume status should work
+TEST $CLI_2 volume status
+
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} stop
+
+kill_glusterd 3
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
+
+TEST start_glusterd 3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+TEST $CLI_3 volume status
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-974007.t b/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t
index 5759adb583f..00beab59137 100644
--- a/tests/bugs/glusterd/bug-974007.t
+++ b/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t
@@ -1,8 +1,5 @@
#!/bin/bash
-#Test case: Create a distributed replicate volume, and remove multiple
-#replica pairs in a single remove-brick command.
-
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
@@ -17,6 +14,7 @@ TEST $CLI volume info
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..6};
TEST $CLI volume start $V0
+#bug-974007 - remove multiple replica pairs in a single remove-brick command
# Mount FUSE and create files
TEST glusterfs -s $H0 --volfile-id $V0 $M0
TEST touch $M0/file{1..10}
@@ -41,12 +39,42 @@ function remove_brick_commit_status {
}
EXPECT "success" remove_brick_commit_status;
+
# Check the volume type
EXPECT "Replicate" echo `$CLI volume info |grep Type |awk '{print $2}'`
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#bug-961669 - remove brick start should fail when reducing the replica count
+
+#Create a 3x3 dist-rep volume
+TEST $CLI volume create $V1 replica 3 $H0:$B0/${V1}{0,1,2,3,4,5,6,7,8};
+TEST $CLI volume start $V1
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "9" brick_count ${V1}
+
+# Mount FUSE and create file/directory
+TEST glusterfs -s $H0 --volfile-id $V1 $M0
+TEST touch $M0/zerobytefile.txt
+TEST mkdir $M0/test_dir
+TEST dd if=/dev/zero of=$M0/file bs=1024 count=1024
+
+function remove_brick_start {
+ $CLI volume remove-brick $V1 replica 2 $H0:$B0/${V1}{1,4,7} start 2>&1|grep -oE 'success|failed'
+}
+
+function remove_brick {
+ $CLI volume remove-brick $V1 replica 2 $H0:$B0/${V1}{1,4,7} force 2>&1|grep -oE 'success|failed'
+}
+
+#remove-brick start variant
+#Actual message displayed at cli is:
+#"volume remove-brick start: failed: Rebalancing not needed when reducing replica count. Try without the 'start' option"
+EXPECT "failed" remove_brick_start;
+
+#remove-brick commit-force
+#Actual message displayed at cli is:
+#"volume remove-brick commit force: success"
+EXPECT "success" remove_brick
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-TEST $CLI volume stop $V0
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
cleanup;
diff --git a/tests/bugs/glusterd/replace-brick-operations.t b/tests/bugs/glusterd/replace-brick-operations.t
new file mode 100644
index 00000000000..044aa3d6c6d
--- /dev/null
+++ b/tests/bugs/glusterd/replace-brick-operations.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+## Test case for BZ: 1094119 Remove replace-brick support from gluster
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+# Start glusterd
+TEST glusterd
+TEST pidof glusterd
+
+## Let's create and start the volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}
+TEST $CLI volume start $V0
+
+#bug-1094119-remove-replace-brick-support-from-glusterd
+
+## With this patch, replace-brick only accepts the following command:
+## volume replace-brick <VOLNAME> <SOURCE-BRICK> <NEW-BRICK> {commit force}
+## Apart from this, the replace-brick command will fail.
+
+TEST ! $CLI volume replace-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}3 start
+TEST ! $CLI volume replace-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}3 status
+TEST ! $CLI volume replace-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}3 abort
+
+
+## replace-brick commit force command should succeed
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}3 commit force
+
+#bug-1242543-replace-brick validation
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+# Replace brick1 without killing
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1_new commit force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST kill_brick $V0 $H0 $B0/${V0}1_new
+
+# Replace brick1 after killing the brick
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1_new $H0:$B0/${V0}1_newer commit force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+cleanup;
diff --git a/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t b/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t
new file mode 100644
index 00000000000..e6e65c48456
--- /dev/null
+++ b/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
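+# Assumption about the 'volume status' layout for the Self-heal Daemon row:
+# column 7 carries the Online flag (Y/N) and column 8 the daemon PID, which is
+# what the helpers below extract.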
+function shd_up_status_1 {
+ $CLI_1 volume status | grep "localhost" | grep "Self-heal Daemon" | awk '{print $7}'
+}
+
+function shd_up_status_2 {
+ $CLI_2 volume status | grep "localhost" | grep "Self-heal Daemon" | awk '{print $7}'
+}
+
+function get_shd_pid_2 {
+ $CLI_2 volume status | grep "localhost" | grep "Self-heal Daemon" | awk '{print $8}'
+}
+
+cleanup;
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+TEST launch_cluster 3
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
+
+TEST $CLI_1 volume create $V0 replica 2 $H1:$B0/${V0} $H2:$B0/${V0}
+TEST $CLI_1 volume start $V0
+
+#testcase: bug-1507466 - validate reset-brick commit force
+# Negative case with brick not killed && volume-id xattrs present
+TEST ! $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} $H1:$B0/${V0} commit force
+
+TEST $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} start
+# Now test if reset-brick commit force works
+TEST $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} $H1:$B0/${V0} commit force
+
+#testcase: bug-1383893 - shd should not come up after restarting the peer glusterd
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B0/${V0}
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B0/${V0}
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" shd_up_status_1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" shd_up_status_2
+
+# Bring down shd on 2nd node
+kill -15 $(get_shd_pid_2)
+
+# Bring down glusterd on 1st node
+TEST kill_glusterd 1
+
+#Bring back 1st glusterd
+TEST $glusterd_1
+
+# We need to wait until PROCESS_UP_TIMEOUT and then check that the shd service
+# has started on node 2, because once glusterd regains quorum it will restart
+# all volume-level daemons
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" shd_up_status_2
+
+cleanup;
diff --git a/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t b/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t
new file mode 100644
index 00000000000..a871e112d87
--- /dev/null
+++ b/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t
@@ -0,0 +1,54 @@
+#! /bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+count=`$CLI_3 peer status | grep 'Peer in Cluster (Connected)' | wc -l`
+echo $count
+}
+
+function check_shd {
+ps aux | grep $1 | grep glustershd | wc -l
+}
+
+cleanup
+
+
+TEST launch_cluster 6
+
+TESTS_EXPECTED_IN_LOOP=25
+for i in $(seq 2 6); do
+ hostname="H$i"
+ TEST $CLI_1 peer probe ${!hostname}
+done
+
+
+EXPECT_WITHIN $PROBE_TIMEOUT 5 check_peers;
+for i in $(seq 1 5); do
+
+ TEST $CLI_1 volume create ${V0}_$i replica 3 $H1:$B1/${V0}_$i $H2:$B2/${V0}_$i $H3:$B3/${V0}_$i $H4:$B4/${V0}_$i $H5:$B5/${V0}_$i $H6:$B6/${V0}_$i
+ TEST $CLI_1 volume start ${V0}_$i force
+
+done
+
+#kill a node
+TEST kill_node 3
+
+TEST $glusterd_3;
+EXPECT_WITHIN $PROBE_TIMEOUT 5 check_peers
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 check_shd $H3
+
+for i in $(seq 1 5); do
+
+ TEST $CLI_1 volume stop ${V0}_$i
+ TEST $CLI_1 volume delete ${V0}_$i
+
+done
+
+for i in $(seq 1 6); do
+ hostname="H$i"
+ EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 0 check_shd ${!hostname}
+done
+cleanup
diff --git a/tests/bugs/glusterd/snapshot-operations.t b/tests/bugs/glusterd/snapshot-operations.t
new file mode 100644
index 00000000000..4705577d741
--- /dev/null
+++ b/tests/bugs/glusterd/snapshot-operations.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+
+cleanup;
+
+TEST verify_lvm_version
+TEST launch_cluster 3;
+TEST setup_lvm 3;
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 replica 2 $H1:$L1 $H2:$L2
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+#bug-1318591 - skip-non-directories-inside-vols
+
+b="B1"
+TEST touch ${!b}/glusterd/vols/file
+
+TEST $CLI_1 snapshot create snap1 $V0 no-timestamp;
+
+TEST touch ${!b}/glusterd/snaps/snap1/file
+
+#bug-1322145 - peer hosting snapshotted bricks should not be detachable
+
+kill_glusterd 2
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume replace-brick $V0 $H2:$L2 $H3:$L3 commit force
+
+# peer hosting snapshotted bricks should not be detachable
+TEST ! $CLI_1 peer detach $H2
+
+TEST killall_gluster
+TEST $glusterd_1
+TEST $glusterd_2
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/sync-post-glusterd-restart.t b/tests/bugs/glusterd/sync-post-glusterd-restart.t
new file mode 100644
index 00000000000..de3dff715ab
--- /dev/null
+++ b/tests/bugs/glusterd/sync-post-glusterd-restart.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function volume_get_field()
+{
+ local vol=$1
+ local field=$2
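+    # 'volume get' prints option/value rows; the last line's 2nd column is the
+    # value of the requested option (an assumption about the CLI layout)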
+ $CLI_2 volume get $vol $field | tail -1 | awk '{print $2}'
+}
+
+cleanup
+
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume start $V0
+
+TEST $CLI_1 volume set $V0 performance.readdir-ahead on
+
+# Bring down 2nd glusterd
+TEST kill_glusterd 2
+
+##bug-1420637 and bug-1323287 - sync post glusterd restart
+
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 60
+TEST $CLI_1 volume set $V0 performance.readdir-ahead off
+TEST $CLI_1 volume set $V0 performance.write-behind off
+
+# Bring back 2nd glusterd
+TEST $glusterd_2
+
+# After the 2nd glusterd comes back, there will be 2 nodes in the cluster
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
+
+#bug-1420637-volume sync post glusterd restart
+
+EXPECT_WITHIN $PROBE_TIMEOUT "60" volinfo_field_2 all cluster.server-quorum-ratio
+EXPECT_WITHIN $PROBE_TIMEOUT "off" volinfo_field_2 $V0 performance.readdir-ahead
+
+#bug-1323287
+EXPECT_WITHIN $PROBE_TIMEOUT 'off' volume_get_field $V0 'write-behind'
+
+#bug-1213295 - volume stop should not crash glusterd post glusterd restart
+
+TEST $CLI_2 volume stop $V0
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1
+
+cleanup
diff --git a/tests/bugs/glusterd/validating-options-for-replicated-volume.t b/tests/bugs/glusterd/validating-options-for-replicated-volume.t
new file mode 100644
index 00000000000..ddc80b17870
--- /dev/null
+++ b/tests/bugs/glusterd/validating-options-for-replicated-volume.t
@@ -0,0 +1,142 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+## start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+#bug-1314649 - validate group virt
+TEST $CLI volume set $V0 group virt;
+
+#bug-765230 - remove-quota-related-option-after-disabling-quota
+## setting soft-timeout as 20
+TEST $CLI volume set $V0 features.soft-timeout 20
+EXPECT '20' volinfo_field $V0 'features.soft-timeout';
+
+## enabling features.quota-deem-statfs
+TEST ! $CLI volume set $V0 features.quota-deem-statfs on
+EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
+
+## enabling quota
+TEST $CLI volume quota $V0 enable
+EXPECT 'on' volinfo_field $V0 'features.quota'
+
+## setting soft-timeout as 20
+TEST $CLI volume set $V0 features.soft-timeout 20
+EXPECT '20' volinfo_field $V0 'features.soft-timeout';
+
+## enabling features.quota-deem-statfs
+TEST $CLI volume set $V0 features.quota-deem-statfs on
+EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs'
+
+## disabling quota
+TEST $CLI volume quota $V0 disable
+EXPECT 'off' volinfo_field $V0 'features.quota'
+EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
+EXPECT '' volinfo_field $V0 'features.soft-timeout'
+
+## setting soft-timeout as 30
+TEST $CLI volume set $V0 features.soft-timeout 30
+EXPECT '30' volinfo_field $V0 'features.soft-timeout';
+
+## disabling features.quota-deem-statfs
+TEST ! $CLI volume set $V0 features.quota-deem-statfs off
+EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
+
+TEST ! $CLI volume set $V0 statedump-path ""
+TEST ! $CLI volume set $V0 statedump-path " "
+TEST $CLI volume set $V0 statedump-path "/home/"
+EXPECT "/home/" volume_option $V0 server.statedump-path
+
+TEST ! $CLI volume set $V0 background-self-heal-count ""
+TEST ! $CLI volume set $V0 background-self-heal-count " "
+TEST $CLI volume set $V0 background-self-heal-count 10
+EXPECT "10" volume_option $V0 cluster.background-self-heal-count
+
+TEST ! $CLI volume set $V0 io-cache-size ""
+TEST ! $CLI volume set $V0 io-cache-size " "
+TEST $CLI volume set $V0 io-cache-size 64MB
+EXPECT "64MB" volume_option $V0 performance.io-cache-size
+
+TEST ! $CLI volume set $V0 quick-read-cache-size ""
+TEST ! $CLI volume set $V0 quick-read-cache-size " "
+TEST $CLI volume set $V0 quick-read-cache-size 512MB
+EXPECT "512MB" volume_option $V0 performance.quick-read-cache-size
+
+TEST ! $CLI volume set $V0 self-heal-daemon ""
+TEST ! $CLI volume set $V0 self-heal-daemon " "
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT "on" volume_option $V0 cluster.self-heal-daemon
+
+TEST ! $CLI volume set $V0 read-subvolume ""
+TEST ! $CLI volume set $V0 read-subvolume " "
+TEST $CLI volume set $V0 read-subvolume $V0-client-0
+EXPECT "$V0-client-0" volume_option $V0 cluster.read-subvolume
+
+TEST ! $CLI volume set $V0 data-self-heal-algorithm ""
+TEST ! $CLI volume set $V0 data-self-heal-algorithm " "
+TEST ! $CLI volume set $V0 data-self-heal-algorithm on
+TEST $CLI volume set $V0 data-self-heal-algorithm full
+EXPECT "full" volume_option $V0 cluster.data-self-heal-algorithm
+
+TEST ! $CLI volume set $V0 min-free-inodes ""
+TEST ! $CLI volume set $V0 min-free-inodes " "
+TEST $CLI volume set $V0 min-free-inodes 60%
+EXPECT "60%" volume_option $V0 cluster.min-free-inodes
+
+TEST ! $CLI volume set $V0 min-free-disk ""
+TEST ! $CLI volume set $V0 min-free-disk " "
+TEST $CLI volume set $V0 min-free-disk 60%
+EXPECT "60%" volume_option $V0 cluster.min-free-disk
+
+TEST $CLI volume set $V0 min-free-disk 120
+EXPECT "120" volume_option $V0 cluster.min-free-disk
+
+TEST ! $CLI volume set $V0 frame-timeout ""
+TEST ! $CLI volume set $V0 frame-timeout " "
+TEST $CLI volume set $V0 frame-timeout 0
+EXPECT "0" volume_option $V0 network.frame-timeout
+
+TEST ! $CLI volume set $V0 auth.allow ""
+TEST ! $CLI volume set $V0 auth.allow " "
+TEST $CLI volume set $V0 auth.allow 192.168.122.1
+EXPECT "192.168.122.1" volume_option $V0 auth.allow
+
+#bug-782095 - validate performance cache min/max size value
+
+## setting performance cache min size as 2MB
+TEST $CLI volume set $V0 performance.cache-min-file-size 2MB
+EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## setting performance cache max size as 20MB
+TEST $CLI volume set $V0 performance.cache-max-file-size 20MB
+EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
+
+## trying to set performance cache min size as 25MB (greater than max) should fail
+TEST ! $CLI volume set $V0 performance.cache-min-file-size 25MB
+EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## able to set performance cache min size as long as it is less than the max size
+TEST $CLI volume set $V0 performance.cache-min-file-size 15MB
+EXPECT '15MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## trying to set cache-max-file-size as 10MB (less than the current min) should fail
+TEST ! $CLI volume set $V0 cache-max-file-size 10MB
+EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
+
+## finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup
diff --git a/tests/bugs/glusterd/validating-server-quorum.t b/tests/bugs/glusterd/validating-server-quorum.t
new file mode 100644
index 00000000000..ae7d83fd81c
--- /dev/null
+++ b/tests/bugs/glusterd/validating-server-quorum.t
@@ -0,0 +1,125 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_fs {
+ df $1 &> /dev/null
+ echo $?
+}
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+TEST launch_cluster 3
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+# Let's create the volume
+TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/${V0}1 $H2:$B2/${V0}2 $H3:$B3/${V0}3
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+
+# Start the volume
+TEST $CLI_1 volume start $V0
+
+#bug-1345727 - bricks should be down when quorum is not met
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H3 $B3/${V0}3
+
+# Bring down glusterd on 2nd node
+TEST kill_glusterd 2
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST kill_glusterd 3
+EXPECT_WITHIN $PROBE_TIMEOUT 0 peer_count
+
+# Server quorum is not met. Brick on 1st node must be down
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}1
+
+# Set quorum ratio to 95, meaning 95% or more of the total nodes in the
+# cluster must be available for performing volume operations.
+# i.e. server-side quorum is met if the number of nodes that are available is
+# greater than or equal to 'quorum-ratio' times the number of nodes in the
+# cluster
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 95
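+# Worked example for this ratio (illustration): with 3 nodes in the cluster,
+# 95% of 3 is 2.85, so at least 3 live nodes are needed; with only 2 of 3
+# glusterds up quorum stays unmet, and only when all 3 are up (3/3 = 100% >= 95%)
+# is quorum met again, which is what the steps below verify.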
+
+#bug-1483058 - replace-brick should fail when quorum is not met
+TEST ! $CLI_1 volume replace-brick $V0 $H2:$B2/${V0}2 $H1:$B1/${V0}2_new commit force
+
+#Bring back 2nd glusterd
+TEST $glusterd_2
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+# Server quorum is still not met. Bricks should be down on 1st and 2nd nodes
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}2
+
+# Bring back 3rd glusterd
+TEST $glusterd_3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+# Server quorum is met now. Bricks should be up on all nodes
+# Check from 3rd instance of glusterd so that the 3rd node finishes all its
+# handshake and then report back the brick status
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 3 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 3 $V0 $H2 $B2/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 3 $V0 $H3 $B3/${V0}3
+
+# Check from 1st instance of glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 3 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 3 $V0 $H2 $B2/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 3 $V0 $H3 $B3/${V0}3
+
+# TODO : Because commit fe71ee7 introduced a delay of 1 sec to wait for shd connect and
+# disconnect events to be serially processed during a restart of the shd daemon,
+# this introduced a race where, while the big lock is being released, if any command
+# sneaks in and acquires the big lock, it might end up working on a stale volinfo.
+# We need to find a better way to fix this.
+
+sleep 3
+
+# quorum is met. replace-brick will execute successfully
+EXPECT_WITHIN $PEER_SYNC_TIMEOUT 0 attempt_replace_brick 1 $V0 $H2:$B2/${V0}2 $H2:$B2/${V0}2_new
+
+TEST $CLI_1 volume reset all
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}2_new
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H3 $B3/${V0}3
+
+
+#bug-913555 - volume should become unwritable when quorum is not met
+
+TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
+
+# Kill one pseudo-node, make sure the others survive and volume stays up.
+TEST kill_node 3;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}2_new
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
+
+# Kill another pseudo-node, make sure the last one dies and volume goes down.
+TEST kill_node 2;
+EXPECT_WITHIN $PROBE_TIMEOUT 0 check_peers
+#two glusterfsds of the other two glusterds must be dead
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 check_fs $M0;
+
+TEST $glusterd_2;
+TEST $glusterd_3;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
+
+cleanup