Diffstat (limited to 'tests/bugs/glusterd')
-rwxr-xr-x  tests/bugs/glusterd/859927/repl.t | 59
-rw-r--r--  tests/bugs/glusterd/add-brick-and-validate-replicated-volume-options.t | 110
-rw-r--r--  tests/bugs/glusterd/brick-mux-validation-in-cluster.t | 108
-rw-r--r--  tests/bugs/glusterd/brick-mux-validation.t | 104
-rw-r--r--  tests/bugs/glusterd/brick-mux.t | 81
-rw-r--r--  tests/bugs/glusterd/brick-order-check-add-brick.t | 61
-rwxr-xr-x  tests/bugs/glusterd/bug-1070734.t | 80
-rw-r--r--  tests/bugs/glusterd/bug-1085330-and-bug-916549.t | 93
-rwxr-xr-x  tests/bugs/glusterd/bug-1091935-brick-order-check-from-cli-to-glusterd.t | 93
-rw-r--r--  tests/bugs/glusterd/bug-1238706-daemons-stop-on-peer-cleanup.t | 44
-rw-r--r--  tests/bugs/glusterd/bug-1242875-do-not-pass-volinfo-quota.t | 38
-rw-r--r--  tests/bugs/glusterd/bug-1482906-peer-file-blank-line.t | 29
-rw-r--r--  tests/bugs/glusterd/bug-1595320.t | 93
-rw-r--r--  tests/bugs/glusterd/bug-1696046.t | 113
-rw-r--r--  tests/bugs/glusterd/bug-1699339.t | 73
-rw-r--r--  tests/bugs/glusterd/bug-1720566.t | 50
-rw-r--r--  tests/bugs/glusterd/bug-824753-file-locker.c | 46
-rwxr-xr-x  tests/bugs/glusterd/bug-824753.t | 45
-rw-r--r--  tests/bugs/glusterd/bug-948729/bug-948729-force.t | 103
-rw-r--r--  tests/bugs/glusterd/bug-948729/bug-948729-mode-script.t | 77
-rw-r--r--  tests/bugs/glusterd/bug-948729/bug-948729.t | 80
-rw-r--r--  tests/bugs/glusterd/bug-949930.t | 29
-rw-r--r--  tests/bugs/glusterd/check_elastic_server.t | 63
-rw-r--r--  tests/bugs/glusterd/daemon-log-level-option.t | 93
-rw-r--r--  tests/bugs/glusterd/df-results-post-replace-brick-operations.t | 61
-rw-r--r--  tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t | 71
-rw-r--r--  tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t | 115
-rw-r--r--  tests/bugs/glusterd/optimized-basic-testcases.t | 305
-rw-r--r--  tests/bugs/glusterd/quorum-validation.t | 122
-rw-r--r--  tests/bugs/glusterd/rebalance-in-cluster.t | 52
-rw-r--r--  tests/bugs/glusterd/rebalance-operations-in-single-node.t | 131
-rw-r--r--  tests/bugs/glusterd/remove-brick-in-cluster.t | 60
-rw-r--r--  tests/bugs/glusterd/remove-brick-testcases.t | 119
-rw-r--r--  tests/bugs/glusterd/remove-brick-validation.t | 68
-rw-r--r--  tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t | 80
-rw-r--r--  tests/bugs/glusterd/replace-brick-operations.t | 48
-rw-r--r--  tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t | 63
-rw-r--r--  tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t | 54
-rw-r--r--  tests/bugs/glusterd/snapshot-operations.t | 50
-rw-r--r--  tests/bugs/glusterd/sync-post-glusterd-restart.t | 54
-rw-r--r--  tests/bugs/glusterd/validating-options-for-replicated-volume.t | 142
-rw-r--r--  tests/bugs/glusterd/validating-server-quorum.t | 125
42 files changed, 3485 insertions(+), 0 deletions(-)
diff --git a/tests/bugs/glusterd/859927/repl.t b/tests/bugs/glusterd/859927/repl.t
new file mode 100755
index 00000000000..6e7c23b5b1d
--- /dev/null
+++ b/tests/bugs/glusterd/859927/repl.t
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd
+
+#Tests for data-self-heal-algorithm option
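+# create_setup_for_self_heal kills one brick of the replica, writes fresh data
+# to the given file through the mount (so the surviving brick becomes the heal
+# source), and then force-starts the volume to bring the killed brick back.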
+function create_setup_for_self_heal {
+ file=$1
+ kill_brick $V0 $H0 $B0/${V0}1
+ dd of=$file if=/dev/urandom bs=1024k count=1 2>&1 > /dev/null
+ $CLI volume start $V0 force
+}
+
+function test_write {
+ dd of=$M0/a if=/dev/urandom bs=1k count=1 2>&1 > /dev/null
+}
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 client-log-level DEBUG
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0;
+
+touch $M0/a
+
+TEST $CLI volume set $V0 cluster.data-self-heal-algorithm full
+EXPECT full volume_option $V0 cluster.data-self-heal-algorithm
+create_setup_for_self_heal $M0/a
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+cat $file > /dev/null 2>&1
+EXPECT_WITHIN $HEAL_TIMEOUT 0 get_pending_heal_count $V0
+TEST cmp $B0/${V0}1/a $B0/${V0}2/a
+
+TEST $CLI volume set $V0 cluster.data-self-heal-algorithm diff
+EXPECT diff volume_option $V0 cluster.data-self-heal-algorithm
+create_setup_for_self_heal $M0/a
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+cat $file > /dev/null 2>&1
+EXPECT_WITHIN $HEAL_TIMEOUT 0 get_pending_heal_count $V0
+TEST cmp $B0/${V0}1/a $B0/${V0}2/a
+
+TEST $CLI volume reset $V0 cluster.data-self-heal-algorithm
+create_setup_for_self_heal $M0/a
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+cat $file > /dev/null 2>&1
+EXPECT_WITHIN $HEAL_TIMEOUT 0 get_pending_heal_count $V0
+TEST cmp $B0/${V0}1/a $B0/${V0}2/a
+
+TEST ! $CLI volume set $V0 cluster.data-self-heal-algorithm ""
+
+cleanup;
diff --git a/tests/bugs/glusterd/add-brick-and-validate-replicated-volume-options.t b/tests/bugs/glusterd/add-brick-and-validate-replicated-volume-options.t
new file mode 100644
index 00000000000..95d0eb69ac1
--- /dev/null
+++ b/tests/bugs/glusterd/add-brick-and-validate-replicated-volume-options.t
@@ -0,0 +1,110 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status';
+
+#bug-1102656 - validating volume top command
+
+TEST $CLI volume top $V0 open
+TEST ! $CLI volume top $V0 open brick $H0:/tmp/brick
+TEST $CLI volume top $V0 read
+
+TEST $CLI volume status
+
+#bug- 1002556
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
+
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}3
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks';
+
+TEST $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}3 force
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
+
+TEST killall glusterd
+TEST glusterd
+
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
+
+#bug-1406411- fail-add-brick-when-replica-count-changes
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+#add-brick should fail
+TEST ! $CLI_NO_FORCE volume add-brick $V0 replica 3 $H0:$B0/${V0}3
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}3
+
+TEST $CLI volume create $V1 $H0:$B0/${V1}{1,2};
+TEST $CLI volume start $V1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}2
+TEST kill_brick $V1 $H0 $B0/${V1}1
+
+#add-brick should fail
+TEST ! $CLI_NO_FORCE volume add-brick $V1 replica 2 $H0:$B0/${V1}{3,4}
+
+TEST $CLI volume start $V1 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}2
+
+TEST $CLI volume add-brick $V1 replica 2 $H0:$B0/${V1}{3,4}
+
+#bug-905307 - validate cluster.post-op-delay-secs option
+
+#Strings should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs abc
+
+#-ve ints should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs -1
+
+#INT_MAX+1 should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 2147483648
+
+#floats should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 1.25
+
+#min val 0 should be accepted
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 0
+EXPECT "0" volume_option $V0 cluster.post-op-delay-secs
+
+#max val 2147483647 should be accepted
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147483647
+EXPECT "2147483647" volume_option $V0 cluster.post-op-delay-secs
+
+#some middle val in range 2147 should be accepted
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147
+EXPECT "2147" volume_option $V0 cluster.post-op-delay-secs
+
+#bug-1265479 - validate-replica-volume-options
+
+#Setting data-self-heal option on for distribute-replicate volume
+TEST $CLI volume set $V1 data-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.data-self-heal';
+TEST $CLI volume set $V1 cluster.data-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.data-self-heal';
+
+#Setting metadata-self-heal option on for distribute-replicate volume
+TEST $CLI volume set $V1 metadata-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.metadata-self-heal';
+TEST $CLI volume set $V1 cluster.metadata-self-heal on
+
+#Setting entry-self-heal option on for distribute-replicate volume
+TEST $CLI volume set $V1 entry-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.entry-self-heal';
+TEST $CLI volume set $V1 cluster.entry-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.entry-self-heal';
+
+cleanup
diff --git a/tests/bugs/glusterd/brick-mux-validation-in-cluster.t b/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
new file mode 100644
index 00000000000..b6af487a791
--- /dev/null
+++ b/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
@@ -0,0 +1,108 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
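+# count_brick_pids extracts the <pid> fields from the XML volume status and
+# counts the distinct real PIDs (ignoring "N/A" entries); with brick
+# multiplexing on, this should stay at one brick process per node.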
+function count_brick_pids {
+ $CLI_1 --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
+ | grep -v "N/A" | sort | uniq | wc -l
+}
+
+function count_N/A_brick_pids {
+ $CLI_1 --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
+ | grep -- '\-1' | sort | uniq | wc -l
+}
+
+function check_peers {
+ $CLI_2 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+TEST launch_cluster 3
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+TEST $CLI_1 volume set all cluster.brick-multiplex on
+#bug-1609163 - bricks of normal volume should not attach to bricks of gluster_shared_storage volume
+
+##Create, start and mount meta_volume i.e., shared_storage
+TEST $CLI_1 volume create $META_VOL replica 3 $H1:$B1/${META_VOL}1 $H2:$B2/${META_VOL}1 $H3:$B3/${META_VOL}1
+TEST $CLI_1 volume start $META_VOL
+TEST mkdir -p $META_MNT
+TEST glusterfs -s $H1 --volfile-id $META_VOL $META_MNT
+
+TEST $CLI_1 volume info gluster_shared_storage
+
+EXPECT 3 count_brick_processes
+
+#create and start a new volume
+TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/${V0}{1..3} $H2:$B2/${V0}{1..3}
+TEST $CLI_1 volume start $V0
+
+# bricks of normal volume should not attach to bricks of gluster_shared_storage volume
+EXPECT 5 count_brick_processes
+
+#bug-1549996 - stale brick processes on the nodes after volume deletion
+
+TEST $CLI_1 volume create $V1 replica 3 $H1:$B1/${V1}{1..3} $H2:$B2/${V1}{1..3}
+TEST $CLI_1 volume start $V1
+
+EXPECT 5 count_brick_processes
+
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume stop $V1
+
+EXPECT 3 count_brick_processes
+
+TEST $CLI_1 volume stop $META_VOL
+
+TEST $CLI_1 volume delete $META_VOL
+TEST $CLI_1 volume delete $V0
+TEST $CLI_1 volume delete $V1
+
+#bug-1773856 - Brick process fails to come up with brickmux on
+
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1 $H3:$B3/${V0}1 force
+TEST $CLI_1 volume start $V0
+
+
+EXPECT 3 count_brick_processes
+
+#create and start a new volume
+TEST $CLI_1 volume create $V1 $H1:$B1/${V1}2 $H2:$B2/${V1}2 $H3:$B3/${V1}2 force
+TEST $CLI_1 volume start $V1
+
+EXPECT 3 count_brick_processes
+
+V2=patchy2
+TEST $CLI_1 volume create $V2 $H1:$B1/${V2}3 $H2:$B2/${V2}3 $H3:$B3/${V2}3 force
+TEST $CLI_1 volume start $V2
+
+EXPECT 3 count_brick_processes
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_brick_pids
+
+TEST kill_node 1
+
+sleep 10
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+
+$CLI_2 volume set $V0 performance.readdir-ahead on
+$CLI_2 volume set $V1 performance.readdir-ahead on
+
+TEST $glusterd_1;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 count_N/A_brick_pids
+
+cleanup;
diff --git a/tests/bugs/glusterd/brick-mux-validation.t b/tests/bugs/glusterd/brick-mux-validation.t
new file mode 100644
index 00000000000..61b0455f9a8
--- /dev/null
+++ b/tests/bugs/glusterd/brick-mux-validation.t
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../traps.rc
+. $(dirname $0)/../../volume.rc
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
+function count_brick_pids {
+ $CLI --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
+ | grep -v "N/A" | sort | uniq | wc -l
+}
+
+cleanup;
+
+#bug-1451248 - validate brick mux after glusterd reboot
+
+TEST glusterd
+TEST $CLI volume set all cluster.brick-multiplex on
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3}
+TEST $CLI volume start $V0
+
+EXPECT 1 count_brick_processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count
+
+pkill gluster
+TEST glusterd
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count
+
+TEST $CLI volume create $V1 $H0:$B0/${V1}{1..3}
+TEST $CLI volume start $V1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
+
+#bug-1560957 - brick status goes offline after remove-brick followed by add-brick
+
+pkill glusterd
+TEST glusterd
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 force
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}1_new force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
+
+#bug-1446172 - reset brick with brick multiplexing enabled
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+# Create files
+for i in {1..5}
+do
+ echo $i > $M0/file$i.txt
+done
+
+TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}1_new start
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 5 online_brick_count
+EXPECT 1 count_brick_processes
+
+# Negative case with brick killed but volume-id xattr present
+TEST ! $CLI volume reset-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1 commit
+
+# reset-brick commit force should work and should bring up the brick
+TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}1_new $H0:$B0/${V0}1_new commit force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
+EXPECT 1 count_brick_processes
+TEST glusterfs --volfile-id=$V1 --volfile-server=$H0 $M1;
+# Create files
+for i in {1..5}
+do
+ echo $i > $M1/file$i.txt
+done
+
+TEST $CLI volume reset-brick $V1 $H0:$B0/${V1}1 start
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 5 online_brick_count
+EXPECT 1 count_brick_processes
+
+# Simulate reset disk
+for i in {1..5}
+do
+ rm -rf $B0/${V1}1/file$i.txt
+done
+
+setfattr -x trusted.glusterfs.volume-id $B0/${V1}1
+setfattr -x trusted.gfid $B0/${V1}1
+
+# Test reset-brick commit. Using CLI_IGNORE_PARTITION since normal CLI uses
+# the --wignore flag that essentially makes the command act like "commit force"
+TEST $CLI_IGNORE_PARTITION volume reset-brick $V1 $H0:$B0/${V1}1 $H0:$B0/${V1}1 commit
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
+EXPECT 1 count_brick_processes
+
+cleanup;
diff --git a/tests/bugs/glusterd/brick-mux.t b/tests/bugs/glusterd/brick-mux.t
new file mode 100644
index 00000000000..927940534c1
--- /dev/null
+++ b/tests/bugs/glusterd/brick-mux.t
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
+cleanup
+
+#bug-1444596 - validating brick mux
+
+TEST glusterd -LDEBUG
+TEST $CLI volume create $V0 $H0:$B0/brick{0,1}
+TEST $CLI volume create $V1 $H0:$B0/brick{2,3}
+
+TEST $CLI volume set all cluster.brick-multiplex on
+
+TEST $CLI volume start $V0
+TEST $CLI volume start $V1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+EXPECT 1 count_brick_processes
+
+#bug-1499509 - stop all the bricks when a brick process is killed
+kill -9 $(pgrep glusterfsd)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 online_brick_count
+
+TEST $CLI volume start $V0 force
+TEST $CLI volume start $V1 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+
+
+pkill glusterd
+TEST glusterd
+
+#Check brick status after restart glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+EXPECT 1 count_brick_processes
+
+TEST $CLI volume set $V1 performance.io-cache-size 32MB
+TEST $CLI volume stop $V1
+TEST $CLI volume start $V1
+
+#Check No. of brick processes after change option
+EXPECT 2 count_brick_processes
+
+pkill glusterd
+TEST glusterd
+
+#Brick status should not be N/A after restarting glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+EXPECT 2 count_brick_processes
+
+pkill glusterd
+TEST glusterd
+
+#Brick status should not be N/A after restarting glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+EXPECT 2 count_brick_processes
+
+#bug-1444596_brick_mux_posix_hlth_chk_status
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+TEST rm -rf $H0:$B0/brick{0,1}
+
+#Check the no. of brick processes after removing the bricks from the back-end
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 online_brick_count
+
+TEST glusterfs -s $H0 --volfile-id $V1 $M0
+TEST touch $M0/file{1..10}
+
+pkill glusterd
+TEST glusterd -LDEBUG
+sleep 5
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 online_brick_count
+
+cleanup
+
diff --git a/tests/bugs/glusterd/brick-order-check-add-brick.t b/tests/bugs/glusterd/brick-order-check-add-brick.t
new file mode 100644
index 00000000000..0be31dac768
--- /dev/null
+++ b/tests/bugs/glusterd/brick-order-check-add-brick.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+TEST verify_lvm_version;
+#Create cluster with 3 nodes
+TEST launch_cluster 3 -NO_DEBUG -NO_FORCE
+TEST setup_lvm 3
+
+TEST $CLI_1 peer probe $H2
+TEST $CLI_1 peer probe $H3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+TEST $CLI_1 volume create $V0 replica 3 $H1:$L1/$V0 $H2:$L2/$V0 $H3:$L3/$V0
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+#add-brick with or without mentioning the replica count should not fail
+TEST $CLI_1 volume add-brick $V0 replica 3 $H1:$L1/${V0}_1 $H2:$L2/${V0}_1 $H3:$L3/${V0}_1
+EXPECT '2 x 3 = 6' volinfo_field $V0 'Number of Bricks'
+
+TEST $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_2 $H2:$L2/${V0}_2 $H3:$L3/${V0}_2
+EXPECT '3 x 3 = 9' volinfo_field $V0 'Number of Bricks'
+
+#adding bricks from same host should fail the brick order check
+TEST ! $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_3 $H1:$L1/${V0}_4 $H1:$L1/${V0}_5
+EXPECT '3 x 3 = 9' volinfo_field $V0 'Number of Bricks'
+
+#adding bricks from same host with force should succeed
+TEST $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_3 $H1:$L1/${V0}_4 $H1:$L1/${V0}_5 force
+EXPECT '4 x 3 = 12' volinfo_field $V0 'Number of Bricks'
+
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume delete $V0
+
+TEST $CLI_1 volume create $V0 replica 2 $H1:$L1/${V0}1 $H2:$L2/${V0}1
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks'
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+#Add-brick with Increasing replica count
+TEST $CLI_1 volume add-brick $V0 replica 3 $H3:$L3/${V0}1
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
+
+#Add-brick with Increasing replica count from same host should fail
+TEST ! $CLI_1 volume add-brick $V0 replica 5 $H1:$L1/${V0}2 $H1:$L1/${V0}3
+
+#adding multiple bricks from same host should fail the brick order check
+TEST ! $CLI_1 volume add-brick $V0 replica 3 $H1:$L1/${V0}{4..6} $H2:$L2/${V0}{7..9}
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1070734.t b/tests/bugs/glusterd/bug-1070734.t
new file mode 100755
index 00000000000..0afcb3b37b3
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1070734.t
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+## Let's create a volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 nfs.disable false
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0;
+
+############################################################################
+#TEST-PLAN:
+#Create a directory DIR and a file inside DIR
+#Check which brick the file hashes to
+#Delete the directory so it can be recreated later, after the remove-brick
+#Remove the brick the file hashed to
+#Once remove-brick status reports completion, recreate the same directory \
+#DIR and file
+#Check that the file now lands on the other brick
+#Check that the other brick gets the full layout and the removed brick gets \
+#the zeroed layout
+############################################################################
+
+TEST mkdir $N0/DIR;
+
+TEST touch $N0/DIR/file;
+
+if [ -f $B0/${V0}1/DIR/file ]
+then
+ HASHED=$B0/${V0}1;
+ OTHERBRICK=$B0/${V0}2;
+else
+ HASHED=$B0/${V0}2;
+ OTHERBRICK=$B0/${V0}1;
+fi
+
+TEST rm -f $N0/DIR/file;
+TEST rmdir $N0/DIR;
+TEST $CLI volume remove-brick $V0 $H0:${HASHED} start;
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" \
+"$H0:${HASHED}";
+
+TEST mkdir $N0/DIR;
+TEST touch $N0/DIR/file;
+
+#Check now the file should fall in to OTHERBRICK
+TEST [ -f ${OTHERBRICK}/DIR/file ]
+
+#The DIR on HASHED should now have the zeroed layout and the one on \
+#OTHERBRICK the full layout
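+#shorter_layout keeps only the tail of the value printed by dht_get_layout,
+#i.e. the start/stop of the directory's hash range: all zeroes is an empty
+#(zeroed) layout, while 00000000ffffffff covers the full hash range.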
+shorter_layout () {
+ dht_get_layout $1 | cut -c 19-
+}
+EXPECT "0000000000000000" shorter_layout $HASHED/DIR ;
+EXPECT "00000000ffffffff" shorter_layout $OTHERBRICK/DIR;
+
+## Unmount NFS before killing daemons, to avoid deadlocks
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1085330-and-bug-916549.t b/tests/bugs/glusterd/bug-1085330-and-bug-916549.t
new file mode 100644
index 00000000000..892a30d74ea
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1085330-and-bug-916549.t
@@ -0,0 +1,93 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+STR="1234567890"
+volname="Vol"
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+#testcase: bug-1085330
+
+# Construct a volname string such that it is more than 256 characters
+for i in {1..30}
+do
+ volname+=$STR
+done
+# Now $volname is "Vol" + 30*10 = 303 chars, i.e. more than 256
+
+TEST ! $CLI volume create $volname $H0:$B0/${volname}{1,2};
+
+TEST $CLI volume info;
+
+# Construct a brick string such that it is more than 256 characters
+volname="Vol1234"
+brick="brick"
+for i in {1..30}
+do
+ brick+=$STR
+done
+# Now $brick is "brick" + 30*10 = 305 chars, i.e. more than 256
+
+TEST ! $CLI volume create $volname $H0:$B0/$brick;
+
+TEST $CLI volume info;
+
+# Now try to create a volume with a couple of bricks (strlen(volname) = 128 and
+# strlen(brick) = 128).
+# The command should still fail as strlen(volfpath) > 256.
+
+volname="Volume-0"
+brick="brick-00"
+STR="12345678"
+
+for i in {1..15}
+do
+ volname+=$STR
+ brick+=$STR
+done
+TEST ! $CLI volume create $volname $H0:$B0/$brick;
+
+TEST $CLI volume info;
+
+# Test case with a brick path of 255 chars ending in a trailing "/"
+brick=""
+STR1="12345678"
+volname="vol"
+
+for i in {1..31}
+do
+ brick+=$STR1
+done
+brick+="123456/"
+
+echo $brick | wc -c
+# Now $brick is exactly 255 chars, ending with a trailing "/"
+# This will still fail as the volfpath exceeds the maximum path length (_POSIX_PATH_MAX)
+
+TEST ! $CLI volume create $volname $H0:$B0/$brick;
+
+TEST $CLI volume info;
+
+# Positive test case
+TEST $CLI volume create $V0 $H0:$B0/${V0}1;
+
+TEST $CLI volume info;
+
+TEST $CLI volume start $V0;
+
+#testcase: bug-916549
+
+pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/);
+brick_pid=$(cat $GLUSTERD_PIDFILEDIR/vols/$V0/$pid_file);
+
+kill -SIGKILL $brick_pid;
+TEST $CLI volume start $V0 force;
+TEST process_leak_count $(pidof glusterd);
+
+cleanup
+
diff --git a/tests/bugs/glusterd/bug-1091935-brick-order-check-from-cli-to-glusterd.t b/tests/bugs/glusterd/bug-1091935-brick-order-check-from-cli-to-glusterd.t
new file mode 100755
index 00000000000..9ac9d8fedd9
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1091935-brick-order-check-from-cli-to-glusterd.t
@@ -0,0 +1,93 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+## Let's create partitions for bricks
+TEST truncate -s 100M $B0/brick1
+TEST truncate -s 200M $B0/brick2
+TEST truncate -s 200M $B0/brick3
+TEST truncate -s 200M $B0/brick4
+
+
+TEST LO1=`SETUP_LOOP $B0/brick1`
+TEST LO2=`SETUP_LOOP $B0/brick2`
+TEST LO3=`SETUP_LOOP $B0/brick3`
+TEST LO4=`SETUP_LOOP $B0/brick4`
+
+
+TEST MKFS_LOOP $LO1
+TEST MKFS_LOOP $LO2
+TEST MKFS_LOOP $LO3
+TEST MKFS_LOOP $LO4
+
+TEST mkdir -p ${B0}/${V0}{0..3}
+
+
+TEST MOUNT_LOOP $LO1 $B0/${V0}0
+TEST MOUNT_LOOP $LO2 $B0/${V0}1
+TEST MOUNT_LOOP $LO3 $B0/${V0}2
+TEST MOUNT_LOOP $LO4 $B0/${V0}3
+
+
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+
+
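+# Strip the --wignore flag that the test harness appends to $CLI_1, so the
+# brick-order and path checks below are actually enforced instead of being
+# silently ignored.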
+CLI_1_WITHOUT_WIGNORE=$(echo $CLI_1 | sed 's/ --wignore//')
+
+
+# Creating volume with non resolvable host name
+TEST ! $CLI_1_WITHOUT_WIGNORE volume create $V0 replica 2 \
+ $H1:$B0/${V0}0/brick redhat:$B0/${V0}1/brick \
+ $H1:$B0/${V0}2/brick redhat:$B0/${V0}3/brick
+
+
+#Workaround for Bug:1091935
+#Failure to create volume above leaves 1st brick with xattrs.
+rm -rf $B0/${V0}{0..3}/brick;
+
+
+# Creating distribute-replica volume with bad brick order. It will fail
+# due to bad brick order.
+TEST ! $CLI_1_WITHOUT_WIGNORE volume create $V0 replica 2 \
+ $H1:$B0/${V0}0/brick $H1:$B0/${V0}1/brick \
+ $H1:$B0/${V0}2/brick $H1:$B0/${V0}3/brick
+
+
+
+#Workaround for Bug:1091935
+#Failure to create volume above leaves 1st brick with xattrs.
+rm -rf $B0/${V0}{0..3}/brick;
+
+
+
+# Test for positive case, volume create should pass for
+# resolved hostnames and bricks in order.
+TEST $CLI_1_WITHOUT_WIGNORE volume create $V0 replica 2 \
+ $H1:$B0/${V0}0/brick $H2:$B0/${V0}1/brick \
+ $H1:$B0/${V0}2/brick $H2:$B0/${V0}3/brick
+
+# Delete the volume as we want to reuse bricks
+TEST $CLI_1_WITHOUT_WIGNORE volume delete $V0
+
+
+# With force at the end of the command, the brick-order check is bypassed
+# for replicate/distribute-replicate volumes and the volume gets created.
+TEST $CLI_1_WITHOUT_WIGNORE volume create $V0 replica 2 \
+ $H1:$B0/${V0}0/brick $H1:$B0/${V0}1/brick \
+ $H1:$B0/${V0}2/brick $H1:$B0/${V0}3/brick force
+
+# Need to cleanup the loop back devices.
+UMOUNT_LOOP ${B0}/${V0}{0..3}
+rm -f ${B0}/brick{1..4}
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1238706-daemons-stop-on-peer-cleanup.t b/tests/bugs/glusterd/bug-1238706-daemons-stop-on-peer-cleanup.t
new file mode 100644
index 00000000000..7be076caaf3
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1238706-daemons-stop-on-peer-cleanup.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+## Test case for stopping all running daemon services on peer detach.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+
+## Start a 2 node virtual cluster
+TEST launch_cluster 2;
+
+## Peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+
+## Creating and starting volume
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H1:$B1/${V0}1
+TEST $CLI_1 volume start $V0
+
+TEST $CLI_1 volume set $V0 nfs.disable off
+
+## To Do: Add test cases for the quota and snapshot daemons. Currently the
+## quota daemon does not work in the cluster framework, and the snapd daemon
+## starts only on one node in the cluster framework. Add the test cases
+## once patch http://review.gluster.org/#/c/11666/ is merged.
+
+## With 2 nodes, the "nfs" daemon should run on both of them.
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_nfs_count
+
+## Detach 2nd node from the cluster.
+TEST $CLI_1 peer detach $H2;
+
+
+## After detaching the 2nd node, only 1 nfs daemon should be left running.
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_nfs_count
+
+cleanup;
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
diff --git a/tests/bugs/glusterd/bug-1242875-do-not-pass-volinfo-quota.t b/tests/bugs/glusterd/bug-1242875-do-not-pass-volinfo-quota.t
new file mode 100644
index 00000000000..c229d4371b6
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1242875-do-not-pass-volinfo-quota.t
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## Let's create volume V0 and start the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+
+## Let's create volume V1 and start the volume
+TEST $CLI volume create $V1 $H0:$B0/${V0}2 $H0:$B0/${V0}3
+TEST $CLI volume start $V1
+
+## Enable quota on 2nd volume
+TEST $CLI volume quota $V1 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_quotad_count
+
+## Killing all gluster processes
+pkill gluster;
+
+## There should be no quota daemon running after killing the gluster processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_quotad_count
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## Quotad daemon should start on restarting the glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_quotad_count
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1482906-peer-file-blank-line.t b/tests/bugs/glusterd/bug-1482906-peer-file-blank-line.t
new file mode 100644
index 00000000000..967595e4dbb
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1482906-peer-file-blank-line.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+#Tests for add new line in peers file
+function add_new_line_to_peer_file {
+ UUID_NAME=$($CLI_1 peer status | grep Uuid)
+ PEER_ID=$(echo $UUID_NAME | cut -c 7-)
+ GD_WD=$($CLI_1 system getwd)
+ GD_WD+=/peers/
+ PATH_TO_PEER_FILE=$GD_WD$PEER_ID
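+ # Corrupt the peer file by inserting a blank line before the first line and
+ # appending one after the last line; glusterd should still parse it on restart.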
+ sed -i '1s/^/\n/gm; $s/$/\n/gm' $PATH_TO_PEER_FILE
+}
+
+cleanup;
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+add_new_line_to_peer_file
+
+TEST kill_glusterd 1
+TEST $glusterd_1
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1595320.t b/tests/bugs/glusterd/bug-1595320.t
new file mode 100644
index 00000000000..c10e11821a1
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1595320.t
@@ -0,0 +1,93 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup
+
+function count_up_bricks {
+ $CLI --xml volume status $V0 | grep '<status>1' | wc -l
+}
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
+# Set up 3 LVM bricks
+LVM_PREFIX="test"
+TEST init_n_bricks 3
+TEST setup_lvm 3
+
+# Start glusterd
+TEST glusterd
+TEST pidof glusterd
+
+# Create volume and enable brick multiplexing
+TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3
+TEST $CLI v set all cluster.brick-multiplex on
+
+# Start the volume
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_up_bricks
+EXPECT 1 count_brick_processes
+
+# Grab the brick pid (the brick process is killed ungracefully further below)
+brick_pid=`pgrep glusterfsd`
+
+# Make sure every brick root is held open by the brick process
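+# With multiplexing, the single glusterfsd keeps one open fd under each
+# attached brick root (excluding .glusterfs), so counting matching entries in
+# /proc/<pid>/fd tells us which bricks are actually attached.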
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L1 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L2 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L3 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+
+b1_pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/*d-backends-1*.pid)
+b2_pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/*d-backends-2*.pid)
+b3_pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/*d-backends-3*.pid)
+
+kill -9 $brick_pid
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 count_brick_processes
+
+# Unmount 3rd brick root from node
+brick_root=$L3
+_umount_lv 3
+
+# Start the volume; only 2 bricks should come up
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_up_bricks
+EXPECT 1 count_brick_processes
+
+brick_pid=`pgrep glusterfsd`
+
+# Make sure only two brick roots are held open by the brick process
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L1 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L2 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L3 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 0 ]
+
+# Mount the brick root
+TEST mkdir -p $brick_root
+TEST mount -t xfs -o nouuid /dev/test_vg_3/brick_lvm $brick_root
+
+# Replace brick_pid file to test brick_attach code
+TEST cp $b1_pid_file $b3_pid_file
+
+# Start the volume; all bricks should be up
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_up_bricks
+EXPECT 1 count_brick_processes
+
+# Make sure every brick root is held open by the brick process
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L1 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L2 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L3 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1696046.t b/tests/bugs/glusterd/bug-1696046.t
new file mode 100644
index 00000000000..e1c1eb2ceb9
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1696046.t
@@ -0,0 +1,113 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+function count_up_bricks {
+ $CLI --xml volume status $1 | grep '<status>1' | wc -l
+}
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
+logdir=`gluster --print-logdir`
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume set all cluster.brick-multiplex on
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3};
+TEST $CLI volume create $V1 replica 3 $H0:$B0/${V1}{1,2,3};
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $CLI volume start $V1;
+EXPECT 'Started' volinfo_field $V1 'Status';
+
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_up_bricks $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_up_bricks $V1
+
+EXPECT 1 count_brick_processes
+
+# Mount V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+function client-log-file-name()
+{
+ logfilename=$M0".log"
+ echo ${logfilename:1} | tr / -
+}
+
+function brick-log-file-name()
+{
+ logfilename=$B0"/"$V0"1.log"
+ echo ${logfilename:1} | tr / -
+}
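+
+# Both helpers turn an absolute path into the flat log file name glusterfs
+# uses: drop the leading "/" and replace every remaining "/" with "-", e.g. a
+# mount point such as /mnt/glusterfs/0 maps to mnt-glusterfs-0.log.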
+
+log_file=$logdir"/"`client-log-file-name`
+nofdlog=$(cat $log_file | grep " D " | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+brick_log_file=$logdir"/bricks/"`brick-log-file-name`
+nofdlog=$(cat $brick_log_file | grep " D " | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+## Set brick-log-level to DEBUG
+TEST $CLI volume set $V0 diagnostics.brick-log-level DEBUG
+
+# Do some operation
+touch $M0/file1
+
+# Debug messages should exist only for V0.
+# The server xlator is shared under brick_mux, so after enabling DEBUG logging
+# debug messages should be present from the other xlators, such as posix.
+
+brick_log_file=$logdir"/bricks/"`brick-log-file-name`
+nofdlog=$(cat $brick_log_file | grep file1 | grep -v server | wc -l)
+TEST [ $((nofdlog)) -ne 0 ]
+
+#Check if any debug log exist in client-log file
+nofdlog=$(cat $log_file | grep " D " | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+## Set brick-log-level to INFO
+TEST $CLI volume set $V0 diagnostics.brick-log-level INFO
+
+## Set client-log-level to DEBUG
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+
+# Do some operation
+touch $M0/file2
+
+nofdlog=$(cat $brick_log_file | grep " D " | grep file2 | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+nofdlog=$(cat $log_file | grep " D " | wc -l)
+TEST [ $((nofdlog)) -ne 0 ]
+
+# Unmount V0
+TEST umount $M0
+
+#Mount V1
+TEST glusterfs --volfile-id=$V1 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+#do some operation
+touch $M0/file3
+
+
+# DEBUG log level is enabled only for V0, so no debug message should appear
+# in the log for the file3 creation, except from the server xlator, which is
+# a shared xlator under brick multiplex.
+nofdlog=$(cat $brick_log_file | grep file3 | grep -v server | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+# Unmount V1
+TEST umount $M0
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1699339.t b/tests/bugs/glusterd/bug-1699339.t
new file mode 100644
index 00000000000..bb8d4f46eb8
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1699339.t
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+NUM_VOLS=15
+
+
+get_brick_base () {
+ printf "%s/vol%02d" $B0 $1
+}
+
+function count_up_bricks {
+ vol=$1;
+ $CLI_1 --xml volume status $vol | grep '<status>1' | wc -l
+}
+
+create_volume () {
+
+ local vol_name=$(printf "%s-vol%02d" $V0 $1)
+
+ TEST $CLI_1 volume create $vol_name replica 3 $H1:$B1/${vol_name} $H2:$B2/${vol_name} $H3:$B3/${vol_name}
+ TEST $CLI_1 volume start $vol_name
+}
+
+TEST launch_cluster 3
+TEST $CLI_1 volume set all cluster.brick-multiplex on
+
+# The option accepts the value in the range from 5 to 200
+TEST ! $CLI_1 volume set all glusterd.vol_count_per_thread 210
+TEST ! $CLI_1 volume set all glusterd.vol_count_per_thread 4
+
+TEST $CLI_1 volume set all glusterd.vol_count_per_thread 5
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+# Our infrastructure can't handle an arithmetic expression here. The formula
+# is (NUM_VOLS-1)*2 because it sees each TEST/EXPECT once but needs the other
+# NUM_VOLS-1, and there are 2 such statements in each iteration.
+TESTS_EXPECTED_IN_LOOP=28
+for i in $(seq 1 $NUM_VOLS); do
+ starttime="$(date +%s)";
+ create_volume $i
+done
+
+TEST kill_glusterd 1
+
+TESTS_EXPECTED_IN_LOOP=4
+for i in `seq 1 3 15`
+do
+vol1=$(printf "%s-vol%02d" $V0 $i)
+TEST $CLI_2 volume set $vol1 performance.readdir-ahead on
+done
+
+# Bring back 1st glusterd
+TEST $glusterd_1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+TESTS_EXPECTED_IN_LOOP=4
+for i in `seq 1 3 15`
+do
+vol1=$(printf "%s-vol%02d" $V0 $i)
+EXPECT_WITHIN $PROBE_TIMEOUT "on" volinfo_field_1 $vol1 performance.readdir-ahead
+done
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1720566.t b/tests/bugs/glusterd/bug-1720566.t
new file mode 100644
index 00000000000..99bcf6ff785
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1720566.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+
+cleanup;
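+# Deliberately long volume names, to check that volume operations and
+# rebalance work with such names.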
+V0="TestLongVolnamec363b7b536700ff06eedeae0dd9037fec363b7b536700ff06eedeae0dd9037fec363b7b536700ff06eedeae0dd9abcd"
+V1="TestLongVolname3102bd28a16c49440bd5210e4ec4d5d93102bd28a16c49440bd5210e4ec4d5d933102bd28a16c49440bd5210e4ebbcd"
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+$CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
+$CLI_1 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1
+EXPECT 'Created' cluster_volinfo_field 1 $V1 'Status';
+
+$CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+$CLI_1 volume start $V1
+EXPECT 'Started' cluster_volinfo_field 1 $V1 'Status';
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
+
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V1 $M1;
+
+TEST mkdir $M0/dir{1..4};
+TEST touch $M0/dir{1..4}/files{1..4};
+
+TEST mkdir $M1/dir{1..4};
+TEST touch $M1/dir{1..4}/files{1..4};
+
+TEST $CLI_1 volume add-brick $V0 $H1:$B1/${V0}_1 $H2:$B2/${V0}_1
+TEST $CLI_1 volume add-brick $V1 $H1:$B1/${V1}_1 $H2:$B2/${V1}_1
+
+
+TEST $CLI_1 volume rebalance $V0 start
+TEST $CLI_1 volume rebalance $V1 start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V0
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V1
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-824753-file-locker.c b/tests/bugs/glusterd/bug-824753-file-locker.c
new file mode 100644
index 00000000000..f5dababad30
--- /dev/null
+++ b/tests/bugs/glusterd/bug-824753-file-locker.c
@@ -0,0 +1,46 @@
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+int
+main(int argc, char *argv[])
+{
+    int fd = -1;
+    int ret = -1;
+    char command[2048] = "";
+    char filepath[255] = "";
+    struct flock fl;
+
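+    /* Take a write lock (F_SETLKW) on one byte at offset 7 of the file; the
+     * clear-locks command below is then run while the lock is still held and
+     * its output is checked for the expected brick. */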
+    fl.l_type = F_WRLCK;
+    fl.l_whence = SEEK_SET;
+    fl.l_start = 7;
+    fl.l_len = 1;
+    fl.l_pid = getpid();
+
+    snprintf(filepath, 255, "%s/%s", argv[4], argv[5]);
+
+    fd = open(filepath, O_RDWR);
+
+    if (fd == -1)
+        return -1;
+
+    if (fcntl(fd, F_SETLKW, &fl) == -1) {
+        return -1;
+    }
+
+    snprintf(command, sizeof(command),
+             "gluster volume clear-locks %s /%s kind all posix 0,7-1 |"
+             " grep %s | awk -F'..: ' '{print $1}' | grep %s:%s/%s",
+             argv[1], argv[5], argv[2], argv[2], argv[3], argv[1]);
+
+    ret = system(command);
+    close(fd);
+
+    if (ret)
+        return -1;
+    else
+        return 0;
+}
diff --git a/tests/bugs/glusterd/bug-824753.t b/tests/bugs/glusterd/bug-824753.t
new file mode 100755
index 00000000000..b969e28f35e
--- /dev/null
+++ b/tests/bugs/glusterd/bug-824753.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0
+touch $M0/file1;
+
+TEST $CC -g $(dirname $0)/bug-824753-file-locker.c -o $(dirname $0)/file-locker
+
+TEST $(dirname $0)/file-locker $V0 $H0 $B0 $M0 file1
+
+## Finish up
+TEST rm -f $(dirname $0)/file-locker
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-948729/bug-948729-force.t b/tests/bugs/glusterd/bug-948729/bug-948729-force.t
new file mode 100644
index 00000000000..f4f71f9a1e2
--- /dev/null
+++ b/tests/bugs/glusterd/bug-948729/bug-948729-force.t
@@ -0,0 +1,103 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+uuid1=`uuidgen`;
+uuid2=`uuidgen`;
+uuid3=`uuidgen`;
+
+V1=patchy1
+V2=patchy2
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+
+B3=/d/backends/3
+B4=/d/backends/4
+B5=/d/backends/5
+B6=/d/backends/6
+
+mkdir -p $B3 $B4 $B5 $B6
+
+TEST truncate -s 16M $B1/brick1
+TEST truncate -s 16M $B2/brick2
+TEST truncate -s 16M $B3/brick3
+TEST truncate -s 16M $B4/brick4
+TEST truncate -s 16M $B5/brick5
+TEST truncate -s 16M $B6/brick6
+
+TEST LD1=`SETUP_LOOP $B1/brick1`
+TEST MKFS_LOOP $LD1
+TEST LD2=`SETUP_LOOP $B2/brick2`
+TEST MKFS_LOOP $LD2
+TEST LD3=`SETUP_LOOP $B3/brick3`
+TEST MKFS_LOOP $LD3
+TEST LD4=`SETUP_LOOP $B4/brick4`
+TEST MKFS_LOOP $LD4
+TEST LD5=`SETUP_LOOP $B5/brick5`
+TEST MKFS_LOOP $LD5
+TEST LD6=`SETUP_LOOP $B6/brick6`
+TEST MKFS_LOOP $LD6
+
+mkdir -p $B1/$V0 $B2/$V0 $B3/$V0 $B4/$V0 $B5/$V0 $B6/$V0
+
+TEST MOUNT_LOOP $LD1 $B1/$V0
+TEST MOUNT_LOOP $LD2 $B2/$V0
+TEST MOUNT_LOOP $LD3 $B3/$V0
+TEST MOUNT_LOOP $LD4 $B4/$V0
+TEST MOUNT_LOOP $LD5 $B5/$V0
+TEST MOUNT_LOOP $LD6 $B6/$V0
+
+#Case 0: Parent directory of the brick is absent
+TEST ! $CLI1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2 force
+
+#Case 1: File system root is being used as brick directory
+TEST $CLI1 volume create $V0 $H1:$B5/$V0 $H2:$B6/$V0 force
+
+#Case 2: Brick directory contains only one component
+TEST $CLI1 volume create $V1 $H1:/$uuid1 $H2:/$uuid2 force
+
+#Case 3: Sub-directories of the backend FS being used as brick directory
+TEST $CLI1 volume create $V2 $H1:$B1/$V0/brick1 $H2:$B2/$V0/brick2 force
+
+#add-brick tests
+TEST ! $CLI1 volume add-brick $V0 $H1:$B3/$V0/nonexistent/brick3 force
+TEST $CLI1 volume add-brick $V0 $H1:$B3/$V0 force
+TEST $CLI1 volume add-brick $V1 $H1:/$uuid3 force
+TEST $CLI1 volume add-brick $V2 $H1:$B4/$V0/brick3 force
+
+#####replace-brick tests
+#FIX-ME: replace-brick does not work with the newly introduced cluster test
+#####framework
+
+rmdir /$uuid1 /$uuid2 /$uuid3;
+
+$CLI volume stop $V0
+$CLI volume stop $V1
+$CLI volume stop $V2
+
+UMOUNT_LOOP $B1/$V0
+UMOUNT_LOOP $B2/$V0
+UMOUNT_LOOP $B3/$V0
+UMOUNT_LOOP $B4/$V0
+UMOUNT_LOOP $B5/$V0
+UMOUNT_LOOP $B6/$V0
+
+rm -f $B1/brick1
+rm -f $B2/brick2
+rm -f $B3/brick3
+rm -f $B4/brick4
+rm -f $B5/brick5
+rm -f $B6/brick6
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-948729/bug-948729-mode-script.t b/tests/bugs/glusterd/bug-948729/bug-948729-mode-script.t
new file mode 100644
index 00000000000..18bf9a1c4b6
--- /dev/null
+++ b/tests/bugs/glusterd/bug-948729/bug-948729-mode-script.t
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+uuid1=`uuidgen`;
+uuid2=`uuidgen`;
+uuid3=`uuidgen`;
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+
+B3=/d/backends/3
+mkdir -p $B3
+
+TEST truncate -s 16M $B1/brick1
+TEST truncate -s 16M $B2/brick2
+TEST truncate -s 16M $B3/brick3
+
+TEST LD1=`SETUP_LOOP $B1/brick1`
+TEST MKFS_LOOP $LD1
+TEST LD2=`SETUP_LOOP $B2/brick2`
+TEST MKFS_LOOP $LD2
+TEST LD3=`SETUP_LOOP $B3/brick3`
+TEST MKFS_LOOP $LD3
+
+mkdir -p $B1/$V0 $B2/$V0 $B3/$V0
+
+TEST MOUNT_LOOP $LD1 $B1/$V0
+TEST MOUNT_LOOP $LD2 $B2/$V0
+TEST MOUNT_LOOP $LD3 $B3/$V0
+
+cli1=$(echo $CLI1 | sed 's/ --wignore//')
+
+#Case 0: Parent directory of the brick is absent
+TEST ! $cli1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2
+
+#Case 1: File system root being used as brick directory
+TEST ! $cli1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+
+#Case 2: Brick directory contains only one component
+TEST ! $cli1 volume create $V0 $H1:/$uuid1 $H2:/$uuid2
+
+#Case 3: Sub-directories of the backend FS being used as brick directory
+TEST $cli1 volume create $V0 $H1:$B1/$V0/brick1 $H2:$B2/$V0/brick2
+
+#add-brick tests
+TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0/nonexistent/brick3
+TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0
+TEST ! $cli1 volume add-brick $V0 $H1:/$uuid3
+TEST $cli1 volume add-brick $V0 $H1:$B3/$V0/brick3
+
+#####replace-brick tests
+#FIX-ME : replace-brick does not currently work in the newly introduced
+#####cluster test framework
+
+$CLI1 volume stop $V0
+
+UMOUNT_LOOP $B1/$V0
+UMOUNT_LOOP $B2/$V0
+UMOUNT_LOOP $B3/$V0
+
+rm -f $B1/brick1
+rm -f $B2/brick2
+rm -f $B3/brick3
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-948729/bug-948729.t b/tests/bugs/glusterd/bug-948729/bug-948729.t
new file mode 100644
index 00000000000..2b574aa1a14
--- /dev/null
+++ b/tests/bugs/glusterd/bug-948729/bug-948729.t
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+uuid1=`uuidgen`;
+uuid2=`uuidgen`;
+uuid3=`uuidgen`;
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+
+B3=/d/backends/3
+
+mkdir -p $B3
+
+TEST truncate -s 16M $B1/brick1
+TEST truncate -s 16M $B2/brick2
+TEST truncate -s 16M $B3/brick3
+
+TEST LD1=`SETUP_LOOP $B1/brick1`
+TEST MKFS_LOOP $LD1
+TEST LD2=`SETUP_LOOP $B2/brick2`
+TEST MKFS_LOOP $LD2
+TEST LD3=`SETUP_LOOP $B3/brick3`
+TEST MKFS_LOOP $LD3
+
+mkdir -p $B1/$V0 $B2/$V0 $B3/$V0
+
+TEST MOUNT_LOOP $LD1 $B1/$V0
+TEST MOUNT_LOOP $LD2 $B2/$V0
+TEST MOUNT_LOOP $LD3 $B3/$V0
+
+#Tests without options 'mode=script' and 'wignore'
+cli1=$(echo $CLI1 | sed 's/ --mode=script//')
+cli1=$(echo $cli1 | sed 's/ --wignore//')
+#Case 0: Parent directory of the brick is absent
+TEST ! $cli1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2
+
+#Case 1: File system root being used as brick directory
+TEST ! $cli1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+
+#Case 2: Brick directory contains only one component
+TEST ! $cli1 volume create $V0 $H1:/$uuid1 $H2:/$uuid2
+
+#Case 3: Sub-directories of the backend FS being used as brick directory
+TEST $cli1 volume create $V0 $H1:$B1/$V0/brick1 $H2:$B2/$V0/brick2
+
+#add-brick tests
+TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0/nonexistent/b3
+TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0
+TEST ! $cli1 volume add-brick $V0 $H1:/$uuid3
+TEST $cli1 volume add-brick $V0 $H1:$B3/$V0/brick3
+
+#####replace-brick tests
+#FIX-ME: Replace-brick does not work currently in the newly introduced cluster
+#####test framework.
+
+$CLI1 volume stop $V0
+
+UMOUNT_LOOP $B1/$V0
+UMOUNT_LOOP $B2/$V0
+UMOUNT_LOOP $B3/$V0
+
+rm -f $B1/brick1
+rm -f $B2/brick2
+rm -f $B3/brick3
+
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-949930.t b/tests/bugs/glusterd/bug-949930.t
new file mode 100644
index 00000000000..9a6d38fa37f
--- /dev/null
+++ b/tests/bugs/glusterd/bug-949930.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+V1=patchy2
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 nfs.disable off
+TEST $CLI volume start $V0;
+
+TEST $CLI volume create $V1 $H0:$B0/${V1}{1,2};
+TEST $CLI volume set $V1 nfs.disable off
+TEST $CLI volume start $V1;
+
+TEST ! $CLI volume set $V0 performance.nfs.read-ahead blah
+EXPECT '' volume_option $V0 performance.nfs.read-ahead
+
+TEST $CLI volume set $V0 performance.nfs.read-ahead on
+EXPECT "on" volume_option $V0 performance.nfs.read-ahead
+
+EXPECT '' volume_option $V1 performance.nfs.read-ahead
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/check_elastic_server.t b/tests/bugs/glusterd/check_elastic_server.t
new file mode 100644
index 00000000000..41d2140aa2b
--- /dev/null
+++ b/tests/bugs/glusterd/check_elastic_server.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
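+# Read the rebalance status via node 2's CLI; node 1's glusterd is killed
+# later in the test, so the status has to be queried from the second node.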
+function cluster_rebalance_status {
+ local vol=$1
+ $CLI_2 volume status | grep -iw "Rebalance" -A 5 | grep "Status" | sed 's/.*: //'
+}
+
+cleanup;
+TEST launch_cluster 4;
+TEST $CLI_1 peer probe $H2;
+TEST $CLI_1 peer probe $H3;
+TEST $CLI_1 peer probe $H4;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 3 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
+
+$CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+#Mount invalid volume
+TEST ! glusterfs -s $H1 --volfile-id=$V0_NA $M0;
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
+
+TEST mkdir $M0/dir{1..4};
+TEST touch $M0/dir{1..4}/files{1..4};
+
+TEST $CLI_1 volume remove-brick $V0 $H1:$B1/$V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_remove_brick_status_completed_field "$V0 $H1:$B1/$V0"
+
+TEST $CLI_1 volume remove-brick $V0 $H1:$B1/$V0 commit
+
+kill_glusterd 1
+
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+TEST $CLI_2 volume add-brick $V0 $H3:$B3/$V0
+
+TEST $CLI_2 volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status $V0
+
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+TEST $CLI_2 volume add-brick $V0 $H4:$B4/$V0
+
+TEST $CLI_2 volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status $V0
+kill_glusterd 2
+
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/daemon-log-level-option.t b/tests/bugs/glusterd/daemon-log-level-option.t
new file mode 100644
index 00000000000..66e55e3d758
--- /dev/null
+++ b/tests/bugs/glusterd/daemon-log-level-option.t
@@ -0,0 +1,93 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
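+# Each glusterfs log line carries a single-letter severity field (" I ", " W ",
+# " D ", " T "), so counting messages of a given level is a simple grep.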
+function Info_messages_count() {
+ local shd_log=$1
+ cat $shd_log | grep " I " | wc -l
+}
+
+function Warning_messages_count() {
+ local shd_log=$1
+ cat $shd_log | grep " W " | wc -l
+}
+
+function Debug_messages_count() {
+ local shd_log=$1
+ cat $shd_log | grep " D " | wc -l
+}
+
+function Trace_messages_count() {
+ local shd_log=$1
+ cat $shd_log | grep " T " | wc -l
+}
+
+cleanup;
+
+# Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+# set cluster.daemon-log-level option to DEBUG
+TEST $CLI volume set all cluster.daemon-log-level DEBUG
+
+#Create a 3X2 distributed-replicate volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..6};
+TEST $CLI volume start $V0
+
+# log should not have any trace messages
+EXPECT 0 Trace_messages_count "/var/log/glusterfs/glustershd.log"
+
+# stop the volume and remove glustershd log
+TEST $CLI volume stop $V0
+rm -f /var/log/glusterfs/glustershd.log
+
+# set cluster.daemon-log-level option to INFO and start the volume
+TEST $CLI volume set all cluster.daemon-log-level INFO
+TEST $CLI volume start $V0
+
+# log should not have any debug messages
+EXPECT 0 Debug_messages_count "/var/log/glusterfs/glustershd.log"
+
+# log should not have any trace messages
+EXPECT 0 Trace_messages_count "/var/log/glusterfs/glustershd.log"
+
+# stop the volume and remove glustershd log
+TEST $CLI volume stop $V0
+rm -f /var/log/glusterfs/glustershd.log
+
+# set cluster.daemon-log-level option to WARNING and start the volume
+TEST $CLI volume set all cluster.daemon-log-level WARNING
+TEST $CLI volume start $V0
+
+# log should not have any info messages
+EXPECT 0 Info_messages_count "/var/log/glusterfs/glustershd.log"
+
+# log should not have any debug messages
+EXPECT 0 Debug_messages_count "/var/log/glusterfs/glustershd.log"
+
+# log should not have any trace messages
+EXPECT 0 Trace_messages_count "/var/log/glusterfs/glustershd.log"
+
+# stop the volume and remove glustershd log
+TEST $CLI volume stop $V0
+rm -f /var/log/glusterfs/glustershd.log
+
+# set cluster.daemon-log-level option to ERROR and start the volume
+TEST $CLI volume set all cluster.daemon-log-level ERROR
+TEST $CLI volume start $V0
+
+# log should not have any info messages
+EXPECT 0 Info_messages_count "/var/log/glusterfs/glustershd.log"
+
+# log should not have any warning messages
+EXPECT 0 Warning_messages_count "/var/log/glusterfs/glustershd.log"
+
+# log should not have any debug messages
+EXPECT 0 Debug_messages_count "/var/log/glusterfs/glustershd.log"
+
+# log should not have any trace messages
+EXPECT 0 Trace_messages_count "/var/log/glusterfs/glustershd.log"
+
+cleanup
diff --git a/tests/bugs/glusterd/df-results-post-replace-brick-operations.t b/tests/bugs/glusterd/df-results-post-replace-brick-operations.t
new file mode 100644
index 00000000000..04f75889388
--- /dev/null
+++ b/tests/bugs/glusterd/df-results-post-replace-brick-operations.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+TEST glusterd
+
+#Create brick partitions
+TEST truncate -s 100M $B0/brick1
+TEST truncate -s 100M $B0/brick2
+TEST truncate -s 100M $B0/brick3
+TEST truncate -s 100M $B0/brick4
+TEST truncate -s 100M $B0/brick5
+
+LO1=`SETUP_LOOP $B0/brick1`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO1
+
+LO2=`SETUP_LOOP $B0/brick2`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO2
+
+LO3=`SETUP_LOOP $B0/brick3`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO3
+
+LO4=`SETUP_LOOP $B0/brick4`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO4
+
+LO5=`SETUP_LOOP $B0/brick5`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO5
+
+TEST mkdir -p $B0/${V0}1 $B0/${V0}2 $B0/${V0}3 $B0/${V0}4 $B0/${V0}5
+TEST MOUNT_LOOP $LO1 $B0/${V0}1
+TEST MOUNT_LOOP $LO2 $B0/${V0}2
+TEST MOUNT_LOOP $LO3 $B0/${V0}3
+TEST MOUNT_LOOP $LO4 $B0/${V0}4
+TEST MOUNT_LOOP $LO5 $B0/${V0}5
+
+# create a subdirectory in each brick mount point and use it for volume creation
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}1/brick1 $H0:$B0/${V0}2/brick1 $H0:$B0/${V0}3/brick1
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" online_brick_count
+
+# mount the volume and check the size at mount point
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+total_space=$(df -P $M0 | tail -1 | awk '{ print $2}')
+
+# perform replace brick operations
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1/brick1 $H0:$B0/${V0}4/brick1 commit force
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}2/brick1 $H0:$B0/${V0}5/brick1 commit force
+
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+# check the size at the mount point; it should be the same as before
+total_space_new=$(df -P $M0 | tail -1 | awk '{ print $2}')
+TEST [ $total_space -eq $total_space_new ]
diff --git a/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t b/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
new file mode 100644
index 00000000000..8001359e6b3
--- /dev/null
+++ b/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
@@ -0,0 +1,71 @@
+#! /bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
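+# Count peers reported as 'Peer in Cluster (Connected)' from node $1's CLI.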
+function check_peers {
+eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup
+
+TEST launch_cluster 3
+
+TEST $CLI_1 peer probe $H2
+
+#bug-1109741 - validate mgmt handshake
+
+TEST ! $CLI_3 peer probe $H1
+
+GD1_WD=$($CLI_1 system getwd)
+OP_VERS_ORIG=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2)
+
+TEST $CLI_3 system uuid get # Needed for glusterd.info to be created
+
+GD3_WD=$($CLI_3 system getwd)
+TEST sed -rnie "'s/(operating-version=)\w+/\130600/gip'" ${GD3_WD}/glusterd.info
+
+TEST kill_glusterd 3
+TEST start_glusterd 3
+
+TEST ! $CLI_3 peer probe $H1
+
+OP_VERS_NEW=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2)
+TEST [[ $OP_VERS_ORIG == $OP_VERS_NEW ]]
+
+#bug-948686 - volume sync after bringing up the killed node
+
+TEST $CLI_1 peer probe $H3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 2
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 3
+
+TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/$V0 $H1:$B1/${V0}_1 $H2:$B2/$V0 $H3:$B3/$V0
+TEST $CLI_1 volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V0 'Status'
+TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
+
+#kill a node
+TEST kill_node 3
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers 2
+
+#modify volume config to see change in volume-sync
+TEST $CLI_1 volume set $V0 write-behind off
+#add some files to the volume to see effect of volume-heal cmd
+TEST touch $M0/{1..100};
+TEST $CLI_1 volume stop $V0;
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 'Stopped' volinfo_field_1 $V0 'Status'
+
+TEST $glusterd_3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 2
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 3
+
+sleep 5
+TEST $CLI_3 volume start $V0;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V0 'Status'
+TEST $CLI_2 volume stop $V0;
+TEST $CLI_2 volume delete $V0;
+
+cleanup
diff --git a/tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t b/tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t
new file mode 100644
index 00000000000..99272e14245
--- /dev/null
+++ b/tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t
@@ -0,0 +1,115 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+function peer_count {
+eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+#bug-1454418 - Setting Port number in specific range
+sysctl net.ipv4.ip_local_reserved_ports="24007-24008,32765-32768,49152-49156"
+
+TEST launch_cluster 4;
+
+#bug-1223213
+
+# Fool the cluster into operating at op-version 3.5 even though the binary's
+# op-version is > 3.5. This ensures the 3.5 code path is hit, to test that
+# volume status works when a node is upgraded from 3.5 to 3.7 or higher, as
+# the mgmt_v3 lock was introduced only in 3.6 and onwards
+
+GD1_WD=$($CLI_1 system getwd)
+$CLI_1 system uuid get
+Old_op_version=$(cat ${GD1_WD}/glusterd.info | grep operating-version | cut -d '=' -f 2)
+
+TEST sed -rnie "'s/(operating-version=)\w+/\130500/gip'" ${GD1_WD}/glusterd.info
+
+TEST kill_glusterd 1
+TEST start_glusterd 1
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+
+TEST `sed -i "s/"30500"/${Old_op_version}/g" ${GD1_WD}/glusterd.info`
+
+TEST kill_glusterd 1
+TEST start_glusterd 1
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 2
+
+#bug-1454418
+sysctl net.ipv4.ip_local_reserved_ports="
+"
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume start $V0
+
+#bug-888752 - volume status --xml from peer in the cluster
+
+TEST $CLI_1 volume status $V0 $H2:$B2/$V0 --xml
+
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume delete $V0
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume create $V1 $H1:$B1/$V1
+
+#bug-1635820
+# after rebooting a node which doesn't host bricks for any volume,
+# the peer should not go into the rejected state
+TEST kill_glusterd 2
+TEST start_glusterd 2
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 2
+
+TEST $CLI_1 volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V0 'Status'
+
+TEST $CLI_1 volume start $V1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V1 'Status'
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+
+TEST $CLI_1 peer probe $H4;
+EXPECT_WITHIN $PROBE_TIMEOUT 3 peer_count 1
+
+#bug-1173414 - validate mgmt-v3-remote-lock-failure
+
+for i in {1..20}
+do
+$CLI_1 volume set $V0 diagnostics.client-log-level DEBUG &
+$CLI_1 volume set $V1 barrier on
+$CLI_2 volume set $V0 diagnostics.client-log-level DEBUG &
+$CLI_2 volume set $V1 barrier on
+done
+
+EXPECT_WITHIN $PROBE_TIMEOUT 3 peer_count 1
+TEST $CLI_1 volume status
+TEST $CLI_2 volume status
+
+#bug-1293414 - validate peer detach
+
+# peers hosting bricks cannot be detached
+TEST ! $CLI_4 peer detach $H1
+EXPECT_WITHIN $PROBE_TIMEOUT 3 peer_count 1
+
+# peer not hosting bricks should be detachable
+TEST $CLI_4 peer detach $H3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+
+#bug-1344407 - deleting a volume when peer is down should fail
+
+#volume should be stopped before deletion
+TEST $CLI_1 volume stop $V0
+
+TEST kill_glusterd 2
+TEST ! $CLI_1 volume delete $V0
+
+cleanup
diff --git a/tests/bugs/glusterd/optimized-basic-testcases.t b/tests/bugs/glusterd/optimized-basic-testcases.t
new file mode 100644
index 00000000000..b89ca22415e
--- /dev/null
+++ b/tests/bugs/glusterd/optimized-basic-testcases.t
@@ -0,0 +1,305 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
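+# Extract the <opRet> value from the XML output of 'volume info'
+# (-1 indicates failure, e.g. a non-existent volume).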
+function get_opret_value () {
+ local VOL=$1
+ $CLI volume info $VOL --xml | sed -ne 's/.*<opRet>\([-0-9]*\)<\/opRet>/\1/p'
+}
+
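+# Print the brick path listed as Brick$2 in the 'volume info' output.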
+function check_brick()
+{
+ vol=$1;
+ num=$2
+ $CLI volume info $V0 | grep "Brick$num" | awk '{print $2}';
+}
+
+function brick_count()
+{
+ local vol=$1;
+
+ $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
+}
+
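+# Return the host UUID of the first brick from 'volume info --xml'.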
+function get_brick_host_uuid()
+{
+ local vol=$1;
+ local uuid_regex='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
+ local host_uuid_list=$($CLI volume info $vol --xml | grep "brick.uuid" | grep -o -E "$uuid_regex");
+
+ echo $host_uuid_list | awk '{print $1}'
+}
+
+function generate_statedump_and_check_for_glusterd_info {
+ pid=`pidof glusterd`
+ #remove old stale statedumps
+ cleanup_statedump $pid
+ kill -USR1 $pid
+ #Wait till the statedump is generated
+ sleep 1
+ fname=$(ls $statedumpdir | grep -E "\.$pid\.dump\.")
+ cat $statedumpdir/$fname | grep "xlator.glusterd.priv" | wc -l
+}
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+#bug-1238135-lazy-daemon-initialization-on-demand
+
+GDWD=$($CLI system getwd)
+
+# The glusterd.info file is created only on the first peer probe or volume
+# creation, hence we expect the file to be absent at this point
+TEST ! -e $GDWD/glusterd.info
+
+#bug-913487 - setting volume options before creation of volume should fail
+
+TEST ! $CLI volume set $V0 performance.open-behind off;
+TEST pidof glusterd;
+
+#bug-1433578 - glusterd should not crash after probing an invalid peer
+
+TEST ! $CLI peer probe invalid-peer
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+#bug-1786478 - default volume option after volume reset
+addr_family=`volinfo_field $V0 'transport.address-family'`
+TEST $CLI volume reset $V0
+EXPECT $addr_family volinfo_field $V0 'transport.address-family'
+
+#bug-955588 - uuid validation
+
+uuid=`grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=`
+EXPECT $uuid get_brick_host_uuid $V0
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+#bug-958790 - set options from file
+
+touch $GLUSTERD_WORKDIR/groups/test
+echo "read-ahead=off" > $GLUSTERD_WORKDIR/groups/test
+echo "open-behind=off" >> $GLUSTERD_WORKDIR/groups/test
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 group test
+EXPECT "off" volume_option $V0 performance.read-ahead
+EXPECT "off" volume_option $V0 performance.open-behind
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+#bug-1321836 - validate opret value for non existing volume
+
+EXPECT 0 get_opret_value $V0
+EXPECT -1 get_opret_value "novol"
+
+EXPECT '2' brick_count $V0
+
+#bug-862834 - validate brick status
+
+EXPECT "$H0:$B0/${V0}1" check_brick $V0 '1';
+EXPECT "$H0:$B0/${V0}2" check_brick $V0 '2';
+
+TEST ! $CLI volume create $V1 $H0:$B0/${V1}0 $H0:$B0/${V0}1;
+
+#bug-1482344 - setting volume-option-at-cluster-level should not result in glusterd crash
+
+TEST ! $CLI volume set all transport.listen-backlog 128
+
+# Check the volume info output; if glusterd had crashed, this command
+# would fail
+TEST $CLI volume info $V0;
+
+#bug-1002556 and bug-1199451 - command should retrieve current op-version of the node
+TEST $CLI volume get all cluster.op-version
+
+#bug-1315186 - reject-lowering-down-op-version
+
+OP_VERS_ORIG=$(grep 'operating-version' ${GDWD}/glusterd.info | cut -d '=' -f 2)
+OP_VERS_NEW=`expr $OP_VERS_ORIG - 1`
+
+TEST ! $CLI volume set all $V0 cluster.op-version $OP_VERS_NEW
+
+#bug-1022055 - validate log rotate command
+
+TEST ! $CLI volume log rotate $V0;
+TEST $CLI volume log $V0 rotate;
+
+#bug-1092841 - validating barrier enable/disable
+
+TEST $CLI volume barrier $V0 enable;
+TEST ! $CLI volume barrier $V0 enable;
+
+TEST $CLI volume barrier $V0 disable;
+TEST ! $CLI volume barrier $V0 disable;
+
+#bug-1095097 - validate volume profile command
+
+TEST $CLI volume profile $V0 start
+TEST $CLI volume profile $V0 info
+
+#bug-839595 - validate server-quorum options
+
+TEST $CLI volume set $V0 cluster.server-quorum-type server
+EXPECT "server" volume_option $V0 cluster.server-quorum-type
+TEST $CLI volume set $V0 cluster.server-quorum-type none
+EXPECT "none" volume_option $V0 cluster.server-quorum-type
+TEST $CLI volume reset $V0 cluster.server-quorum-type
+TEST ! $CLI volume set $V0 cluster.server-quorum-type abc
+TEST ! $CLI volume set all cluster.server-quorum-type none
+TEST ! $CLI volume set $V0 cluster.server-quorum-ratio 100
+
+TEST ! $CLI volume set all cluster.server-quorum-ratio abc
+TEST ! $CLI volume set all cluster.server-quorum-ratio -1
+TEST ! $CLI volume set all cluster.server-quorum-ratio 100.0000005
+TEST $CLI volume set all cluster.server-quorum-ratio 0
+EXPECT "0" volume_option $V0 cluster.server-quorum-ratio
+TEST $CLI volume set all cluster.server-quorum-ratio 100
+EXPECT "100" volume_option $V0 cluster.server-quorum-ratio
+TEST $CLI volume set all cluster.server-quorum-ratio 0.0000005
+EXPECT "0.0000005" volume_option $V0 cluster.server-quorum-ratio
+TEST $CLI volume set all cluster.server-quorum-ratio 100%
+EXPECT "100%" volume_option $V0 cluster.server-quorum-ratio
+
+#bug-1265479 - validate-distributed-volume-options
+
+#Setting data-self-heal option on for distribute volume
+TEST ! $CLI volume set $V0 data-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.data-self-heal';
+TEST ! $CLI volume set $V0 cluster.data-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.data-self-heal';
+
+#Setting metadata-self-heal option on for distribute volume
+TEST ! $CLI volume set $V0 metadata-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.metadata-self-heal';
+TEST ! $CLI volume set $V0 cluster.metadata-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.metadata-self-heal';
+
+#Setting entry-self-heal option on for distribute volume
+TEST ! $CLI volume set $V0 entry-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.entrydata-self-heal';
+TEST ! $CLI volume set $V0 cluster.entry-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.entrydata-self-heal';
+
+#bug-1163108 - validate min-free-disk-option
+
+## Setting invalid value for option cluster.min-free-disk should fail
+TEST ! $CLI volume set $V0 min-free-disk ""
+TEST ! $CLI volume set $V0 min-free-disk 143.!/12
+TEST ! $CLI volume set $V0 min-free-disk 123%
+TEST ! $CLI volume set $V0 min-free-disk 194.34%
+
+## Setting a fractional value as a size (unit is bytes) for option
+## cluster.min-free-disk should fail
+TEST ! $CLI volume set $V0 min-free-disk 199.051
+TEST ! $CLI volume set $V0 min-free-disk 111.999
+
+## Setting valid value for option cluster.min-free-disk should pass
+TEST $CLI volume set $V0 min-free-disk 12%
+TEST $CLI volume set $V0 min-free-disk 56.7%
+TEST $CLI volume set $V0 min-free-disk 120
+TEST $CLI volume set $V0 min-free-disk 369.0000
+
+#bug-1179175-uss-option-validation
+
+## Set the features.uss option with non-boolean values. These non-boolean
+## values for the features.uss option should fail.
+TEST ! $CLI volume set $V0 features.uss abcd
+TEST ! $CLI volume set $V0 features.uss #$#$
+TEST ! $CLI volume set $V0 features.uss 2324
+
+## Setting other options with valid values. These options should succeed.
+TEST $CLI volume set $V0 barrier enable
+TEST $CLI volume set $V0 ping-timeout 60
+
+## Set features.uss option with valid boolean value. It should succeed.
+TEST $CLI volume set $V0 features.uss enable
+TEST $CLI volume set $V0 features.uss disable
+
+
+## Setting other options with valid values. These options should succeed.
+TEST $CLI volume set $V0 barrier enable
+TEST $CLI volume set $V0 ping-timeout 60
+
+#bug-1209329 - daemon-svcs-on-reset-volume
+
+##enable bitrot and verify whether bitd is running
+TEST $CLI volume bitrot $V0 enable
+EXPECT 'on' volinfo_field $V0 'features.bitrot'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
+
+##Do reset force, which sets the bitrot options back to default
+TEST $CLI volume reset $V0 force;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_bitd_count
+
+##enable the uss option and verify whether snapd is running
+TEST $CLI volume set $V0 features.uss on
+EXPECT 'on' volinfo_field $V0 'features.uss'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_snapd_count
+
+##Do reset force, which sets the uss option back to default
+TEST $CLI volume reset $V0 force;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_snapd_count
+
+##verify that nfs is disabled by default
+EXPECT "0" get_nfs_count
+
+##enable nfs and verify
+TEST $CLI volume set $V0 nfs.disable off
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
+EXPECT "1" get_nfs_count
+
+##Do reset force, which sets the nfs options back to default
+TEST $CLI volume reset $V0 force;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_nfs_count
+
+##enable the uss option and verify whether snapd is running
+TEST $CLI volume set $V0 features.uss on
+EXPECT 'on' volinfo_field $V0 'features.uss'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_snapd_count
+
+##Disable the uss option using the set command and verify snapd goes down
+TEST $CLI volume set $V0 features.uss off
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_snapd_count
+
+##enable nfs.disable and verify
+TEST $CLI volume set $V0 nfs.disable on
+EXPECT 'on' volinfo_field $V0 'nfs.disable'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_nfs_count
+
+## disable the nfs.disable option using the set command
+TEST $CLI volume set $V0 nfs.disable off
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_nfs_count
+
+TEST $CLI volume info;
+TEST $CLI volume create $V1 $H0:$B0/${V1}1
+TEST $CLI volume start $V1
+pkill glusterd;
+pkill glusterfsd;
+TEST glusterd
+TEST $CLI volume status $V1
+
+#bug-853601 - Avoid using /var/lib/glusterd as a brick
+TEST ! $CLI volume create "test" $H0:/var/lib/glusterd
+TEST ! $CLI volume create "test" $H0:/var/lib/glusterd force
+TEST ! $CLI volume create "test" $H0:/var/lib/glusterd/abc
+TEST ! $CLI volume create "test" $H0:/var/lib/glusterd/abc force
+mkdir -p /xyz/var/lib/glusterd/abc
+
+#bug 1716812 - volfile should be created with transport type both
+TEST $CLI volume create "test" transport tcp,rdma $H0:/xyz/var/lib/glusterd/abc
+EXPECT 'Created' volinfo_field "test" 'Status';
+
+#While taking a statedump, there is a TRY_LOCK on the call_frame, which may
+#cause failure. So adding an EXPECT_WITHIN
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" generate_statedump_and_check_for_glusterd_info
+
+cleanup_statedump `pidof glusterd`
+cleanup
diff --git a/tests/bugs/glusterd/quorum-validation.t b/tests/bugs/glusterd/quorum-validation.t
new file mode 100644
index 00000000000..3cc3351b43b
--- /dev/null
+++ b/tests/bugs/glusterd/quorum-validation.t
@@ -0,0 +1,122 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+TEST launch_cluster 2
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+TEST $CLI_1 volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}1
+
+#bug-1177132 - sync server quorum options when a node is brought up
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 52
+
+#Bring down 2nd glusterd
+TEST kill_glusterd 2
+EXPECT_WITHIN $PROBE_TIMEOUT 0 peer_count
+
+#bug-1104642 - sync server quorum options when a node is brought up
+#set the volume all options from the 1st glusterd
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 80
+
+# Now quorum is not met. Add-brick, remove-brick and volume-set commands
+# (commands based on the syncop framework) should fail
+TEST ! $CLI_1 volume add-brick $V0 $H1:$B1/${V0}2
+TEST ! $CLI_1 volume remove-brick $V0 $H1:$B1/${V0}0 start
+TEST ! $CLI_1 volume set $V0 barrier enable
+
+#quorum is not met, rebalance/profile start should fail
+TEST ! $CLI_1 volume rebalance $V0 start
+TEST ! $CLI_1 volume profile $V0 start
+
+#bug-1690753 - Volume stop when quorum not met is successful
+TEST ! $CLI_1 volume stop $V0
+
+#Bring back the 2nd glusterd
+TEST $glusterd_2
+
+#verify whether the value has been synced
+EXPECT_WITHIN $PROBE_TIMEOUT "80" volinfo_field_1 all cluster.server-quorum-ratio
+EXPECT_WITHIN $PROBE_TIMEOUT '1' peer_count
+EXPECT_WITHIN $PROBE_TIMEOUT "80" volinfo_field_2 all cluster.server-quorum-ratio
+
+# Now quorum is met.
+# Add-brick, remove-brick and volume-set commands should succeed
+TEST $CLI_1 volume add-brick $V0 $H2:$B2/${V0}2
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0}2 start
+TEST $CLI_1 volume set $V0 barrier enable
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0}2 stop
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}1
+
+## Stop the volume
+TEST $CLI_1 volume stop $V0
+
+## Bring down 2nd glusterd
+TEST kill_glusterd 2
+
+## Now quorum is not met. Starting the volume on the 1st node should not succeed
+TEST ! $CLI_1 volume start $V0
+
+## Bring back 2nd glusterd
+TEST $glusterd_2
+
+# After the 2nd glusterd comes back, there will be 2 nodes in the cluster
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
+
+## Now quorum is met. Starting the volume on the 1st node should succeed.
+TEST $CLI_1 volume start $V0
+
+# Now re-execute the same profile command and this time it should succeed
+TEST $CLI_1 volume profile $V0 start
+
+#bug-1352277
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}1
+
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type none
+
+# Bring down all the gluster processes
+TEST killall_gluster
+
+#bring back 1st glusterd and check whether the brick process comes back
+TEST $glusterd_1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+
+#enabling quorum should bring down the brick
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+
+TEST $glusterd_2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}1
+
+#bug-1367478 - brick processes should not be up when quorum is not met
+TEST $CLI_1 volume create $V1 $H1:$B1/${V1}1 $H2:$B2/${V1}2
+TEST $CLI_1 volume start $V1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V1 $H1 $B1/${V1}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V1 $H2 $B2/${V1}2
+
+# Restart 2nd glusterd
+TEST kill_glusterd 2
+TEST $glusterd_2
+
+# Check if all bricks are up
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V1 $H1 $B1/${V1}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V1 $H2 $B2/${V1}2
+
+cleanup
diff --git a/tests/bugs/glusterd/rebalance-in-cluster.t b/tests/bugs/glusterd/rebalance-in-cluster.t
new file mode 100644
index 00000000000..469ec6cd48e
--- /dev/null
+++ b/tests/bugs/glusterd/rebalance-in-cluster.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
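+# Print the status column (7th field of the 3rd line) of 'rebalance status'
+# as seen from node 1's CLI.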
+function rebalance_status_field_1 {
+ $CLI_1 volume rebalance $1 status | awk '{print $7}' | sed -n 3p
+}
+
+cleanup;
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+$CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
+
+$CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+#bug-1231437
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
+
+TEST mkdir $M0/dir{1..4};
+TEST touch $M0/dir{1..4}/files{1..4};
+
+TEST $CLI_1 volume add-brick $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1
+
+TEST $CLI_1 volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V0
+
+#bug-1764119 - rebalance status should display detailed info when any of the nodes is down
+TEST kill_glusterd 2
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field_1 $V0
+
+TEST start_glusterd 2
+#bug-1245142
+
+$CLI_1 volume rebalance $V0 start &
+#kill glusterd2 after the request is sent, so that the callback is called
+#with rpc->status set to failure; a roughly 1 sec delay is introduced to hit this scenario.
+sleep 1
+kill_glusterd 2
+#check glusterd commands are working after rebalance start command
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/rebalance-operations-in-single-node.t b/tests/bugs/glusterd/rebalance-operations-in-single-node.t
new file mode 100644
index 00000000000..ef85887f440
--- /dev/null
+++ b/tests/bugs/glusterd/rebalance-operations-in-single-node.t
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
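+# Print the requested column of the local rebalance status line (rebalanced
+# files, size, scanned, failures, skipped), with trailing spaces stripped.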
+function get_rebalanced_info()
+{
+ local rebal_info_key=$2
+ $CLI volume rebalance $1 status | awk '{print $'$rebal_info_key'}' |sed -n 3p| sed 's/ *$//g'
+}
+
+volname="StartMigrationDuringRebalanceTest"
+TEST glusterd
+TEST pidof glusterd;
+
+TEST $CLI volume info;
+TEST $CLI volume create $volname $H0:$B0/${volname}{1..4};
+TEST $CLI volume start $volname;
+
+#bug-1046308 - validate rebalance on a specified volume name
+TEST $CLI volume rebalance $volname start;
+
+#bug-1089668 - validation of rebalance status and remove brick status
+#bug-963541 - after remove-brick start, rebalance/remove-brick start without committing should fail
+
+TEST ! $CLI volume remove-brick $volname $H0:$B0/${volname}1 status
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $volname
+
+TEST $CLI volume remove-brick $volname $H0:$B0/${volname}1 start
+TEST ! $CLI volume rebalance $volname start
+TEST ! $CLI volume rebalance $volname status
+TEST ! $CLI volume remove-brick $volname $H0:$B0/${volname}2 start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field \
+"$volname" "$H0:$B0/${volname}1"
+TEST $CLI volume remove-brick $volname $H0:$B0/${volname}1 commit
+
+TEST $CLI volume rebalance $volname start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $volname
+TEST $CLI volume rebalance $volname stop
+
+TEST $CLI volume remove-brick $volname $H0:$B0/${volname}2 start
+TEST $CLI volume remove-brick $volname $H0:$B0/${volname}2 stop
+
+#bug-1351021-rebalance-info-post-glusterd-restart
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3};
+TEST $CLI volume start $V0;
+
+#Mount volume and create data
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+TEST mkdir $M0/dir{1..10}
+TEST touch $M0/dir{1..10}/file{1..10}
+
+# Add-brick and start rebalance
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}4
+TEST $CLI volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+
+#Rebalance info before glusterd restart
+OLD_REBAL_FILES=$(get_rebalanced_info $V0 2)
+OLD_SIZE=$(get_rebalanced_info $V0 3)
+OLD_SCANNED=$(get_rebalanced_info $V0 4)
+OLD_FAILURES=$(get_rebalanced_info $V0 5)
+OLD_SKIPPED=$(get_rebalanced_info $V0 6)
+
+
+pkill glusterd;
+pkill glusterfsd;
+TEST glusterd
+
+#Rebalance info after glusterd restart
+NEW_REBAL_FILES=$(get_rebalanced_info $V0 2)
+NEW_SIZE=$(get_rebalanced_info $V0 3)
+NEW_SCANNED=$(get_rebalanced_info $V0 4)
+NEW_FAILURES=$(get_rebalanced_info $V0 5)
+NEW_SKIPPED=$(get_rebalanced_info $V0 6)
+#Check rebalance info before and after glusterd restart
+TEST [ $OLD_REBAL_FILES == $NEW_REBAL_FILES ]
+TEST [ $OLD_SIZE == $NEW_SIZE ]
+TEST [ $OLD_SCANNED == $NEW_SCANNED ]
+TEST [ $OLD_FAILURES == $NEW_FAILURES ]
+TEST [ $OLD_SKIPPED == $NEW_SKIPPED ]
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#bug-1004744 - validation of rebalance fix layout
+
+TEST $CLI volume start $V0 force
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+for i in `seq 11 20`;
+do
+ mkdir $M0/dir_$i
+ echo file>$M0/dir_$i/file_$i
+ for j in `seq 1 100`;
+ do
+ mkdir $M0/dir_$i/dir_$j
+ echo file>$M0/dir_$i/dir_$j/file_$j
+ done
+done
+
+#add 2 bricks
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{5,6};
+
+#perform rebalance fix-layout
+TEST $CLI volume rebalance $V0 fix-layout start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" fix-layout_status_field $V0;
+
+#bug-1075087 - rebalance post add brick
+TEST mkdir $M0/dir{21..30};
+TEST touch $M0/dir{21..30}/files{1..10};
+
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{7,8}
+
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN 180 "completed" rebalance_status_field $V0
+
+TEST pkill gluster
+TEST glusterd
+TEST pidof glusterd
+
+# status should be "completed" immediately after glusterd has respawned.
+EXPECT_WITHIN 20 "completed" rebalance_status_field $V0
+
+cleanup
diff --git a/tests/bugs/glusterd/remove-brick-in-cluster.t b/tests/bugs/glusterd/remove-brick-in-cluster.t
new file mode 100644
index 00000000000..de94220a906
--- /dev/null
+++ b/tests/bugs/glusterd/remove-brick-in-cluster.t
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+TEST launch_cluster 2;
+
+#bug-1047955 - remove brick from new peer in cluster
+TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/${V0}{1,2,3,4}
+TEST $CLI_1 volume start $V0;
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_2 volume remove-brick $V0 $H1:$B1/${V0}{3,4} start;
+TEST $CLI_2 volume info
+
+#bug-964059 - volume status post remove brick start
+TEST $CLI_1 volume create $V1 $H1:$B1/${V1}0 $H2:$B2/${V1}1
+TEST $CLI_1 volume start $V1
+TEST $CLI_1 volume remove-brick $V1 $H2:$B2/${V1}1 start
+TEST $CLI_1 volume status
+
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume delete $V0
+
+#bug-1230121 - decrease replica count by remove-brick and increase it by add-brick
+## Creating a 2x3 replicate volume
+TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/brick1 $H2:$B2/brick2 \
+ $H1:$B1/brick3 $H2:$B2/brick4 \
+ $H1:$B1/brick5 $H2:$B2/brick6
+
+## Start the volume
+TEST $CLI_1 volume start $V0
+
+## Shrink the volume from 2x3 to 2x2 by performing a remove-brick operation.
+TEST $CLI_1 volume remove-brick $V0 replica 2 $H1:$B1/brick1 $H2:$B2/brick6 force
+
+## Shrink the volume from 2x2 to 1x2 by performing a remove-brick operation
+TEST $CLI_1 volume remove-brick $V0 replica 2 $H1:$B1/brick3 $H2:$B2/brick2 force
+
+## Shrink the volume from 1x2 to 1x1 by performing a remove-brick operation
+TEST $CLI_1 volume remove-brick $V0 replica 1 $H1:$B1/brick5 force
+
+
+### Expanding the volume by performing add-brick operations.
+
+## Expand the volume from 1x1 to 1x2 by performing an add-brick operation
+TEST $CLI_1 volume add-brick $V0 replica 2 $H1:$B1/brick5 force
+
+## Expand the volume from 1x2 to 2x2 by performing an add-brick operation
+TEST $CLI_1 volume add-brick $V0 replica 2 $H1:$B1/brick3 $H2:$B2/brick2 force
+
+## Expand the volume from 2x2 to 2x3 by performing an add-brick operation
+TEST $CLI_1 volume add-brick $V0 replica 3 $H1:$B1/brick1 $H2:$B2/brick6 force
+
+cleanup
+
diff --git a/tests/bugs/glusterd/remove-brick-testcases.t b/tests/bugs/glusterd/remove-brick-testcases.t
new file mode 100644
index 00000000000..2f982d5266f
--- /dev/null
+++ b/tests/bugs/glusterd/remove-brick-testcases.t
@@ -0,0 +1,119 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+function brick_count()
+{
+ local vol=$1;
+
+ $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..5}
+TEST $CLI volume start $V0
+
+#bug-1225716 - remove-brick on a brick which is down should fail
+#kill a brick process
+kill_brick $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status $V0 $H0 $B0/${V0}1
+
+#remove-brick start should fail as the brick is down
+TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+#remove-brick start should succeed as the brick is up
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}1"
+
+#kill a brick process
+kill_brick $V0 $H0 $B0/${V0}1
+
+#remove-brick commit should pass even if the brick is down
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 commit
+
+#bug-1121584 - brick-existing-validation-for-remove-brick-status-stop
+## Start remove-brick operation on the volume
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start
+
+## Giving a non-existent brick to the remove-brick status/stop command
+## should give an error.
+TEST ! $CLI volume remove-brick $V0 $H0:$B0/ABCD status
+TEST ! $CLI volume remove-brick $V0 $H0:$B0/ABCD stop
+
+## Giving a brick which is part of the volume, the remove-brick status/stop
+## command should print statistics of the remove-brick operation or stop
+## the remove-brick operation.
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 status
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 stop
+
+#bug-878004 - validate remove brick force
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 force;
+EXPECT '3' brick_count $V0
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 force;
+EXPECT '2' brick_count $V0
+
+#bug-1027171 - Do not allow commit if the bricks are not decommissioned
+#Remove bricks and commit without starting
+function remove_brick_commit_status {
+ $CLI volume remove-brick $V0 \
+ $H0:$B0/${V0}4 commit 2>&1 |grep -oE "success|decommissioned"
+}
+EXPECT "decommissioned" remove_brick_commit_status;
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0;
+
+#Create a 2X3 distributed-replicate volume
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..6};
+TEST $CLI volume start $V0
+
+#Try to reduce replica count with start option
+function remove_brick_start_status {
+ $CLI volume remove-brick $V0 replica 2 \
+ $H0:$B0/${V0}3 $H0:$B0/${V0}6 start 2>&1 |grep -oE "success|failed"
+}
+EXPECT "failed" remove_brick_start_status;
+
+#Remove bricks with commit option
+function remove_brick_commit_status2 {
+ $CLI volume remove-brick $V0 replica 2 \
+ $H0:$B0/${V0}3 $H0:$B0/${V0}6 commit 2>&1 |
+ grep -oE "success|decommissioned"
+}
+EXPECT "decommissioned" remove_brick_commit_status2;
+TEST $CLI volume info $V0
+
+#bug-1040408 - reduce replica count of distributed replicate volume
+
+# Reduce to 2x2 volume by specifying bricks in reverse order
+function remove_brick_status {
+ $CLI volume remove-brick $V0 replica 2 \
+ $H0:$B0/${V0}6 $H0:$B0/${V0}3 force 2>&1 |grep -oE "success|failed"
+}
+EXPECT "success" remove_brick_status;
+TEST $CLI volume info $V0
+
+#bug-1120647 - remove brick validation
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}{4..5} start
+EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}5"
+EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}4"
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}{4..5} commit
+TEST $CLI volume remove-brick $V0 replica 1 $H0:$B0/${V0}2 force
+
+cleanup
diff --git a/tests/bugs/glusterd/remove-brick-validation.t b/tests/bugs/glusterd/remove-brick-validation.t
new file mode 100644
index 00000000000..a0ff4ff6a24
--- /dev/null
+++ b/tests/bugs/glusterd/remove-brick-validation.t
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+function peer_count {
+eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+## start a 3 node virtual cluster
+TEST launch_cluster 3;
+
+## peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+
+#testcase: bug-1245045-remove-brick-validation
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume start $V0
+
+kill_glusterd 2
+
+#remove-brick should fail as the peer hosting the brick is down
+TEST ! $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
+
+TEST $glusterd_2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+
+#volume status should work
+TEST $CLI_2 volume status
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 3
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
+kill_glusterd 2
+
+#remove-brick commit should fail as the peer hosting the brick is down
+TEST ! $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} commit
+
+TEST $glusterd_2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+
+#volume status should work
+TEST $CLI_2 volume status
+
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} stop
+
+kill_glusterd 3
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
+
+TEST start_glusterd 3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+TEST $CLI_3 volume status
+
+cleanup
diff --git a/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t b/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t
new file mode 100644
index 00000000000..00beab59137
--- /dev/null
+++ b/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a 3X2 distributed-replicate volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..6};
+TEST $CLI volume start $V0
+
+#bug-974007 - remove multiple replica pairs in a single brick command
+# Mount FUSE and create files
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST touch $M0/file{1..10}
+
+# Remove bricks from two sub-volumes to make it a 1x2 vol.
+# Bricks in question are given in a random order but from the same subvols.
+function remove_brick_start_status {
+ $CLI volume remove-brick $V0 \
+ $H0:$B0/${V0}6 $H0:$B0/${V0}1 \
+ $H0:$B0/${V0}2 $H0:$B0/${V0}5 start 2>&1 |grep -oE "success|failed"
+}
+EXPECT "success" remove_brick_start_status;
+
+# Wait for rebalance to complete
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:$B0/${V0}6 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}5"
+
+# Check commit status
+function remove_brick_commit_status {
+ $CLI volume remove-brick $V0 \
+ $H0:$B0/${V0}6 $H0:$B0/${V0}1 \
+ $H0:$B0/${V0}2 $H0:$B0/${V0}5 commit 2>&1 |grep -oE "success|failed"
+}
+EXPECT "success" remove_brick_commit_status;
+
+
+# Check the volume type
+EXPECT "Replicate" echo `$CLI volume info |grep Type |awk '{print $2}'`
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#bug-961669 - remove brick start should fail when reducing the replica count
+
+#Create a 3x3 dist-rep volume
+TEST $CLI volume create $V1 replica 3 $H0:$B0/${V1}{0,1,2,3,4,5,6,7,8};
+TEST $CLI volume start $V1
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "9" brick_count ${V1}
+
+# Mount FUSE and create file/directory
+TEST glusterfs -s $H0 --volfile-id $V1 $M0
+TEST touch $M0/zerobytefile.txt
+TEST mkdir $M0/test_dir
+TEST dd if=/dev/zero of=$M0/file bs=1024 count=1024
+
+function remove_brick_start {
+ $CLI volume remove-brick $V1 replica 2 $H0:$B0/${V1}{1,4,7} start 2>&1|grep -oE 'success|failed'
+}
+
+function remove_brick {
+ $CLI volume remove-brick $V1 replica 2 $H0:$B0/${V1}{1,4,7} force 2>&1|grep -oE 'success|failed'
+}
+
+#remove-brick start variant
+#Actual message displayed at cli is:
+#"volume remove-brick start: failed: Rebalancing not needed when reducing replica count. Try without the 'start' option"
+EXPECT "failed" remove_brick_start;
+
+#remove-brick commit-force
+#Actual message displayed at cli is:
+#"volume remove-brick commit force: success"
+EXPECT "success" remove_brick
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+cleanup;
diff --git a/tests/bugs/glusterd/replace-brick-operations.t b/tests/bugs/glusterd/replace-brick-operations.t
new file mode 100644
index 00000000000..044aa3d6c6d
--- /dev/null
+++ b/tests/bugs/glusterd/replace-brick-operations.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+## Test case for BZ: 1094119 Remove replace-brick support from gluster
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+# Start glusterd
+TEST glusterd
+TEST pidof glusterd
+
+## Let's create and start the volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}
+TEST $CLI volume start $V0
+
+#bug-1094119-remove-replace-brick-support-from-glusterd
+
+## With this patch, replace-brick only accepts the following command:
+## volume replace-brick <VOLNAME> <SOURCE-BRICK> <NEW-BRICK> {commit force}
+## Any other form of the replace-brick command will fail.
+
+TEST ! $CLI volume replace-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}3 start
+TEST ! $CLI volume replace-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}3 status
+TEST ! $CLI volume replace-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}3 abort
+
+
+## the replace-brick commit force command should succeed
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}3 commit force
+
+#bug-1242543-replace-brick validation
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+# Replace brick1 without killing
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1_new commit force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST kill_brick $V0 $H0 $B0/${V0}1_new
+
+# Replace brick1 after killing the brick
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1_new $H0:$B0/${V0}1_newer commit force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+cleanup;
diff --git a/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t b/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t
new file mode 100644
index 00000000000..e6e65c48456
--- /dev/null
+++ b/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
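+# Parse 'volume status' for the local Self-heal Daemon: field 7 is its
+# online state (Y/N) and field 8 is its PID.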
+function shd_up_status_1 {
+ $CLI_1 volume status | grep "localhost" | grep "Self-heal Daemon" | awk '{print $7}'
+}
+
+function shd_up_status_2 {
+ $CLI_2 volume status | grep "localhost" | grep "Self-heal Daemon" | awk '{print $7}'
+}
+
+function get_shd_pid_2 {
+ $CLI_2 volume status | grep "localhost" | grep "Self-heal Daemon" | awk '{print $8}'
+}
+
+cleanup;
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+TEST launch_cluster 3
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
+
+TEST $CLI_1 volume create $V0 replica 2 $H1:$B0/${V0} $H2:$B0/${V0}
+TEST $CLI_1 volume start $V0
+
+#testcase: bug-1507466 - validate reset-brick commit force
+# Negative case with brick not killed && volume-id xattrs present
+TEST ! $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} $H1:$B0/${V0} commit force
+
+TEST $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} start
+# Now test if reset-brick commit force works
+TEST $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} $H1:$B0/${V0} commit force
+
+#testcase: bug-1383893 - shd should not come up after restarting the peer glusterd
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B0/${V0}
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B0/${V0}
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" shd_up_status_1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" shd_up_status_2
+
+# Bring down shd on 2nd node
+kill -15 $(get_shd_pid_2)
+
+# Bring down glusterd on 1st node
+TEST kill_glusterd 1
+
+#Bring back 1st glusterd
+TEST $glusterd_1
+
+# We need to wait till PROCESS_UP_TIMEOUT and then check that the shd service
+# started on node 2, because once glusterd regains quorum it will restart all
+# volume-level daemons
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" shd_up_status_2
+
+cleanup;
diff --git a/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t b/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t
new file mode 100644
index 00000000000..a871e112d87
--- /dev/null
+++ b/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t
@@ -0,0 +1,54 @@
+#! /bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+count=`$CLI_3 peer status | grep 'Peer in Cluster (Connected)' | wc -l`
+echo $count
+}
+
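+# Count running glustershd processes whose command line matches the given host.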
+function check_shd {
+ps aux | grep $1 | grep glustershd | wc -l
+}
+
+cleanup
+
+
+TEST launch_cluster 6
+
+TESTS_EXPECTED_IN_LOOP=25
+for i in $(seq 2 6); do
+ hostname="H$i"
+ TEST $CLI_1 peer probe ${!hostname}
+done
+
+
+EXPECT_WITHIN $PROBE_TIMEOUT 5 check_peers;
+for i in $(seq 1 5); do
+
+ TEST $CLI_1 volume create ${V0}_$i replica 3 $H1:$B1/${V0}_$i $H2:$B2/${V0}_$i $H3:$B3/${V0}_$i $H4:$B4/${V0}_$i $H5:$B5/${V0}_$i $H6:$B6/${V0}_$i
+ TEST $CLI_1 volume start ${V0}_$i force
+
+done
+
+#kill a node
+TEST kill_node 3
+
+TEST $glusterd_3;
+EXPECT_WITHIN $PROBE_TIMEOUT 5 check_peers
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 check_shd $H3
+
+for i in $(seq 1 5); do
+
+ TEST $CLI_1 volume stop ${V0}_$i
+ TEST $CLI_1 volume delete ${V0}_$i
+
+done
+
+for i in $(seq 1 6); do
+ hostname="H$i"
+ EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 0 check_shd ${!hostname}
+done
+cleanup
diff --git a/tests/bugs/glusterd/snapshot-operations.t b/tests/bugs/glusterd/snapshot-operations.t
new file mode 100644
index 00000000000..4705577d741
--- /dev/null
+++ b/tests/bugs/glusterd/snapshot-operations.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+
+cleanup;
+
+TEST verify_lvm_version
+TEST launch_cluster 3;
+TEST setup_lvm 3;
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 replica 2 $H1:$L1 $H2:$L2
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+#bug-1318591 - skip-non-directories-inside-vols
+
+b="B1"
+TEST touch ${!b}/glusterd/vols/file
+
+TEST $CLI_1 snapshot create snap1 $V0 no-timestamp;
+
+TEST touch ${!b}/glusterd/snaps/snap1/file
+
+#bug-1322145 - peer hosting snapshotted bricks should not be detachable
+
+kill_glusterd 2
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume replace-brick $V0 $H2:$L2 $H3:$L3 commit force
+
+# peer hosting snapshotted bricks should not be detachable
+TEST ! $CLI_1 peer detach $H2
+
+TEST killall_gluster
+TEST $glusterd_1
+TEST $glusterd_2
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/sync-post-glusterd-restart.t b/tests/bugs/glusterd/sync-post-glusterd-restart.t
new file mode 100644
index 00000000000..de3dff715ab
--- /dev/null
+++ b/tests/bugs/glusterd/sync-post-glusterd-restart.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
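+# Fetch an option's value via 'volume get' on node 2 (second column of the
+# last output line).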
+function volume_get_field()
+{
+ local vol=$1
+ local field=$2
+ $CLI_2 volume get $vol $field | tail -1 | awk '{print $2}'
+}
+
+cleanup
+
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume start $V0
+
+TEST $CLI_1 volume set $V0 performance.readdir-ahead on
+
+# Bring down 2nd glusterd
+TEST kill_glusterd 2
+
+##bug-1420637 and bug-1323287 - sync post glusterd restart
+
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 60
+TEST $CLI_1 volume set $V0 performance.readdir-ahead off
+TEST $CLI_1 volume set $V0 performance.write-behind off
+
+# Bring back 2nd glusterd
+TEST $glusterd_2
+
+# After the 2nd glusterd comes back, there will be 2 nodes in the cluster
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
+
+#bug-1420637-volume sync post glusterd restart
+
+EXPECT_WITHIN $PROBE_TIMEOUT "60" volinfo_field_2 all cluster.server-quorum-ratio
+EXPECT_WITHIN $PROBE_TIMEOUT "off" volinfo_field_2 $V0 performance.readdir-ahead
+
+#bug-1323287
+EXPECT_WITHIN $PROBE_TIMEOUT 'off' volume_get_field $V0 'write-behind'
+
+#bug-1213295 - volume stop should not crash glusterd post glusterd restart
+
+TEST $CLI_2 volume stop $V0
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1
+
+cleanup
diff --git a/tests/bugs/glusterd/validating-options-for-replicated-volume.t b/tests/bugs/glusterd/validating-options-for-replicated-volume.t
new file mode 100644
index 00000000000..ddc80b17870
--- /dev/null
+++ b/tests/bugs/glusterd/validating-options-for-replicated-volume.t
@@ -0,0 +1,142 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+## start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+#bug-1314649 - validate group virt
+TEST $CLI volume set $V0 group virt;
+
+#bug-765230 - remove-quota-related-option-after-disabling-quota
+## setting soft-timeout as 20
+TEST $CLI volume set $V0 features.soft-timeout 20
+EXPECT '20' volinfo_field $V0 'features.soft-timeout';
+
+## enabling features.quota-deem-statfs
+TEST ! $CLI volume set $V0 features.quota-deem-statfs on
+EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
+
+## enabling quota
+TEST $CLI volume quota $V0 enable
+EXPECT 'on' volinfo_field $V0 'features.quota'
+
+## setting soft-timeout as 20
+TEST $CLI volume set $V0 features.soft-timeout 20
+EXPECT '20' volinfo_field $V0 'features.soft-timeout';
+
+## enabling features.quota-deem-statfs
+TEST $CLI volume set $V0 features.quota-deem-statfs on
+EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs'
+
+## disabling quota
+TEST $CLI volume quota $V0 disable
+EXPECT 'off' volinfo_field $V0 'features.quota'
+EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
+EXPECT '' volinfo_field $V0 'features.soft-timeout'
+
+## setting soft-timeout as 30
+TEST $CLI volume set $V0 features.soft-timeout 30
+EXPECT '30' volinfo_field $V0 'features.soft-timeout';
+
+## disabling features.quota-deem-statfs
+TEST ! $CLI volume set $V0 features.quota-deem-statfs off
+EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
+
+TEST ! $CLI volume set $V0 statedump-path ""
+TEST ! $CLI volume set $V0 statedump-path " "
+TEST $CLI volume set $V0 statedump-path "/home/"
+EXPECT "/home/" volume_option $V0 server.statedump-path
+
+TEST ! $CLI volume set $V0 background-self-heal-count ""
+TEST ! $CLI volume set $V0 background-self-heal-count " "
+TEST $CLI volume set $V0 background-self-heal-count 10
+EXPECT "10" volume_option $V0 cluster.background-self-heal-count
+
+TEST ! $CLI volume set $V0 io-cache-size ""
+TEST ! $CLI volume set $V0 io-cache-size " "
+TEST $CLI volume set $V0 io-cache-size 64MB
+EXPECT "64MB" volume_option $V0 performance.io-cache-size
+
+TEST ! $CLI volume set $V0 quick-read-cache-size ""
+TEST ! $CLI volume set $V0 quick-read-cache-size " "
+TEST $CLI volume set $V0 quick-read-cache-size 512MB
+EXPECT "512MB" volume_option $V0 performance.quick-read-cache-size
+
+TEST ! $CLI volume set $V0 self-heal-daemon ""
+TEST ! $CLI volume set $V0 self-heal-daemon " "
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT "on" volume_option $V0 cluster.self-heal-daemon
+
+TEST ! $CLI volume set $V0 read-subvolume ""
+TEST ! $CLI volume set $V0 read-subvolume " "
+TEST $CLI volume set $V0 read-subvolume $V0-client-0
+EXPECT "$V0-client-0" volume_option $V0 cluster.read-subvolume
+
+TEST ! $CLI volume set $V0 data-self-heal-algorithm ""
+TEST ! $CLI volume set $V0 data-self-heal-algorithm " "
+TEST ! $CLI volume set $V0 data-self-heal-algorithm on
+TEST $CLI volume set $V0 data-self-heal-algorithm full
+EXPECT "full" volume_option $V0 cluster.data-self-heal-algorithm
+
+TEST ! $CLI volume set $V0 min-free-inodes ""
+TEST ! $CLI volume set $V0 min-free-inodes " "
+TEST $CLI volume set $V0 min-free-inodes 60%
+EXPECT "60%" volume_option $V0 cluster.min-free-inodes
+
+TEST ! $CLI volume set $V0 min-free-disk ""
+TEST ! $CLI volume set $V0 min-free-disk " "
+TEST $CLI volume set $V0 min-free-disk 60%
+EXPECT "60%" volume_option $V0 cluster.min-free-disk
+
+TEST $CLI volume set $V0 min-free-disk 120
+EXPECT "120" volume_option $V0 cluster.min-free-disk
+
+TEST ! $CLI volume set $V0 frame-timeout ""
+TEST ! $CLI volume set $V0 frame-timeout " "
+TEST $CLI volume set $V0 frame-timeout 0
+EXPECT "0" volume_option $V0 network.frame-timeout
+
+TEST ! $CLI volume set $V0 auth.allow ""
+TEST ! $CLI volume set $V0 auth.allow " "
+TEST $CLI volume set $V0 auth.allow 192.168.122.1
+EXPECT "192.168.122.1" volume_option $V0 auth.allow
+
+#bug-782095 - validate performance cache min/max size value
+
+## setting performance cache min size as 2MB
+TEST $CLI volume set $V0 performance.cache-min-file-size 2MB
+EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## setting performance cache max size as 20MB
+TEST $CLI volume set $V0 performance.cache-max-file-size 20MB
+EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
+
+## trying to set performance cache min size as 25MB
+TEST ! $CLI volume set $V0 performance.cache-min-file-size 25MB
+EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## able to set performance cache min size as long as it is less than the max size
+TEST $CLI volume set $V0 performance.cache-min-file-size 15MB
+EXPECT '15MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## trying to set cache-max-file-size to 10MB (below the current min) should fail
+TEST ! $CLI volume set $V0 cache-max-file-size 10MB
+EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
+
+## finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup
diff --git a/tests/bugs/glusterd/validating-server-quorum.t b/tests/bugs/glusterd/validating-server-quorum.t
new file mode 100644
index 00000000000..ae7d83fd81c
--- /dev/null
+++ b/tests/bugs/glusterd/validating-server-quorum.t
@@ -0,0 +1,125 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
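+# Echo df's exit status for the given mount point: 0 when it is accessible.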
+function check_fs {
+ df $1 &> /dev/null
+ echo $?
+}
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+TEST launch_cluster 3
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+# Let's create the volume
+TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/${V0}1 $H2:$B2/${V0}2 $H3:$B3/${V0}3
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+
+# Start the volume
+TEST $CLI_1 volume start $V0
+
+#bug-1345727 - bricks should be down when quorum is not met
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H3 $B3/${V0}3
+
+# Bring down glusterd on 2nd node
+TEST kill_glusterd 2
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST kill_glusterd 3
+EXPECT_WITHIN $PROBE_TIMEOUT 0 peer_count
+
+# Server quorum is not met. Brick on 1st node must be down
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}1
+
+# Set the quorum ratio to 95, meaning that 95% or more of the total number of
+# nodes should be available for performing any volume operation.
+# i.e. server-side quorum is met if the number of nodes that are available is
+# greater than or equal to 'quorum-ratio' times the number of nodes in the
+# cluster.
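+# For example, with a 3-node cluster and a 95% ratio, 0.95 * 3 = 2.85, so all
+# 3 nodes must be up and losing any one node breaks quorum.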
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 95
+
+#bug-1483058 - replace-brick should fail when quorum is not met
+TEST ! $CLI_1 volume replace-brick $V0 $H2:$B2/${V0}2 $H1:$B1/${V0}2_new commit force
+
+#Bring back 2nd glusterd
+TEST $glusterd_2
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+# Server quorum is still not met. Bricks should be down on 1st and 2nd nodes
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}2
+
+# Bring back 3rd glusterd
+TEST $glusterd_3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+# Server quorum is met now. Bricks should be up on all nodes
+# Check from 3rd instance of glusterd so that the 3rd node finishes all its
+# handshake and then report back the brick status
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 3 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 3 $V0 $H2 $B2/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 3 $V0 $H3 $B3/${V0}3
+
+# Check from 1st instance of glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 3 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 3 $V0 $H2 $B2/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 3 $V0 $H3 $B3/${V0}3
+
+# TODO : Because commit fe71ee7 introduced a delay of 1 sec to wait for shd connect and
+# disconnect events to be serially processed during a restart of shd daemon,
+# this introduced a race where, while the big lock is released, any command
+# that sneaks in and acquires the big lock might end up working on a stale
+# volinfo. We need to find a better way to fix this.
+
+sleep 3
+
+# quorum is met. replace-brick will execute successfully
+EXPECT_WITHIN $PEER_SYNC_TIMEOUT 0 attempt_replace_brick 1 $V0 $H2:$B2/${V0}2 $H2:$B2/${V0}2_new
+
+TEST $CLI_1 volume reset all
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}2_new
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H3 $B3/${V0}3
+
+
+#bug-913555 - volume should become unwritable when quorum is not met
+
+TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
+
+# Kill one pseudo-node, make sure the others survive and volume stays up.
+TEST kill_node 3;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}2_new
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
+
+# Kill another pseudo-node, make sure the last one dies and volume goes down.
+TEST kill_node 2;
+EXPECT_WITHIN $PROBE_TIMEOUT 0 check_peers
+#two glusterfsds of the other two glusterds must be dead
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 check_fs $M0;
+
+TEST $glusterd_2;
+TEST $glusterd_3;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
+
+cleanup