author     Niels de Vos <ndevos@redhat.com>      2014-12-26 12:57:48 +0100
committer  Vijay Bellur <vbellur@redhat.com>     2015-01-06 03:24:24 -0800
commit     64954eb3c58f4ef077e54e8a3726fd2d27419b12 (patch)
tree       52cd5a39bbfda7442a5f0955ac2800b74a45b58a /tests/bugs/glusterd
parent     c4ab37c02e9edc23d0637e23d6f2b42d0827dad2 (diff)
tests: move all test-cases into component subdirectories
There are around 300 regression tests, 250 of them in tests/bugs. Running a partial set of tests/bugs is not easy because this is a flat directory with almost all tests inside. It would be valuable to make running a partial set of tests/bugs easier, and to allow the use of multiple build hosts for a single commit, each running a subset of the tests for a quicker result.

Additional changes made:
- correct the include path for *.rc shell libraries and *.py utils
- make the testcases pass checkpatch
- arequal-checksum in afr/self-heal.t was never executed, now it is
- include.rc now complains loudly if it fails to find env.rc

Change-Id: I26ffd067e9853d3be1fd63b2f37d8aa0fd1b4fea
BUG: 1178685
Reported-by: Emmanuel Dreyfus <manu@netbsd.org>
Reported-by: Atin Mukherjee <amukherj@redhat.com>
URL: http://www.gluster.org/pipermail/gluster-devel/2014-December/043414.html
Signed-off-by: Niels de Vos <ndevos@redhat.com>
Reviewed-on: http://review.gluster.org/9353
Reviewed-by: Kaleb KEITHLEY <kkeithle@redhat.com>
Reviewed-by: Emmanuel Dreyfus <manu@netbsd.org>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
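One of the changes listed above is the include-path correction: every relocated test now climbs out of its component directory to reach the shared shell libraries. A minimal sketch of the resulting pattern, mirroring bug-000000.t further below (the sketch itself is illustrative and not part of the commit):

    #!/bin/bash
    # A test at tests/bugs/<component>/<name>.t now climbs two levels to the
    # shared libraries; one in a sub-directory (e.g. bug-857330/) climbs three.
    . $(dirname $0)/../../include.rc
    . $(dirname $0)/../../volume.rc

    cleanup;

    TEST glusterd
    TEST pidof glusterd

    cleanup;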
Diffstat (limited to 'tests/bugs/glusterd')
-rwxr-xr-x  tests/bugs/glusterd/859927/repl.t | 69
-rwxr-xr-x  tests/bugs/glusterd/bug-000000.t | 9
-rwxr-xr-x  tests/bugs/glusterd/bug-1002556.t | 25
-rw-r--r--  tests/bugs/glusterd/bug-1004744.t | 46
-rwxr-xr-x  tests/bugs/glusterd/bug-1022055.t | 26
-rw-r--r--  tests/bugs/glusterd/bug-1027171.t | 53
-rw-r--r--  tests/bugs/glusterd/bug-1040408.t | 31
-rw-r--r--  tests/bugs/glusterd/bug-1046308.t | 19
-rw-r--r--  tests/bugs/glusterd/bug-1047955.t | 23
-rwxr-xr-x  tests/bugs/glusterd/bug-1070734.t | 74
-rw-r--r--  tests/bugs/glusterd/bug-1075087.t | 33
-rwxr-xr-x  tests/bugs/glusterd/bug-1085330.t | 80
-rw-r--r--  tests/bugs/glusterd/bug-1087203.t | 103
-rwxr-xr-x  tests/bugs/glusterd/bug-1089668.t | 27
-rwxr-xr-x  tests/bugs/glusterd/bug-1090042.t | 30
-rwxr-xr-x  tests/bugs/glusterd/bug-1091935-brick-order-check-from-cli-to-glusterd.t | 27
-rw-r--r--  tests/bugs/glusterd/bug-1092841.t | 24
-rwxr-xr-x  tests/bugs/glusterd/bug-1095097.t | 21
-rw-r--r--  tests/bugs/glusterd/bug-1102656.t | 20
-rw-r--r--  tests/bugs/glusterd/bug-1104642.t | 47
-rw-r--r--  tests/bugs/glusterd/bug-1109741-auth-mgmt-handshake.t | 50
-rw-r--r--  tests/bugs/glusterd/bug-1109770.t | 65
-rw-r--r--  tests/bugs/glusterd/bug-1109889.t | 74
-rw-r--r--  tests/bugs/glusterd/bug-1111041.t | 36
-rwxr-xr-x  tests/bugs/glusterd/bug-1112559.t | 61
-rw-r--r--  tests/bugs/glusterd/bug-1112613.t | 49
-rw-r--r--  tests/bugs/glusterd/bug-1113975.t | 38
-rw-r--r--  tests/bugs/glusterd/bug-1120647.t | 17
-rw-r--r--  tests/bugs/glusterd/bug-1140162-file-snapshot-and-features-encryption-option-validation.t | 33
-rwxr-xr-x  tests/bugs/glusterd/bug-1173414-mgmt-v3-remote-lock-failure.t | 34
-rwxr-xr-x  tests/bugs/glusterd/bug-765230.t | 60
-rwxr-xr-x  tests/bugs/glusterd/bug-782095.t | 48
-rw-r--r--  tests/bugs/glusterd/bug-824753-file-locker.c | 42
-rwxr-xr-x  tests/bugs/glusterd/bug-824753.t | 45
-rw-r--r--  tests/bugs/glusterd/bug-839595.t | 31
-rw-r--r--  tests/bugs/glusterd/bug-857330/common.rc | 55
-rwxr-xr-x  tests/bugs/glusterd/bug-857330/normal.t | 79
-rwxr-xr-x  tests/bugs/glusterd/bug-857330/xml.t | 103
-rwxr-xr-x  tests/bugs/glusterd/bug-859927.t | 70
-rwxr-xr-x  tests/bugs/glusterd/bug-862834.t | 46
-rw-r--r--  tests/bugs/glusterd/bug-878004.t | 29
-rw-r--r--  tests/bugs/glusterd/bug-888752.t | 24
-rwxr-xr-x  tests/bugs/glusterd/bug-889630.t | 56
-rw-r--r--  tests/bugs/glusterd/bug-905307.t | 36
-rw-r--r--  tests/bugs/glusterd/bug-913487.t | 14
-rwxr-xr-x  tests/bugs/glusterd/bug-913555.t | 54
-rwxr-xr-x  tests/bugs/glusterd/bug-916549.t | 19
-rwxr-xr-x  tests/bugs/glusterd/bug-948686.t | 46
-rw-r--r--  tests/bugs/glusterd/bug-948729/bug-948729-force.t | 103
-rw-r--r--  tests/bugs/glusterd/bug-948729/bug-948729-mode-script.t | 77
-rw-r--r--  tests/bugs/glusterd/bug-948729/bug-948729.t | 80
-rw-r--r--  tests/bugs/glusterd/bug-949930.t | 27
-rwxr-xr-x  tests/bugs/glusterd/bug-955588.t | 27
-rw-r--r--  tests/bugs/glusterd/bug-958790.t | 21
-rw-r--r--  tests/bugs/glusterd/bug-961669.t | 48
-rwxr-xr-x  tests/bugs/glusterd/bug-963541.t | 33
-rwxr-xr-x  tests/bugs/glusterd/bug-964059.t | 30
-rw-r--r--  tests/bugs/glusterd/bug-974007.t | 52
58 files changed, 2599 insertions, 0 deletions
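With the tests grouped per component as listed in the diffstat above, a subset can be exercised by pointing at a single subdirectory. A minimal sketch, assuming the regression tests are run directly as bash scripts from the top of the source tree (the loop is illustrative and not part of the commit):

    #!/bin/bash
    # Run only the glusterd bug tests; each .t file is a self-contained bash
    # script that sources include.rc itself, so it can be invoked directly.
    for t in tests/bugs/glusterd/*.t tests/bugs/glusterd/*/*.t; do
        bash "$t" || echo "FAILED: $t"
    done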
diff --git a/tests/bugs/glusterd/859927/repl.t b/tests/bugs/glusterd/859927/repl.t
new file mode 100755
index 00000000000..db96d70bfa6
--- /dev/null
+++ b/tests/bugs/glusterd/859927/repl.t
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd
+
+#Tests for data-self-heal-algorithm option
+function create_setup_for_self_heal {
+ file=$1
+ kill_brick $V0 $H0 $B0/${V0}1
+ dd of=$file if=/dev/urandom bs=1024k count=1 2>&1 > /dev/null
+ $CLI volume start $V0 force
+}
+
+function test_write {
+ dd of=$M0/a if=/dev/urandom bs=1k count=1 2>&1 > /dev/null
+}
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 client-log-level DEBUG
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0;
+
+touch $M0/a
+
+TEST $CLI volume set $V0 cluster.data-self-heal-algorithm full
+EXPECT full volume_option $V0 cluster.data-self-heal-algorithm
+create_setup_for_self_heal $M0/a
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+cat $file 2>&1 > /dev/null
+TEST cmp $B0/${V0}1/a $B0/${V0}2/a
+
+TEST $CLI volume set $V0 cluster.data-self-heal-algorithm diff
+EXPECT diff volume_option $V0 cluster.data-self-heal-algorithm
+create_setup_for_self_heal $M0/a
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+cat $file 2>&1 > /dev/null
+TEST cmp $B0/${V0}1/a $B0/${V0}2/a
+
+TEST $CLI volume reset $V0 cluster.data-self-heal-algorithm
+create_setup_for_self_heal $M0/a
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+cat $file 2>&1 > /dev/null
+TEST cmp $B0/${V0}1/a $B0/${V0}2/a
+
+TEST ! $CLI volume set $V0 cluster.data-self-heal-algorithm ""
+
+#Tests for quorum-type option
+TEST ! $CLI volume set $V0 cluster.quorum-type ""
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+EXPECT fixed volume_option $V0 cluster.quorum-type
+TEST $CLI volume set $V0 cluster.quorum-count 2
+kill_brick $V0 $H0 $B0/${V0}1
+TEST ! test_write
+TEST $CLI volume set $V0 cluster.quorum-type auto
+EXPECT auto volume_option $V0 cluster.quorum-type
+TEST ! test_write
+TEST $CLI volume set $V0 cluster.quorum-type none
+EXPECT none volume_option $V0 cluster.quorum-type
+TEST test_write
+TEST $CLI volume reset $V0 cluster.quorum-type
+TEST test_write
+cleanup;
diff --git a/tests/bugs/glusterd/bug-000000.t b/tests/bugs/glusterd/bug-000000.t
new file mode 100755
index 00000000000..55f7b11f598
--- /dev/null
+++ b/tests/bugs/glusterd/bug-000000.t
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1002556.t b/tests/bugs/glusterd/bug-1002556.t
new file mode 100755
index 00000000000..ac71d06d533
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1002556.t
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
+
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}2
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks';
+
+TEST $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}1 force
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
+
+TEST killall glusterd
+TEST glusterd
+
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
+cleanup
diff --git a/tests/bugs/glusterd/bug-1004744.t b/tests/bugs/glusterd/bug-1004744.t
new file mode 100644
index 00000000000..b48ed97fb52
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1004744.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+#Test case: After a rebalance fix-layout, check if the rebalance status command
+#displays the appropriate message at the CLI.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a 2x1 distributed volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume start $V0
+
+# Mount FUSE and create file/directory
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+for i in `seq 1 10`;
+do
+ mkdir $M0/dir_$i
+ echo file>$M0/dir_$i/file_$i
+ for j in `seq 1 100`;
+ do
+ mkdir $M0/dir_$i/dir_$j
+ echo file>$M0/dir_$i/dir_$j/file_$j
+ done
+done
+
+#add 2 bricks
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{3,4};
+
+#perform rebalance fix-layout
+TEST $CLI volume rebalance $V0 fix-layout start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" rebalance_status_field $V0;
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1022055.t b/tests/bugs/glusterd/bug-1022055.t
new file mode 100755
index 00000000000..9f39c80b6b6
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1022055.t
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0;
+
+TEST $CLI_1 volume start $V0;
+
+TEST $CLI_1 volume log rotate $V0;
+
+TEST $CLI_1 volume status;
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1027171.t b/tests/bugs/glusterd/bug-1027171.t
new file mode 100644
index 00000000000..1b457d8f660
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1027171.t
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+#Test case: Do not allow commit if the bricks are not decommissioned
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a Distributed volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2};
+TEST $CLI volume start $V0
+
+#Remove bricks and commit without starting
+function remove_brick_commit_status {
+ $CLI volume remove-brick $V0 \
+ $H0:$B0/${V0}2 commit 2>&1 |grep -oE "success|decommissioned"
+}
+EXPECT "decommissioned" remove_brick_commit_status;
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+TEST ! $CLI volume info $V0
+
+#Create a Distributed-Replicate volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..4};
+TEST $CLI volume start $V0
+
+#Try to reduce replica count with start option
+function remove_brick_start_status {
+ $CLI volume remove-brick $V0 replica 1 \
+ $H0:$B0/${V0}1 $H0:$B0/${V0}3 start 2>&1 |grep -oE "success|failed"
+}
+EXPECT "failed" remove_brick_start_status;
+
+#Remove bricks with commit option
+function remove_brick_commit_status2 {
+ $CLI volume remove-brick $V0 replica 1 \
+ $H0:$B0/${V0}1 $H0:$B0/${V0}3 commit 2>&1 |
+ grep -oE "success|decommissioned"
+}
+EXPECT "decommissioned" remove_brick_commit_status2;
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+TEST ! $CLI volume info $V0
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1040408.t b/tests/bugs/glusterd/bug-1040408.t
new file mode 100644
index 00000000000..c378000630b
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1040408.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+#Test case: Create a distributed replicate volume, and reduce
+#replica count
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a 2X3 distributed-replicate volume
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..6};
+TEST $CLI volume start $V0
+
+# Reduce to 2x2 volume by specifying bricks in reverse order
+function remove_brick_status {
+ $CLI volume remove-brick $V0 replica 2 \
+ $H0:$B0/${V0}6 $H0:$B0/${V0}3 force 2>&1 |grep -oE "success|failed"
+}
+EXPECT "success" remove_brick_status;
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1046308.t b/tests/bugs/glusterd/bug-1046308.t
new file mode 100644
index 00000000000..9c827c4a492
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1046308.t
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+volname="StartMigrationDuringRebalanceTest"
+TEST glusterd
+TEST pidof glusterd;
+
+TEST $CLI volume info;
+TEST $CLI volume create $volname $H0:$B0/${volname}{1,2};
+TEST $CLI volume start $volname;
+TEST $CLI volume rebalance $volname start;
+
+cleanup;
+
+
+
diff --git a/tests/bugs/glusterd/bug-1047955.t b/tests/bugs/glusterd/bug-1047955.t
new file mode 100644
index 00000000000..a409d9f7195
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1047955.t
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+# Create a 2x2 dist-rep volume; peer probe a new node.
+# Performing remove-brick from this new node must succeed
+# without crashing its glusterd
+
+TEST launch_cluster 2;
+TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/${V0}{1,2,3,4}
+TEST $CLI_1 volume start $V0;
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+TEST $CLI_2 volume remove-brick $V0 $H1:$B1/${V0}{3,4} start;
+TEST $CLI_2 volume info
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1070734.t b/tests/bugs/glusterd/bug-1070734.t
new file mode 100755
index 00000000000..b5a53c24cab
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1070734.t
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+## Let's create the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0;
+
+############################################################################
+#TEST-PLAN:
+#Create a directory DIR and a file inside DIR
+#check which brick the file hashed to
+#delete the directory so it can be recreated after the remove-brick
+#remove the brick that the file hashed to
+#After remove-brick status says complete, go on creating the same directory \
+#DIR and file
+#Check if the file now falls into the other brick
+#Check if the other brick gets the full layout and the removed brick gets \
+#the zeroed layout
+############################################################################
+
+TEST mkdir $N0/DIR;
+
+TEST touch $N0/DIR/file;
+
+if [ -f $B0/${V0}1/DIR/file ]
+then
+ HASHED=$B0/${V0}1;
+ OTHERBRICK=$B0/${V0}2;
+else
+ HASHED=$B0/${V0}2;
+ OTHERBRICK=$B0/${V0}1;
+fi
+
+TEST rm -f $N0/DIR/file;
+TEST rmdir $N0/DIR;
+TEST $CLI volume remove-brick $V0 $H0:${HASHED} start;
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" \
+"$H0:${HASHED}";
+
+TEST mkdir $N0/DIR;
+TEST touch $N0/DIR/file;
+
+#Check that the file now falls into OTHERBRICK
+TEST [ -f ${OTHERBRICK}/DIR/file ]
+
+#Check that DIR on HASHED got the zeroed layout and \
+#OTHERBRICK got the full layout
+EXPECT "0x00000001000000000000000000000000" dht_get_layout $HASHED/DIR ;
+EXPECT "0x000000010000000000000000ffffffff" dht_get_layout $OTHERBRICK/DIR;
+
+## Unmount before killing the daemon, to avoid deadlocks
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1075087.t b/tests/bugs/glusterd/bug-1075087.t
new file mode 100644
index 00000000000..35155a0b8c9
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1075087.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 \
+ $H0:$B0/${V0}2 $H0:$B0/${V0}3
+TEST $CLI volume start $V0
+
+## Mount FUSE
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0;
+
+TEST mkdir $M0/dir{1..10};
+TEST touch $M0/dir{1..10}/files{1..10};
+
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}4 $H0:/$B0/${V0}5
+
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN 60 "completed" rebalance_status_field $V0
+
+TEST pkill gluster
+TEST glusterd
+TEST pidof glusterd
+
+# status should be "completed" immediately after glusterd has respawned.
+EXPECT_WITHIN 5 "completed" rebalance_status_field $V0
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1085330.t b/tests/bugs/glusterd/bug-1085330.t
new file mode 100755
index 00000000000..ffcfe9274eb
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1085330.t
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+STR="1234567890"
+volname="Vol"
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+
+# Construct the volname string such that it's more than 256 characters
+for i in {1..30}
+do
+ volname+=$STR
+done
+# Now $volname is more than 256 chars
+
+TEST ! $CLI volume create $volname $H0:$B0/${volname}{1,2};
+
+TEST $CLI volume info;
+
+# Construct the brick string such that it's more than 256 characters
+volname="Vol1234"
+brick="brick"
+for i in {1..30}
+do
+ brick+=$STR
+done
+# Now $brick is more than 256 chars
+
+TEST ! $CLI volume create $volname $H0:$B0/$brick;
+
+TEST $CLI volume info;
+
+# Now try to create a volume with a couple of bricks (strlen(volname) = 128 &
+# strlen(brick) = 128)
+# The command should still fail as strlen(volfile path) > 256
+
+volname="Volume-0"
+brick="brick-00"
+STR="12345678"
+
+for i in {1..15}
+do
+ volname+=$STR
+ brick+=$STR
+done
+TEST ! $CLI volume create $volname $H0:$B0/$brick;
+
+TEST $CLI volume info;
+
+# test case with brick path as 255 and a trailing "/"
+brick=""
+STR1="12345678"
+volname="vol"
+
+for i in {1..31}
+do
+ brick+=$STR1
+done
+brick+="123456/"
+
+echo $brick | wc -c
+# Now $brick is exactly 255 chars, with a trailing "/" at the end
+# This will still fail as the volfile path exceeds _POSIX_PATH_MAX chars
+
+TEST ! $CLI volume create $volname $H0:$B0/$brick;
+
+TEST $CLI volume info;
+
+# Positive test case
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+TEST $CLI volume info;
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1087203.t b/tests/bugs/glusterd/bug-1087203.t
new file mode 100644
index 00000000000..035be098576
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1087203.t
@@ -0,0 +1,103 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../cluster.rc
+
+function get_volume_info ()
+{
+ local var=$1
+ $CLI_1 volume info $V0 | grep "^$var" | sed 's/.*: //'
+}
+
+cleanup;
+
+TEST verify_lvm_version
+TEST launch_cluster 2
+TEST setup_lvm 2
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
+
+TEST $CLI_1 volume create $V0 $H1:$L1 $H2:$L2
+EXPECT "$V0" get_volume_info 'Volume Name';
+EXPECT 'Created' get_volume_info 'Status';
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' get_volume_info 'Status';
+
+
+# Setting system limit
+TEST $CLI_1 snapshot config snap-max-hard-limit 100
+
+# Volume limit cannot exceed system limit, as limit is set to 100,
+# this should fail.
+TEST ! $CLI_1 snapshot config $V0 snap-max-hard-limit 101
+
+# Following are the invalid cases
+TEST ! $CLI_1 snapshot config $V0 snap-max-hard-limit a10
+TEST ! $CLI_1 snapshot config snap-max-hard-limit 10a
+TEST ! $CLI_1 snapshot config snap-max-hard-limit 10%
+TEST ! $CLI_1 snapshot config snap-max-soft-limit 50%1
+TEST ! $CLI_1 snapshot config snap-max-soft-limit 0111
+TEST ! $CLI_1 snapshot config snap-max-hard-limit OXA
+TEST ! $CLI_1 snapshot config snap-max-hard-limit 11.11
+TEST ! $CLI_1 snapshot config snap-max-soft-limit 50%
+TEST ! $CLI_1 snapshot config snap-max-hard-limit -100
+TEST ! $CLI_1 snapshot config snap-max-soft-limit -90
+
+# Soft limit cannot be assigned to volume
+TEST ! $CLI_1 snapshot config $V0 snap-max-soft-limit 10
+
+# Valid case
+TEST $CLI_1 snapshot config snap-max-soft-limit 50
+TEST $CLI_1 snapshot config $V0 snap-max-hard-limit 10
+
+# Validating auto-delete feature
+# Make sure auto-delete is disabled by default
+EXPECT 'disable' snap_config CLI_1 'auto-delete'
+
+# Test for invalid value for auto-delete
+TEST ! $CLI_1 snapshot config auto-delete test
+
+TEST $CLI_1 snapshot config snap-max-hard-limit 6
+TEST $CLI_1 snapshot config snap-max-soft-limit 50
+
+# Create 4 snapshots
+snap_index=1
+snap_count=4
+TEST snap_create CLI_1 $V0 $snap_index $snap_count
+
+# If auto-delete is disabled then oldest snapshot
+# should not be deleted automatically.
+EXPECT '4' get_snap_count CLI_1;
+
+TEST snap_delete CLI_1 $snap_index $snap_count;
+
+# After all those 4 snaps are deleted, there will not be any snaps present
+EXPECT '0' get_snap_count CLI_1;
+
+TEST $CLI_1 snapshot config auto-delete enable
+
+# auto-delete is already enabled, hence expect a failure.
+TEST ! $CLI_1 snapshot config auto-delete on
+
+# Testing other boolean values with auto-delete
+TEST $CLI_1 snapshot config auto-delete off
+EXPECT 'off' snap_config CLI_1 'auto-delete'
+
+TEST $CLI_1 snapshot config auto-delete true
+EXPECT 'true' snap_config CLI_1 'auto-delete'
+
+# Try to create 4 snaps again. As auto-delete is enabled,
+# the oldest snap should be deleted and the snap count should be 3
+
+TEST snap_create CLI_1 $V0 $snap_index $snap_count;
+EXPECT '3' get_snap_count CLI_1;
+
+TEST $CLI_1 snapshot config auto-delete disable
+EXPECT 'disable' snap_config CLI_1 'auto-delete'
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/bug-1089668.t b/tests/bugs/glusterd/bug-1089668.t
new file mode 100755
index 00000000000..f2b99bf6051
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1089668.t
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../dht.rc
+
+cleanup
+
+#This script checks that the command 'gluster volume rebalance <volname> status'
+#will not show any output when the user has done only a remove-brick start, and
+#that the command 'gluster volume remove-brick <volname> <brick_name> status'
+#will not show any output when the user has triggered only a rebalance start.
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}
+TEST $CLI volume start $V0
+
+TEST $CLI volume rebalance $V0 start
+TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}1 status
+
+TEST $CLI volume rebalance $V0 stop
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
+TEST ! $CLI volume rebalance $V0 status
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1090042.t b/tests/bugs/glusterd/bug-1090042.t
new file mode 100755
index 00000000000..b4df8e6cebe
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1090042.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+TEST init_n_bricks 3;
+TEST setup_lvm 3;
+TEST glusterd;
+
+TEST $CLI volume create $V0 replica 3 $H0:$L1 $H0:$L2 $H0:$L3;
+TEST $CLI volume start $V0;
+
+TEST kill_brick $V0 $H0 $L1;
+
+#Normal snap create should fail
+TEST ! $CLI snapshot create ${V0}_snap1 $V0;
+TEST ! snapshot_exists 0 ${V0}_snap1;
+
+#Force snap create should succeed
+TEST $CLI snapshot create ${V0}_snap1 $V0 force;
+TEST snapshot_exists 0 ${V0}_snap1;
+
+#Delete the created snap
+TEST $CLI snapshot delete ${V0}_snap1;
+TEST ! snapshot_exists 0 ${V0}_snap1;
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1091935-brick-order-check-from-cli-to-glusterd.t b/tests/bugs/glusterd/bug-1091935-brick-order-check-from-cli-to-glusterd.t
new file mode 100755
index 00000000000..01cc5b56097
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1091935-brick-order-check-from-cli-to-glusterd.t
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+cli1=$(echo $CLI | sed 's/ --wignore//')
+
+# Creating a volume with a non-resolvable hostname
+TEST ! $cli1 volume create $V0 replica 2 $H0:$B0/${V0}0 redhat:$B0/${V0}1 \
+ $H0:$B0/${V0}2 redhat:$B0/${V0}3
+
+# Creating a distribute-replicate volume with a bad brick order. It will fail
+# due to the bad brick order.
+TEST ! $cli1 volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 \
+ $H0:$B0/${V0}2 $H0:$B0/${V0}3
+
+# Now, with force at the end of the command, it will bypass the brick-order
+# check for replicate or distribute-replicate volumes and create the volume.
+TEST $cli1 volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 \
+ $H0:$B0/${V0}2 $H0:$B0/${V0}3 force
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1092841.t b/tests/bugs/glusterd/bug-1092841.t
new file mode 100644
index 00000000000..d3dcf07fd02
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1092841.t
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+TEST $CLI volume start $V0;
+
+TEST $CLI volume barrier $V0 enable;
+
+TEST ! $CLI volume barrier $V0 enable;
+
+TEST $CLI volume barrier $V0 disable;
+
+TEST ! $CLI volume barrier $V0 disable;
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1095097.t b/tests/bugs/glusterd/bug-1095097.t
new file mode 100755
index 00000000000..0fe29f06630
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1095097.t
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B1/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST $CLI volume profile $V0 start
+TEST $CLI volume profile $V0 info
+TEST $CLI volume replace-brick $V0 $H0:$B0/brick1 $H0:$B0/brick2 start
+TEST $CLI volume replace-brick $V0 $H0:$B0/brick1 $H0:$B0/brick2 status
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1102656.t b/tests/bugs/glusterd/bug-1102656.t
new file mode 100644
index 00000000000..e80f4930a63
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1102656.t
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status';
+
+TEST $CLI volume top $V0 open
+TEST ! $CLI volume top $V0 open brick $H0:/tmp/brick
+TEST $CLI volume top $V0 read
+
+TEST $CLI volume status
+TEST $CLI volume stop $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Stopped' volinfo_field $V0 'Status';
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1104642.t b/tests/bugs/glusterd/bug-1104642.t
new file mode 100644
index 00000000000..a45a617d235
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1104642.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+
+function get_value()
+{
+ local key=$1
+ local var="CLI_$2"
+
+ eval cli_index=\$$var
+
+ $cli_index volume info | grep "^$key"\
+ | sed 's/.*: //'
+}
+
+cleanup
+
+TEST launch_cluster 2
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
+EXPECT "$V0" get_value 'Volume Name' 1
+EXPECT "Created" get_value 'Status' 1
+
+TEST $CLI_1 volume start $V0
+EXPECT "Started" get_value 'Status' 1
+
+#Bring down 2nd glusterd
+TEST kill_glusterd 2
+
+#Set the cluster-wide option ('volume set all') from the 1st glusterd
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 80
+
+#Bring back the 2nd glusterd
+TEST $glusterd_2
+
+#Verify whether the value has been synced
+EXPECT '80' get_value 'cluster.server-quorum-ratio' 1
+EXPECT_WITHIN $PROBE_TIMEOUT '1' peer_count
+EXPECT '80' get_value 'cluster.server-quorum-ratio' 2
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1109741-auth-mgmt-handshake.t b/tests/bugs/glusterd/bug-1109741-auth-mgmt-handshake.t
new file mode 100644
index 00000000000..561b90740fa
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1109741-auth-mgmt-handshake.t
@@ -0,0 +1,50 @@
+#! /bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+# The test will attempt to verify that management handshake requests to
+# GlusterD are authenticated before being allowed to change a GlusterD's
+# op-version
+#
+# 1. Launch 3 glusterds
+# 2. Probe 2 of them to form a cluster. This should succeed.
+# 3. Probe either of the first two GlusterDs from the 3rd GlusterD. This should fail.
+# 4. a. Reduce the op-version of the 3rd GlusterD and restart it.
+#    b. Probe either of the first two GlusterDs from the 3rd GlusterD. This should fail.
+# 5. Check the current op-version of the first two GlusterDs. It shouldn't have changed.
+# 6. Probe the third GlusterD from the cluster. This should succeed.
+
+
+cleanup
+
+TEST launch_cluster 3
+
+TEST $CLI_1 peer probe $H2
+
+TEST ! $CLI_3 peer probe $H1
+
+GD1_WD=$($CLI_1 system getwd)
+OP_VERS_ORIG=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2)
+
+TEST $CLI_3 system uuid get # Needed for glusterd.info to be created
+
+GD3_WD=$($CLI_3 system getwd)
+TEST sed -rnie "'s/(operating-version=)\w+/\130600/gip'" ${GD3_WD}/glusterd.info
+
+TEST kill_glusterd 3
+TEST start_glusterd 3
+
+TEST ! $CLI_3 peer probe $H1
+
+OP_VERS_NEW=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2)
+TEST [[ $OP_VERS_ORIG == $OP_VERS_NEW ]]
+
+TEST $CLI_1 peer probe $H3
+
+kill_node 1
+kill_node 2
+kill_node 3
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/bug-1109770.t b/tests/bugs/glusterd/bug-1109770.t
new file mode 100644
index 00000000000..eca4969f2e3
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1109770.t
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../fileio.rc
+. $(dirname $0)/../../nfs.rc
+
+cleanup;
+
+TEST init_n_bricks 3;
+TEST setup_lvm 3;
+
+TEST glusterd;
+
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3;
+
+TEST $CLI volume start $V0;
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+for i in {1..10} ; do echo "file" > $M0/file$i ; done
+
+TEST $CLI snapshot create snap1 $V0;
+
+for i in {11..20} ; do echo "file" > $M0/file$i ; done
+
+TEST $CLI snapshot create snap2 $V0;
+
+mkdir $M0/dir1;
+mkdir $M0/dir2;
+
+for i in {1..10} ; do echo "foo" > $M0/dir1/foo$i ; done
+for i in {1..10} ; do echo "foo" > $M0/dir2/foo$i ; done
+
+TEST $CLI snapshot create snap3 $V0;
+
+for i in {11..20} ; do echo "foo" > $M0/dir1/foo$i ; done
+for i in {11..20} ; do echo "foo" > $M0/dir2/foo$i ; done
+
+TEST $CLI snapshot create snap4 $V0;
+
+TEST $CLI volume set $V0 features.uss enable;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
+
+TEST $CLI volume set $V0 features.uss disable;
+
+SNAPD_PID=$(ps auxww | grep snapd | grep -v grep | awk '{print $2}');
+
+TEST ! [ $SNAPD_PID -gt 0 ];
+
+TEST $CLI volume set $V0 features.uss enable;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
+
+TEST $CLI volume stop $V0;
+
+SNAPD_PID=$(ps auxww | grep snapd | grep -v grep | awk '{print $2}');
+
+TEST ! [ $SNAPD_PID -gt 0 ];
+
+cleanup ;
diff --git a/tests/bugs/glusterd/bug-1109889.t b/tests/bugs/glusterd/bug-1109889.t
new file mode 100644
index 00000000000..eac5ac17f5b
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1109889.t
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../fileio.rc
+. $(dirname $0)/../../nfs.rc
+
+cleanup;
+
+TEST init_n_bricks 3;
+TEST setup_lvm 3;
+
+TEST glusterd;
+
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3;
+
+TEST $CLI volume start $V0;
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+MOUNT_PID=`ps ax |grep "glusterfs --volfile-sever $H0 --volfile-id=$V0 $M0" | grep -v grep | awk '{print $1}' | head -1`
+
+for i in {1..10} ; do echo "file" > $M0/file$i ; done
+
+TEST $CLI snapshot config activate-on-create enable
+
+TEST $CLI snapshot create snap1 $V0;
+
+for i in {11..20} ; do echo "file" > $M0/file$i ; done
+
+TEST $CLI snapshot create snap2 $V0;
+
+mkdir $M0/dir1;
+mkdir $M0/dir2;
+
+for i in {1..10} ; do echo "foo" > $M0/dir1/foo$i ; done
+for i in {1..10} ; do echo "foo" > $M0/dir2/foo$i ; done
+
+TEST $CLI snapshot create snap3 $V0;
+
+for i in {11..20} ; do echo "foo" > $M0/dir1/foo$i ; done
+for i in {11..20} ; do echo "foo" > $M0/dir2/foo$i ; done
+
+TEST $CLI snapshot create snap4 $V0;
+
+TEST $CLI volume set $V0 features.uss enable;
+
+#let snapd start properly and the client connect to snapd
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" snap_client_connected_status $V0
+
+SNAPD_PID=$(ps auxww | grep snapd | grep -v grep | awk '{print $2}');
+
+TEST [ $SNAPD_PID -gt 0 ];
+
+TEST stat $M0/.snaps;
+
+kill -KILL $SNAPD_PID;
+
+# let snapd die properly
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "0" snap_client_connected_status $V0
+
+TEST ! stat $M0/.snaps;
+
+TEST $CLI volume start $V0 force;
+
+# let client get the snapd port from glusterd and connect
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" snap_client_connected_status $V0
+
+TEST stat $M0/.snaps;
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1111041.t b/tests/bugs/glusterd/bug-1111041.t
new file mode 100644
index 00000000000..9e72c50990d
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1111041.t
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+. $(dirname $0)/../../nfs.rc
+
+cleanup;
+
+function is_snapd_running {
+ $CLI volume status $1 | grep "Snapshot Daemon" | wc -l;
+}
+
+TEST glusterd;
+
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+
+TEST $CLI volume start $V0;
+
+EXPECT "0" is_snapd_running $v0
+
+TEST $CLI volume set $V0 features.uss enable;
+
+EXPECT "1" is_snapd_running $V0
+
+SNAPD_PID=$(ps auxww | grep snapd | grep -v grep | awk '{print $2}');
+
+TEST [ $SNAPD_PID -gt 0 ];
+
+SNAPD_PID2=$($CLI volume status $V0 | grep "Snapshot Daemon" | awk {'print $7'});
+
+TEST [ $SNAPD_PID -eq $SNAPD_PID2 ]
+
+cleanup ;
diff --git a/tests/bugs/glusterd/bug-1112559.t b/tests/bugs/glusterd/bug-1112559.t
new file mode 100755
index 00000000000..f318db61b8a
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1112559.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+function check_snaps_status {
+ $CLI_1 snapshot status | grep 'Snap Name : ' | wc -l
+}
+
+function check_snaps_bricks_health {
+ $CLI_1 snapshot status | grep 'Brick Running : Yes' | wc -l
+}
+
+
+SNAP_COMMAND_TIMEOUT=40
+NUMBER_OF_BRICKS=2
+
+cleanup;
+TEST verify_lvm_version
+TEST launch_cluster 3
+TEST setup_lvm 3
+
+TEST $CLI_1 peer probe $H2
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$L1 $H2:$L2
+
+TEST $CLI_1 volume start $V0
+
+#Create snapshot and add a peer together
+$CLI_1 snapshot create ${V0}_snap1 ${V0} &
+PID_1=$!
+$CLI_1 peer probe $H3
+wait $PID_1
+
+#Snapshot should be created and in the snaplist
+TEST snapshot_exists 1 ${V0}_snap1
+
+#Not being paranoid! Just checking the status of the snapshot.
+#During the testing of the bug the snapshot would be listed but not
+#actually be created. Therefore check the health of the snapshot.
+EXPECT_WITHIN $SNAP_COMMAND_TIMEOUT 1 check_snaps_status
+
+#Disabling the check of the snap brick status; investigation of the
+#snap brick port-bind failure will continue.
+#EXPECT_WITHIN $SNAP_COMMAND_TIMEOUT $NUMBER_OF_BRICKS check_snaps_bricks_health
+
+#check if the peer is added successfully
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+TEST $CLI_1 snapshot delete ${V0}_snap1
+
+cleanup;
+
+
diff --git a/tests/bugs/glusterd/bug-1112613.t b/tests/bugs/glusterd/bug-1112613.t
new file mode 100644
index 00000000000..e566de056bc
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1112613.t
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+V1="patchy2"
+
+TEST verify_lvm_version;
+TEST launch_cluster 2
+TEST setup_lvm 2
+
+TEST $CLI_1 peer probe $H2
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$L1
+TEST $CLI_1 volume start $V0
+TEST $CLI_1 volume create $V1 $H2:$L2
+TEST $CLI_1 volume start $V1
+
+# Create 3 snapshots for volume $V0
+snap_count=3
+snap_index=1
+TEST snap_create CLI_1 $V0 $snap_index $snap_count;
+
+# Create 4 snapshots for volume $V1
+snap_count=4
+snap_index=11
+TEST snap_create CLI_1 $V1 $snap_index $snap_count;
+
+EXPECT '3' get_snap_count CLI_1 $V0;
+EXPECT '4' get_snap_count CLI_1 $V1;
+EXPECT '7' get_snap_count CLI_1
+
+TEST $CLI_1 snapshot delete volume $V0
+EXPECT '0' get_snap_count CLI_1 $V0;
+EXPECT '4' get_snap_count CLI_1 $V1;
+EXPECT '4' get_snap_count CLI_1
+
+TEST $CLI_1 snapshot delete all
+EXPECT '0' get_snap_count CLI_1 $V0;
+EXPECT '0' get_snap_count CLI_1 $V1;
+EXPECT '0' get_snap_count CLI_1
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/bug-1113975.t b/tests/bugs/glusterd/bug-1113975.t
new file mode 100644
index 00000000000..c1b9b1e3e2c
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1113975.t
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+TEST init_n_bricks 3;
+TEST setup_lvm 3;
+
+TEST glusterd;
+
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3;
+
+TEST $CLI volume start $V0;
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+for i in {1..10} ; do echo "file" > $M0/file$i ; done
+
+TEST $CLI snapshot create snap1 $V0;
+
+for i in {11..20} ; do echo "file" > $M0/file$i ; done
+
+TEST $CLI snapshot create snap2 $V0;
+
+TEST $CLI volume stop $V0
+
+TEST $CLI snapshot restore snap1;
+
+TEST $CLI snapshot restore snap2;
+
+TEST $CLI volume start $V0
+
+cleanup ;
diff --git a/tests/bugs/glusterd/bug-1120647.t b/tests/bugs/glusterd/bug-1120647.t
new file mode 100644
index 00000000000..0223f460398
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1120647.t
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{1..4}
+TEST $CLI volume start $V0
+TEST $CLI volume remove-brick $V0 $H0:$B0/brick{3..4} start
+EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0 $H0:$B0/brick{3..4}"
+TEST $CLI volume remove-brick $V0 $H0:$B0/brick{3..4} commit
+TEST $CLI volume remove-brick $V0 replica 1 $H0:$B0/brick2 force
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1140162-file-snapshot-and-features-encryption-option-validation.t b/tests/bugs/glusterd/bug-1140162-file-snapshot-and-features-encryption-option-validation.t
new file mode 100644
index 00000000000..f91093db4e7
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1140162-file-snapshot-and-features-encryption-option-validation.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+## Test case for BZ-1140160: 'volume set <vol> features.file-snapshot <value>' and
+## 'volume set <vol> features.encryption <value>' should validate their input correctly.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## Let's create and start the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume start $V0
+
+## Set the features.file-snapshot and features.encryption options with non-boolean
+## values. These options should fail.
+TEST ! $CLI volume set $V0 features.file-snapshot abcd
+TEST ! $CLI volume set $V0 features.encryption redhat
+
+## Set other options with valid values. These options should succeed.
+TEST $CLI volume set $V0 barrier enable
+TEST $CLI volume set $V0 ping-timeout 60
+
+## Set the features.file-snapshot and features.encryption options with valid boolean
+## values. These options should succeed.
+TEST $CLI volume set $V0 features.file-snapshot on
+TEST $CLI volume set $V0 features.encryption on
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1173414-mgmt-v3-remote-lock-failure.t b/tests/bugs/glusterd/bug-1173414-mgmt-v3-remote-lock-failure.t
new file mode 100755
index 00000000000..5a6cf81fd53
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1173414-mgmt-v3-remote-lock-failure.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0
+TEST $CLI_1 volume create $V1 $H1:$B1/$V1
+TEST $CLI_1 volume start $V0
+TEST $CLI_1 volume start $V1
+
+for i in {1..20}
+do
+ $CLI_1 volume set $V0 diagnostics.client-log-level DEBUG &
+ $CLI_1 volume set $V1 barrier on
+ $CLI_2 volume set $V0 diagnostics.client-log-level DEBUG &
+ $CLI_2 volume set $V1 barrier on
+done
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
+TEST $CLI_1 volume status
+TEST $CLI_2 volume status
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-765230.t b/tests/bugs/glusterd/bug-765230.t
new file mode 100755
index 00000000000..e0b9608d728
--- /dev/null
+++ b/tests/bugs/glusterd/bug-765230.t
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting quota-timeout as 20
+TEST ! $CLI volume set $V0 features.quota-timeout 20
+EXPECT '' volinfo_field $V0 'features.quota-timeout';
+
+## Enabling features.quota-deem-statfs
+TEST ! $CLI volume set $V0 features.quota-deem-statfs on
+EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
+
+## Enabling quota
+TEST $CLI volume quota $V0 enable
+EXPECT 'on' volinfo_field $V0 'features.quota'
+
+## Setting quota-timeout as 20
+TEST $CLI volume set $V0 features.quota-timeout 20
+EXPECT '20' volinfo_field $V0 'features.quota-timeout';
+
+## Enabling features.quota-deem-statfs
+TEST $CLI volume set $V0 features.quota-deem-statfs on
+EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs'
+
+## Disabling quota
+TEST $CLI volume quota $V0 disable
+EXPECT 'off' volinfo_field $V0 'features.quota'
+
+## Setting quota-timeout as 30
+TEST ! $CLI volume set $V0 features.quota-timeout 30
+EXPECT '20' volinfo_field $V0 'features.quota-timeout';
+
+## Disabling features.quota-deem-statfs
+TEST ! $CLI volume set $V0 features.quota-deem-statfs off
+EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs'
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-782095.t b/tests/bugs/glusterd/bug-782095.t
new file mode 100755
index 00000000000..dd8a8dc3026
--- /dev/null
+++ b/tests/bugs/glusterd/bug-782095.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting performance cache min size as 2MB
+TEST $CLI volume set $V0 performance.cache-min-file-size 2MB
+EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## Setting performance cache max size as 20MB
+TEST $CLI volume set $V0 performance.cache-max-file-size 20MB
+EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
+
+## Trying to set performance cache min size as 25MB
+TEST ! $CLI volume set $V0 performance.cache-min-file-size 25MB
+EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## Able to set performance cache min size as long as it's less than the max size
+TEST $CLI volume set $V0 performance.cache-min-file-size 15MB
+EXPECT '15MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## Trying cache-max-file-size of 10MB (below the 15MB min-file-size) must fail
+TEST ! $CLI volume set $V0 cache-max-file-size 10MB
+EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-824753-file-locker.c b/tests/bugs/glusterd/bug-824753-file-locker.c
new file mode 100644
index 00000000000..ea8a7630e81
--- /dev/null
+++ b/tests/bugs/glusterd/bug-824753-file-locker.c
@@ -0,0 +1,42 @@
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+
+int main (int argc, char *argv[])
+{
+ int fd = -1;
+ int ret = -1;
+ char command[2048] = "";
+ char filepath[255] = "";
+ struct flock fl;
+
+ fl.l_type = F_WRLCK;
+ fl.l_whence = SEEK_SET;
+ fl.l_start = 7;
+ fl.l_len = 1;
+ fl.l_pid = getpid();
+
+ snprintf(filepath, 255, "%s/%s", argv[4], argv[5]);
+
+ fd = open(filepath, O_RDWR);
+
+ if (fd == -1)
+ return -1;
+
+ if (fcntl(fd, F_SETLKW, &fl) == -1) {
+ return -1;
+ }
+
+ snprintf(command, sizeof(command),
+ "gluster volume clear-locks %s /%s kind all posix 0,7-1 |"
+ " grep %s | awk -F'..: ' '{print $1}' | grep %s:%s/%s",
+ argv[1], argv[5], argv[2], argv[2], argv[3], argv[1]);
+
+ ret = system (command);
+ close(fd);
+
+ if (ret)
+ return -1;
+ else
+ return 0;
+}
diff --git a/tests/bugs/glusterd/bug-824753.t b/tests/bugs/glusterd/bug-824753.t
new file mode 100755
index 00000000000..2ce4a07c5bd
--- /dev/null
+++ b/tests/bugs/glusterd/bug-824753.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0
+touch $M0/file1;
+
+TEST $CC -g $(dirname $0)/bug-824753-file-locker.c -o $(dirname $0)/file-locker
+
+TEST $(dirname $0)/file-locker $V0 $H0 $B0 $M0 file1
+
+## Finish up
+TEST rm -f $(dirname $0)/file-locker
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-839595.t b/tests/bugs/glusterd/bug-839595.t
new file mode 100644
index 00000000000..b2fe9789a8c
--- /dev/null
+++ b/tests/bugs/glusterd/bug-839595.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1
+TEST $CLI volume set $V0 cluster.server-quorum-type server
+EXPECT "server" volume_option $V0 cluster.server-quorum-type
+TEST $CLI volume set $V0 cluster.server-quorum-type none
+EXPECT "none" volume_option $V0 cluster.server-quorum-type
+TEST $CLI volume reset $V0 cluster.server-quorum-type
+TEST ! $CLI volume set $V0 cluster.server-quorum-type abc
+TEST ! $CLI volume set all cluster.server-quorum-type none
+TEST ! $CLI volume set $V0 cluster.server-quorum-ratio 100
+
+TEST ! $CLI volume set all cluster.server-quorum-ratio abc
+TEST ! $CLI volume set all cluster.server-quorum-ratio -1
+TEST ! $CLI volume set all cluster.server-quorum-ratio 100.0000005
+TEST $CLI volume set all cluster.server-quorum-ratio 0
+EXPECT "0" volume_option $V0 cluster.server-quorum-ratio
+TEST $CLI volume set all cluster.server-quorum-ratio 100
+EXPECT "100" volume_option $V0 cluster.server-quorum-ratio
+TEST $CLI volume set all cluster.server-quorum-ratio 0.0000005
+EXPECT "0.0000005" volume_option $V0 cluster.server-quorum-ratio
+TEST $CLI volume set all cluster.server-quorum-ratio 100%
+EXPECT "100%" volume_option $V0 cluster.server-quorum-ratio
+cleanup;
diff --git a/tests/bugs/glusterd/bug-857330/common.rc b/tests/bugs/glusterd/bug-857330/common.rc
new file mode 100644
index 00000000000..8342dccb442
--- /dev/null
+++ b/tests/bugs/glusterd/bug-857330/common.rc
@@ -0,0 +1,55 @@
+. $(dirname $0)/../../../include.rc
+
+UUID_REGEX='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
+
+TASK_ID=""
+COMMAND=""
+PATTERN=""
+
+function check-and-store-task-id()
+{
+ TASK_ID=""
+
+ local task_id=$($CLI $COMMAND | grep $PATTERN | grep -o -E "$UUID_REGEX")
+
+ if [ -z "$task_id" ] && [ "${task_id+asdf}" = "asdf" ]; then
+ return 1
+ fi
+
+ TASK_ID=$task_id
+ return 0;
+}
+
+function get-task-id()
+{
+ $CLI $COMMAND | grep $PATTERN | grep -o -E "$UUID_REGEX" | tail -n1
+
+}
+
+function check-and-store-task-id-xml()
+{
+ TASK_ID=""
+
+ local task_id=$($CLI $COMMAND --xml | xmllint --format - | grep $PATTERN | grep -o -E "$UUID_REGEX")
+
+ if [ -z "$task_id" ] && [ "${task_id+asdf}" = "asdf" ]; then
+ return 1
+ fi
+
+ TASK_ID=$task_id
+ return 0;
+}
+
+function get-task-id-xml()
+{
+ $CLI $COMMAND --xml | xmllint --format - | grep $PATTERN | grep -o -E "$UUID_REGEX"
+}
+
+function get-task-status()
+{
+ $CLI $COMMAND | grep -o $PATTERN
+ if [ ${PIPESTATUS[0]} -ne 0 ]; then
+ return 1
+ fi
+ return 0
+}
diff --git a/tests/bugs/glusterd/bug-857330/normal.t b/tests/bugs/glusterd/bug-857330/normal.t
new file mode 100755
index 00000000000..02018f244a8
--- /dev/null
+++ b/tests/bugs/glusterd/bug-857330/normal.t
@@ -0,0 +1,79 @@
+#!/bin/bash
+
+. $(dirname $0)/common.rc
+. $(dirname $0)/../../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1;
+TEST $CLI volume info $V0;
+TEST $CLI volume start $V0;
+
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0;
+
+TEST $PYTHON $(dirname $0)/../../../utils/create-files.py \
+ --multi -b 10 -d 10 -n 10 $M0;
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+###############
+## Rebalance ##
+###############
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}2;
+
+COMMAND="volume rebalance $V0 start"
+PATTERN="ID:"
+TEST check-and-store-task-id
+
+COMMAND="volume status $V0"
+PATTERN="ID"
+EXPECT $TASK_ID get-task-id
+
+COMMAND="volume rebalance $V0 status"
+PATTERN="completed"
+EXPECT_WITHIN 300 $PATTERN get-task-status
+
+###################
+## Replace-brick ##
+###################
+REP_BRICK_PAIR="$H0:$B0/${V0}2 $H0:$B0/${V0}3"
+
+COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR start"
+PATTERN="ID:"
+TEST check-and-store-task-id
+
+COMMAND="volume status $V0"
+PATTERN="ID"
+EXPECT $TASK_ID get-task-id
+
+COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR status"
+PATTERN="complete"
+EXPECT_WITHIN 300 $PATTERN get-task-status
+
+TEST $CLI volume replace-brick $V0 $REP_BRICK_PAIR commit;
+
+##################
+## Remove-brick ##
+##################
+COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 start"
+PATTERN="ID:"
+TEST check-and-store-task-id
+
+COMMAND="volume status $V0"
+PATTERN="ID"
+EXPECT $TASK_ID get-task-id
+
+COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 status"
+PATTERN="completed"
+EXPECT_WITHIN 300 $PATTERN get-task-status
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 commit
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-857330/xml.t b/tests/bugs/glusterd/bug-857330/xml.t
new file mode 100755
index 00000000000..3aec3b89bbe
--- /dev/null
+++ b/tests/bugs/glusterd/bug-857330/xml.t
@@ -0,0 +1,103 @@
+#!/bin/bash
+
+. $(dirname $0)/common.rc
+. $(dirname $0)/../../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1;
+TEST $CLI volume info $V0;
+TEST $CLI volume start $V0;
+
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0;
+
+TEST $PYTHON $(dirname $0)/../../../utils/create-files.py \
+ --multi -b 10 -d 10 -n 10 $M0;
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+
+###############
+## Rebalance ##
+###############
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}2;
+
+COMMAND="volume rebalance $V0 start"
+PATTERN="task-id"
+TEST check-and-store-task-id-xml
+
+COMMAND="volume status $V0"
+PATTERN="id"
+EXPECT $TASK_ID get-task-id-xml
+
+COMMAND="volume rebalance $V0 status"
+PATTERN="task-id"
+EXPECT $TASK_ID get-task-id-xml
+
+## TODO: Add tests for rebalance stop
+
+COMMAND="volume rebalance $V0 status"
+PATTERN="completed"
+EXPECT_WITHIN 300 $PATTERN get-task-status
+
+###################
+## Replace-brick ##
+###################
+REP_BRICK_PAIR="$H0:$B0/${V0}2 $H0:$B0/${V0}3"
+
+COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR start"
+PATTERN="task-id"
+TEST check-and-store-task-id-xml
+
+COMMAND="volume status $V0"
+PATTERN="id"
+EXPECT $TASK_ID get-task-id-xml
+
+COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR status"
+PATTERN="task-id"
+EXPECT $TASK_ID get-task-id-xml
+
+## TODO: Add more tests for replace-brick pause|abort
+
+COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR status"
+PATTERN="complete"
+EXPECT_WITHIN 300 $PATTERN get-task-status
+
+COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR commit"
+PATTERN="task-id"
+EXPECT $TASK_ID get-task-id-xml
+
+##################
+## Remove-brick ##
+##################
+COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 start"
+PATTERN="task-id"
+TEST check-and-store-task-id-xml
+
+COMMAND="volume status $V0"
+PATTERN="id"
+EXPECT $TASK_ID get-task-id-xml
+
+COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 status"
+PATTERN="task-id"
+EXPECT $TASK_ID get-task-id-xml
+
+COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 status"
+PATTERN="completed"
+EXPECT_WITHIN 300 $PATTERN get-task-status
+
+## TODO: Add tests for remove-brick stop
+
+COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 commit"
+PATTERN="task-id"
+EXPECT $TASK_ID get-task-id-xml
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-859927.t b/tests/bugs/glusterd/bug-859927.t
new file mode 100755
index 00000000000..c30d2b852d4
--- /dev/null
+++ b/tests/bugs/glusterd/bug-859927.t
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+glusterd;
+
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+TEST ! $CLI volume set $V0 statedump-path ""
+TEST ! $CLI volume set $V0 statedump-path " "
+TEST $CLI volume set $V0 statedump-path "/home/"
+EXPECT "/home/" volume_option $V0 server.statedump-path
+
+TEST ! $CLI volume set $V0 background-self-heal-count ""
+TEST ! $CLI volume set $V0 background-self-heal-count " "
+TEST $CLI volume set $V0 background-self-heal-count 10
+EXPECT "10" volume_option $V0 cluster.background-self-heal-count
+
+TEST ! $CLI volume set $V0 cache-size ""
+TEST ! $CLI volume set $V0 cache-size " "
+TEST $CLI volume set $V0 cache-size 512MB
+EXPECT "512MB" volume_option $V0 performance.cache-size
+
+TEST ! $CLI volume set $V0 self-heal-daemon ""
+TEST ! $CLI volume set $V0 self-heal-daemon " "
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT "on" volume_option $V0 cluster.self-heal-daemon
+
+TEST ! $CLI volume set $V0 read-subvolume ""
+TEST ! $CLI volume set $V0 read-subvolume " "
+TEST $CLI volume set $V0 read-subvolume $V0-client-0
+EXPECT "$V0-client-0" volume_option $V0 cluster.read-subvolume
+
+TEST ! $CLI volume set $V0 data-self-heal-algorithm ""
+TEST ! $CLI volume set $V0 data-self-heal-algorithm " "
+TEST ! $CLI volume set $V0 data-self-heal-algorithm on
+TEST $CLI volume set $V0 data-self-heal-algorithm full
+EXPECT "full" volume_option $V0 cluster.data-self-heal-algorithm
+
+TEST ! $CLI volume set $V0 min-free-inodes ""
+TEST ! $CLI volume set $V0 min-free-inodes " "
+TEST $CLI volume set $V0 min-free-inodes 60%
+EXPECT "60%" volume_option $V0 cluster.min-free-inodes
+
+TEST ! $CLI volume set $V0 min-free-disk ""
+TEST ! $CLI volume set $V0 min-free-disk " "
+TEST $CLI volume set $V0 min-free-disk 60%
+EXPECT "60%" volume_option $V0 cluster.min-free-disk
+
+TEST $CLI volume set $V0 min-free-disk 120
+EXPECT "120" volume_option $V0 cluster.min-free-disk
+
+TEST ! $CLI volume set $V0 frame-timeout ""
+TEST ! $CLI volume set $V0 frame-timeout " "
+TEST $CLI volume set $V0 frame-timeout 0
+EXPECT "0" volume_option $V0 network.frame-timeout
+
+TEST ! $CLI volume set $V0 auth.allow ""
+TEST ! $CLI volume set $V0 auth.allow " "
+TEST $CLI volume set $V0 auth.allow 192.168.122.1
+EXPECT "192.168.122.1" volume_option $V0 auth.allow
+
+TEST ! $CLI volume set $V0 stripe-block-size ""
+TEST ! $CLI volume set $V0 stripe-block-size " "
+TEST $CLI volume set $V0 stripe-block-size 512MB
+EXPECT "512MB" volume_option $V0 cluster.stripe-block-size
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-862834.t b/tests/bugs/glusterd/bug-862834.t
new file mode 100755
index 00000000000..ac2f956a1ed
--- /dev/null
+++ b/tests/bugs/glusterd/bug-862834.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+V1="patchy2"
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+function check_brick()
+{
+ vol=$1;
+ num=$2
+        $CLI volume info $vol | grep "Brick$num" | awk '{print $2}';
+}
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+function brick_count()
+{
+ local vol=$1;
+
+ $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
+}
+
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT '2' brick_count $V0
+
+
+EXPECT "$H0:$B0/${V0}1" check_brick $V0 '1';
+EXPECT "$H0:$B0/${V0}2" check_brick $V0 '2';
+
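+#Creating a new volume with a brick that is already part of $V0 must fail.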
+TEST ! $CLI volume create $V1 $H0:$B0/${V1}0 $H0:$B0/${V0}1;
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-878004.t b/tests/bugs/glusterd/bug-878004.t
new file mode 100644
index 00000000000..8abada3c3b3
--- /dev/null
+++ b/tests/bugs/glusterd/bug-878004.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}3;
+
+function brick_count()
+{
+ local vol=$1;
+
+ $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
+}
+
+
+TEST $CLI volume start $V0
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 force;
+EXPECT '2' brick_count $V0
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 force;
+EXPECT '1' brick_count $V0
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/bug-888752.t b/tests/bugs/glusterd/bug-888752.t
new file mode 100644
index 00000000000..ed0602e34e2
--- /dev/null
+++ b/tests/bugs/glusterd/bug-888752.t
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+# Check if xml output is generated correctly for volume status for a single brick
+# present on another peer and no async tasks are running.
+
+function get_peer_count {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+cleanup
+
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 get_peer_count
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume start $V0
+
+TEST $CLI_1 volume status $V0 $H2:$B2/$V0 --xml
+
+TEST $CLI_1 volume stop $V0
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-889630.t b/tests/bugs/glusterd/bug-889630.t
new file mode 100755
index 00000000000..4fefd94d66f
--- /dev/null
+++ b/tests/bugs/glusterd/bug-889630.t
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+function volume_count {
+ local cli=$1;
+ if [ $cli -eq '1' ] ; then
+ $CLI_1 volume info | grep 'Volume Name' | wc -l;
+ else
+ $CLI_2 volume info | grep 'Volume Name' | wc -l;
+ fi
+}
+
+cleanup;
+
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume start $V0
+
+b="B1";
+
+#Create an extra file in the originator's volume store
+touch ${!b}/glusterd/vols/$V0/run/file
+
+TEST $CLI_1 volume stop $V0
+#Test for self-commit failure
+TEST $CLI_1 volume delete $V0
+
+#Check whether delete succeeded on both the nodes
+EXPECT "0" volume_count '1'
+EXPECT "0" volume_count '2'
+
+#Check whether the volume name can be reused after deletion
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1
+TEST $CLI_1 volume start $V0
+
+#Create an extra file in the peer's volume store
+touch ${!b}/glusterd/vols/$V0/run/file
+
+TEST $CLI_1 volume stop $V0
+#Test for commit failure on the other node
+TEST $CLI_2 volume delete $V0
+
+EXPECT "0" volume_count '1';
+EXPECT "0" volume_count '2';
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-905307.t b/tests/bugs/glusterd/bug-905307.t
new file mode 100644
index 00000000000..dd1c1bc0795
--- /dev/null
+++ b/tests/bugs/glusterd/bug-905307.t
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+#test functionality of post-op-delay-secs
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+
+#Strings should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs abc
+
+#Negative integers should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs -1
+
+#INT_MAX+1 should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 2147483648
+
+#floats should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 1.25
+
+#min val 0 should be accepted
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 0
+EXPECT "0" volume_option $V0 cluster.post-op-delay-secs
+
+#max val 2147483647 should be accepted
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147483647
+EXPECT "2147483647" volume_option $V0 cluster.post-op-delay-secs
+
+#some middle val in range 2147 should be accepted
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147
+EXPECT "2147" volume_option $V0 cluster.post-op-delay-secs
+cleanup;
diff --git a/tests/bugs/glusterd/bug-913487.t b/tests/bugs/glusterd/bug-913487.t
new file mode 100644
index 00000000000..9c616ea28fb
--- /dev/null
+++ b/tests/bugs/glusterd/bug-913487.t
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+
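+# Setting an option on a volume that does not exist must fail, and glusterd
+# must still be running afterwards.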
+TEST ! $CLI volume set $V0 performance.open-behind off;
+
+TEST pidof glusterd;
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-913555.t b/tests/bugs/glusterd/bug-913555.t
new file mode 100755
index 00000000000..4f9e004a654
--- /dev/null
+++ b/tests/bugs/glusterd/bug-913555.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+# Test that a volume becomes unwritable when the cluster loses quorum.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+
+function check_fs {
+ df $1 &> /dev/null
+ echo $?
+}
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+function glusterfsd_count {
+ pidof glusterfsd | wc -w;
+}
+
+cleanup;
+
+TEST launch_cluster 3; # start 3-node virtual cluster
+TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+TEST $CLI_1 volume start $V0
+TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
+
+# Kill one pseudo-node, make sure the others survive and volume stays up.
+TEST kill_node 3;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+EXPECT 0 check_fs $M0;
+EXPECT 2 glusterfsd_count;
+
+# Kill another pseudo-node; quorum is now lost, so the remaining brick dies and the volume goes down.
+TEST kill_node 2;
+EXPECT_WITHIN $PROBE_TIMEOUT 0 check_peers
+EXPECT 1 check_fs $M0;
+EXPECT 0 glusterfsd_count; # quorum is lost, so the surviving node must
+                           # stop its own brick process as well
+
+TEST $glusterd_2;
+TEST $glusterd_3;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 glusterfsd_count; # restore quorum, all ok
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-916549.t b/tests/bugs/glusterd/bug-916549.t
new file mode 100755
index 00000000000..bedbdd60bb6
--- /dev/null
+++ b/tests/bugs/glusterd/bug-916549.t
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd;
+TEST $CLI volume create $V0 $H0:$B0/${V0}1;
+TEST $CLI volume start $V0;
+
+pid_file=$(ls $GLUSTERD_WORKDIR/vols/$V0/run);
+brick_pid=$(cat $GLUSTERD_WORKDIR/vols/$V0/run/$pid_file);
+
+
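+# Kill the brick process, then force-start the volume and run the leak check
+# against the glusterd process once the brick has been respawned.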
+kill -SIGKILL $brick_pid;
+TEST $CLI volume start $V0 force;
+TEST process_leak_count $(pidof glusterd);
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-948686.t b/tests/bugs/glusterd/bug-948686.t
new file mode 100755
index 00000000000..dfe11ff153f
--- /dev/null
+++ b/tests/bugs/glusterd/bug-948686.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+cleanup;
+#setup cluster and test volume
+TEST launch_cluster 3; # start 3-node virtual cluster
+TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
+
+TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/$V0 $H1:$B1/${V0}_1 $H2:$B2/$V0 $H3:$B3/$V0
+TEST $CLI_1 volume start $V0
+TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
+
+#kill a node
+TEST kill_node 3
+
+#modify volume config to see change in volume-sync
+TEST $CLI_1 volume set $V0 write-behind off
+#add some files to the volume to see effect of volume-heal cmd
+TEST touch $M0/{1..100};
+TEST $CLI_1 volume stop $V0;
+TEST $glusterd_3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
+TEST $CLI_3 volume start $V0;
+TEST $CLI_2 volume stop $V0;
+TEST $CLI_2 volume delete $V0;
+
+cleanup;
+
+TEST glusterd;
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume start $V0
+pkill glusterd;
+pkill glusterfsd;
+TEST glusterd
+TEST $CLI volume status $V0
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-948729/bug-948729-force.t b/tests/bugs/glusterd/bug-948729/bug-948729-force.t
new file mode 100644
index 00000000000..f4f71f9a1e2
--- /dev/null
+++ b/tests/bugs/glusterd/bug-948729/bug-948729-force.t
@@ -0,0 +1,103 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+uuid1=`uuidgen`;
+uuid2=`uuidgen`;
+uuid3=`uuidgen`;
+
+V1=patchy1
+V2=patchy2
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+
+B3=/d/backends/3
+B4=/d/backends/4
+B5=/d/backends/5
+B6=/d/backends/6
+
+mkdir -p $B3 $B4 $B5 $B6
+
+TEST truncate -s 16M $B1/brick1
+TEST truncate -s 16M $B2/brick2
+TEST truncate -s 16M $B3/brick3
+TEST truncate -s 16M $B4/brick4
+TEST truncate -s 16M $B5/brick5
+TEST truncate -s 16M $B6/brick6
+
+TEST LD1=`SETUP_LOOP $B1/brick1`
+TEST MKFS_LOOP $LD1
+TEST LD2=`SETUP_LOOP $B2/brick2`
+TEST MKFS_LOOP $LD2
+TEST LD3=`SETUP_LOOP $B3/brick3`
+TEST MKFS_LOOP $LD3
+TEST LD4=`SETUP_LOOP $B4/brick4`
+TEST MKFS_LOOP $LD4
+TEST LD5=`SETUP_LOOP $B5/brick5`
+TEST MKFS_LOOP $LD5
+TEST LD6=`SETUP_LOOP $B6/brick6`
+TEST MKFS_LOOP $LD6
+
+mkdir -p $B1/$V0 $B2/$V0 $B3/$V0 $B4/$V0 $B5/$V0 $B6/$V0
+
+TEST MOUNT_LOOP $LD1 $B1/$V0
+TEST MOUNT_LOOP $LD2 $B2/$V0
+TEST MOUNT_LOOP $LD3 $B3/$V0
+TEST MOUNT_LOOP $LD4 $B4/$V0
+TEST MOUNT_LOOP $LD5 $B5/$V0
+TEST MOUNT_LOOP $LD6 $B6/$V0
+
+#Case 0: Parent directory of the brick is absent
+TEST ! $CLI_1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2 force
+
+#Case 1: File system root is being used as brick directory
+TEST $CLI_1 volume create $V0 $H1:$B5/$V0 $H2:$B6/$V0 force
+
+#Case 2: Brick directory contains only one component
+TEST $CLI_1 volume create $V1 $H1:/$uuid1 $H2:/$uuid2 force
+
+#Case 3: Sub-directories of the backend FS being used as brick directory
+TEST $CLI_1 volume create $V2 $H1:$B1/$V0/brick1 $H2:$B2/$V0/brick2 force
+
+#add-brick tests
+TEST ! $CLI_1 volume add-brick $V0 $H1:$B3/$V0/nonexistent/brick3 force
+TEST $CLI_1 volume add-brick $V0 $H1:$B3/$V0 force
+TEST $CLI_1 volume add-brick $V1 $H1:/$uuid3 force
+TEST $CLI_1 volume add-brick $V2 $H1:$B4/$V0/brick3 force
+
+#####replace-brick tests
+#FIX-ME: replace-brick does not work with the newly introduced cluster test
+#####framework
+
+rmdir /$uuid1 /$uuid2 /$uuid3;
+
+$CLI_1 volume stop $V0
+$CLI_1 volume stop $V1
+$CLI_1 volume stop $V2
+
+UMOUNT_LOOP $B1/$V0
+UMOUNT_LOOP $B2/$V0
+UMOUNT_LOOP $B3/$V0
+UMOUNT_LOOP $B4/$V0
+UMOUNT_LOOP $B5/$V0
+UMOUNT_LOOP $B6/$V0
+
+rm -f $B1/brick1
+rm -f $B2/brick2
+rm -f $B3/brick3
+rm -f $B4/brick4
+rm -f $B5/brick5
+rm -f $B6/brick6
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-948729/bug-948729-mode-script.t b/tests/bugs/glusterd/bug-948729/bug-948729-mode-script.t
new file mode 100644
index 00000000000..18bf9a1c4b6
--- /dev/null
+++ b/tests/bugs/glusterd/bug-948729/bug-948729-mode-script.t
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+uuid1=`uuidgen`;
+uuid2=`uuidgen`;
+uuid3=`uuidgen`;
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+
+B3=/d/backends/3
+mkdir -p $B3
+
+TEST truncate -s 16M $B1/brick1
+TEST truncate -s 16M $B2/brick2
+TEST truncate -s 16M $B3/brick3
+
+TEST LD1=`SETUP_LOOP $B1/brick1`
+TEST MKFS_LOOP $LD1
+TEST LD2=`SETUP_LOOP $B2/brick2`
+TEST MKFS_LOOP $LD2
+TEST LD3=`SETUP_LOOP $B3/brick3`
+TEST MKFS_LOOP $LD3
+
+mkdir -p $B1/$V0 $B2/$V0 $B3/$V0
+
+TEST MOUNT_LOOP $LD1 $B1/$V0
+TEST MOUNT_LOOP $LD2 $B2/$V0
+TEST MOUNT_LOOP $LD3 $B3/$V0
+
+cli1=$(echo $CLI_1 | sed 's/ --wignore//')
+
+#Case 0: Parent directory of the brick is absent
+TEST ! $cli1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2
+
+#Case 1: File system root being used as brick directory
+TEST ! $cli1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+
+#Case 2: Brick directory contains only one component
+TEST ! $cli1 volume create $V0 $H1:/$uuid1 $H2:/$uuid2
+
+#Case 3: Sub-directories of the backend FS being used as brick directory
+TEST $cli1 volume create $V0 $H1:$B1/$V0/brick1 $H2:$B2/$V0/brick2
+
+#add-brick tests
+TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0/nonexistent/brick3
+TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0
+TEST ! $cli1 volume add-brick $V0 $H1:/$uuid3
+TEST $cli1 volume add-brick $V0 $H1:$B3/$V0/brick3
+
+#####replace-brick tests
+#FIX-ME : replace-brick does not currently work in the newly introduced
+#####cluster test framework
+
+$CLI_1 volume stop $V0
+
+UMOUNT_LOOP $B1/$V0
+UMOUNT_LOOP $B2/$V0
+UMOUNT_LOOP $B3/$V0
+
+rm -f $B1/brick1
+rm -f $B2/brick2
+rm -f $B3/brick3
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-948729/bug-948729.t b/tests/bugs/glusterd/bug-948729/bug-948729.t
new file mode 100644
index 00000000000..2b574aa1a14
--- /dev/null
+++ b/tests/bugs/glusterd/bug-948729/bug-948729.t
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+uuid1=`uuidgen`;
+uuid2=`uuidgen`;
+uuid3=`uuidgen`;
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+
+B3=/d/backends/3
+
+mkdir -p $B3
+
+TEST truncate -s 16M $B1/brick1
+TEST truncate -s 16M $B2/brick2
+TEST truncate -s 16M $B3/brick3
+
+TEST LD1=`SETUP_LOOP $B1/brick1`
+TEST MKFS_LOOP $LD1
+TEST LD2=`SETUP_LOOP $B2/brick2`
+TEST MKFS_LOOP $LD2
+TEST LD3=`SETUP_LOOP $B3/brick3`
+TEST MKFS_LOOP $LD3
+
+mkdir -p $B1/$V0 $B2/$V0 $B3/$V0
+
+TEST MOUNT_LOOP $LD1 $B1/$V0
+TEST MOUNT_LOOP $LD2 $B2/$V0
+TEST MOUNT_LOOP $LD3 $B3/$V0
+
+#Tests without options 'mode=script' and 'wignore'
+cli1=$(echo $CLI_1 | sed 's/ --mode=script//')
+cli1=$(echo $cli1 | sed 's/ --wignore//')
+#Case 0: Parent directory of the brick is absent
+TEST ! $cli1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2
+
+#Case 1: File system root being used as brick directory
+TEST ! $cli1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+
+#Case 2: Brick directory contains only one component
+TEST ! $cli1 volume create $V0 $H1:/$uuid1 $H2:/$uuid2
+
+#Case 3: Sub-directories of the backend FS being used as brick directory
+TEST $cli1 volume create $V0 $H1:$B1/$V0/brick1 $H2:$B2/$V0/brick2
+
+#add-brick tests
+TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0/nonexistent/b3
+TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0
+TEST ! $cli1 volume add-brick $V0 $H1:/$uuid3
+TEST $cli1 volume add-brick $V0 $H1:$B3/$V0/brick3
+
+#####replace-brick tests
+#FIX-ME: Replace-brick does not work currently in the newly introduced cluster
+#####test framework.
+
+$CLI_1 volume stop $V0
+
+UMOUNT_LOOP $B1/$V0
+UMOUNT_LOOP $B2/$V0
+UMOUNT_LOOP $B3/$V0
+
+rm -f $B1/brick1
+rm -f $B2/brick2
+rm -f $B3/brick3
+
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-949930.t b/tests/bugs/glusterd/bug-949930.t
new file mode 100644
index 00000000000..774802a66b2
--- /dev/null
+++ b/tests/bugs/glusterd/bug-949930.t
@@ -0,0 +1,27 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+V1=patchy2
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume start $V0;
+
+TEST $CLI volume create $V1 $H0:$B0/${V1}{1,2};
+TEST $CLI volume start $V1;
+
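+#An invalid value must be rejected without being stored, and setting the
+#option on $V0 must not affect $V1.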
+TEST ! $CLI volume set $V0 performance.nfs.read-ahead blah
+EXPECT '' volume_option $V0 performance.nfs.read-ahead
+
+TEST $CLI volume set $V0 performance.nfs.read-ahead on
+EXPECT "on" volume_option $V0 performance.nfs.read-ahead
+
+EXPECT '' volume_option $V1 performance.nfs.read-ahead
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/bug-955588.t b/tests/bugs/glusterd/bug-955588.t
new file mode 100755
index 00000000000..028a34edd7d
--- /dev/null
+++ b/tests/bugs/glusterd/bug-955588.t
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+function get_brick_host_uuid()
+{
+ local vol=$1;
+ local uuid_regex='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
+ local host_uuid_list=$($CLI volume info $vol --xml | grep "brick.uuid" | grep -o -E "$uuid_regex");
+
+ echo $host_uuid_list | awk '{print $1}'
+}
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+
+uuid=`grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=`
+EXPECT $uuid get_brick_host_uuid $V0
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-958790.t b/tests/bugs/glusterd/bug-958790.t
new file mode 100644
index 00000000000..39be0a19137
--- /dev/null
+++ b/tests/bugs/glusterd/bug-958790.t
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
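+#Create a custom option group file named 'test' and verify that applying the
+#group sets all the options it lists.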
+touch $GLUSTERD_WORKDIR/groups/test
+echo "read-ahead=off" > $GLUSTERD_WORKDIR/groups/test
+echo "open-behind=off" >> $GLUSTERD_WORKDIR/groups/test
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 group test
+EXPECT "off" volume_option $V0 performance.read-ahead
+EXPECT "off" volume_option $V0 performance.open-behind
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-961669.t b/tests/bugs/glusterd/bug-961669.t
new file mode 100644
index 00000000000..b02f2f50af1
--- /dev/null
+++ b/tests/bugs/glusterd/bug-961669.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+#Test case: Fail remove-brick 'start' variant when reducing the replica count of a volume.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a 3x3 dist-rep volume
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5,6,7,8};
+TEST $CLI volume start $V0
+
+# Mount FUSE and create file/directory
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST touch $M0/zerobytefile.txt
+TEST mkdir $M0/test_dir
+TEST dd if=/dev/zero of=$M0/file bs=1024 count=1024
+
+function remove_brick_start {
+ $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}{1,4,7} start 2>&1|grep -oE 'success|failed'
+}
+
+function remove_brick {
+ $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}{1,4,7} force 2>&1|grep -oE 'success|failed'
+}
+
+#remove-brick start variant
+#Actual message displayed at cli is:
+#"volume remove-brick start: failed: Rebalancing not needed when reducing replica count. Try without the 'start' option"
+EXPECT "failed" remove_brick_start;
+
+#remove-brick commit-force
+#Actual message displayed at cli is:
+#"volume remove-brick commit force: success"
+EXPECT "success" remove_brick
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-963541.t b/tests/bugs/glusterd/bug-963541.t
new file mode 100755
index 00000000000..611626a0d10
--- /dev/null
+++ b/tests/bugs/glusterd/bug-963541.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3};
+TEST $CLI volume start $V0;
+
+# Start a remove-brick and try to start a rebalance/remove-brick without committing
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
+
+TEST ! $CLI volume rebalance $V0 start
+TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start
+
+#Try to start rebalance/remove-brick again after commit
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 commit
+
+$CLI volume status
+
+TEST $CLI volume rebalance $V0 start
+TEST $CLI volume rebalance $V0 stop
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 stop
+
+TEST $CLI volume stop $V0
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/bug-964059.t b/tests/bugs/glusterd/bug-964059.t
new file mode 100755
index 00000000000..7b4f60454b8
--- /dev/null
+++ b/tests/bugs/glusterd/bug-964059.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+function volume_count {
+ local cli=$1;
+ if [ $cli -eq '1' ] ; then
+ $CLI_1 volume info | grep 'Volume Name' | wc -l;
+ else
+ $CLI_2 volume info | grep 'Volume Name' | wc -l;
+ fi
+}
+
+cleanup;
+
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume start $V0
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/$V0 start
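+#'volume status' must succeed while the remove-brick operation started above is in progress.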
+TEST $CLI_1 volume status
+cleanup;
diff --git a/tests/bugs/glusterd/bug-974007.t b/tests/bugs/glusterd/bug-974007.t
new file mode 100644
index 00000000000..5759adb583f
--- /dev/null
+++ b/tests/bugs/glusterd/bug-974007.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+#Test case: Create a distributed replicate volume, and remove multiple
+#replica pairs in a single remove-brick command.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a 3X2 distributed-replicate volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..6};
+TEST $CLI volume start $V0
+
+# Mount FUSE and create files
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST touch $M0/file{1..10}
+
+# Remove bricks from two sub-volumes to make it a 1x2 vol.
+# Bricks in question are given in a random order but from the same subvols.
+function remove_brick_start_status {
+ $CLI volume remove-brick $V0 \
+ $H0:$B0/${V0}6 $H0:$B0/${V0}1 \
+ $H0:$B0/${V0}2 $H0:$B0/${V0}5 start 2>&1 |grep -oE "success|failed"
+}
+EXPECT "success" remove_brick_start_status;
+
+# Wait for rebalance to complete
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:$B0/${V0}6 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}5"
+
+# Check commit status
+function remove_brick_commit_status {
+ $CLI volume remove-brick $V0 \
+ $H0:$B0/${V0}6 $H0:$B0/${V0}1 \
+ $H0:$B0/${V0}2 $H0:$B0/${V0}5 commit 2>&1 |grep -oE "success|failed"
+}
+EXPECT "success" remove_brick_commit_status;
+
+# Check the volume type
+EXPECT "Replicate" echo `$CLI volume info |grep Type |awk '{print $2}'`
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;