author     Amar Tumballi <amar@kadalu.io>          2020-08-18 14:08:20 +0530
committer  Rinku Kothiya <rkothiya@redhat.com>     2020-09-14 19:41:21 +0000
commit     473453c4e2b1b6fc94edbce438dd9a3c0ea58c67 (patch)
tree       e9d335860880dea0da1ae62d5f312f8c7c90d240 /tests/000-flaky
parent     635dcf82505efcdeaf01c4e0450a157b533099ba (diff)
tests: provide an option to mark tests as 'flaky'
* also add some time gap in other tests to see if we get things working properly
* create a directory 'tests/000-flaky/', which can host any tests that are flaky.
* move all the tests mentioned in the issue to the above directory.
* as the above dir gets tested first, all flaky tests would be reported quickly.
* change `run-tests.sh` to continue tests even if flaky tests fail.

Reference: gluster/project-infrastructure#72
Updates: #1000
Change-Id: Ifdafa38d083ebd80f7ae3cbbc9aa3b68b6d21d0e
Signed-off-by: Amar Tumballi <amar@kadalu.io>
(cherry picked from 097db13c11390174c5b9f11aa0fd87eca1735871)
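For illustration, a minimal sketch (not the actual run-tests.sh patch) of a runner loop that treats failures under tests/000-flaky/ as non-fatal while any other test failure still fails the run; the run_all_tests helper and the prove invocation below are assumptions made for this sketch:

    # Sketch only: flaky tests live in tests/000-flaky/ and their failures are
    # reported but do not abort the run; all other test failures remain fatal.
    run_all_tests ()
    {
        local failed="" flaky_failed=""

        # 'tests/000-flaky' sorts before the other test directories, so flaky
        # failures surface early in the run.
        for t in $(find tests -name '*.t' | sort); do
            if ! prove -vf "$t"; then
                case "$t" in
                    tests/000-flaky/*) flaky_failed="$flaky_failed $t" ;;
                    *)                 failed="$failed $t" ;;
                esac
            fi
        done

        [ -n "$flaky_failed" ] && echo "Flaky tests failed (ignored):$flaky_failed"
        if [ -n "$failed" ]; then
            echo "Tests failed:$failed"
            return 1
        fi
        return 0
    }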
Diffstat (limited to 'tests/000-flaky')
-rw-r--r--  tests/000-flaky/basic_afr_split-brain-favorite-child-policy.t      203
-rw-r--r--  tests/000-flaky/basic_changelog_changelog-snapshot.t                60
-rw-r--r--  tests/000-flaky/basic_distribute_rebal-all-nodes-migrate.t         142
-rw-r--r--  tests/000-flaky/basic_ec_ec-quorum-count-partial-failure.t          50
-rw-r--r--  tests/000-flaky/basic_mount-nfs-auth.t                             342
-rw-r--r--  tests/000-flaky/bugs_core_multiplex-limit-issue-151.t               56
-rw-r--r--  tests/000-flaky/bugs_distribute_bug-1117851.t                      101
-rw-r--r--  tests/000-flaky/bugs_distribute_bug-1122443.t                       61
-rw-r--r--  tests/000-flaky/bugs_glusterd_bug-857330/common.rc                  57
-rwxr-xr-x  tests/000-flaky/bugs_glusterd_bug-857330/normal.t                   69
-rwxr-xr-x  tests/000-flaky/bugs_glusterd_bug-857330/xml.t                      83
-rw-r--r--  tests/000-flaky/bugs_glusterd_quorum-value-check.t                  37
-rw-r--r--  tests/000-flaky/bugs_nfs_bug-1116503.t                              47
-rw-r--r--  tests/000-flaky/features_lock-migration_lkmigration-set-option.t    34
14 files changed, 1342 insertions, 0 deletions
diff --git a/tests/000-flaky/basic_afr_split-brain-favorite-child-policy.t b/tests/000-flaky/basic_afr_split-brain-favorite-child-policy.t
new file mode 100644
index 00000000000..77d82a4996f
--- /dev/null
+++ b/tests/000-flaky/basic_afr_split-brain-favorite-child-policy.t
@@ -0,0 +1,203 @@
+#!/bin/bash
+
+#Test the split-brain resolution CLI commands.
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+#Create replica 2 volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST touch $M0/file
+
+############ Healing using favorite-child-policy = ctime #################
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+
+#file still in split-brain
+cat $M0/file > /dev/null
+EXPECT "1" echo $?
+
+# Umount to prevent further FOPS on the file, then find the brick with latest ctime.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+ctime1=`stat -c "%.Z" $B0/${V0}0/file`
+ctime2=`stat -c "%.Z" $B0/${V0}1/file`
+if (( $(echo "$ctime1 > $ctime2" | bc -l) )); then
+ LATEST_CTIME_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
+else
+ LATEST_CTIME_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
+fi
+TEST $CLI volume set $V0 cluster.favorite-child-policy ctime
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+B0_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
+B1_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
+TEST [ "$LATEST_CTIME_MD5" == "$B0_MD5" ]
+TEST [ "$LATEST_CTIME_MD5" == "$B1_MD5" ]
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+cat $M0/file > /dev/null
+EXPECT "0" echo $?
+
+############ Healing using favorite-child-policy = mtime #################
+TEST $CLI volume set $V0 cluster.favorite-child-policy none
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+
+#file still in split-brain
+cat $M0/file > /dev/null
+EXPECT "1" echo $?
+
+#We know that the second brick has latest mtime.
+LATEST_CTIME_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
+TEST $CLI volume set $V0 cluster.favorite-child-policy mtime
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+cat $M0/file > /dev/null
+EXPECT "0" echo $?
+HEALED_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
+TEST [ "$LATEST_CTIME_MD5" == "$HEALED_MD5" ]
+
+############ Healing using favorite-child-policy = size #################
+TEST $CLI volume set $V0 cluster.favorite-child-policy none
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=10240
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+
+#file still in split-brain
+cat $M0/file > /dev/null
+EXPECT "1" echo $?
+
+#We know that the second brick has the bigger size file.
+BIGGER_FILE_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
+TEST $CLI volume set $V0 cluster.favorite-child-policy size
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+cat $M0/file > /dev/null
+EXPECT "0" echo $?
+HEALED_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
+TEST [ "$BIGGER_FILE_MD5" == "$HEALED_MD5" ]
+
+############ Healing using favorite-child-policy = majority on replica-3 #################
+
+#Convert volume to replica-3
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST $CLI volume set $V0 cluster.quorum-type none
+TEST $CLI volume set $V0 cluster.favorite-child-policy none
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=10240
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+
+#file still in split-brain
+cat $M0/file > /dev/null
+EXPECT "1" echo $?
+
+#We know that the second and third bricks agree with each other. Pick any one of them.
+MAJORITY_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
+TEST $CLI volume set $V0 cluster.favorite-child-policy majority
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+cat $M0/file > /dev/null
+EXPECT "0" echo $?
+HEALED_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
+TEST [ "$MAJORITY_MD5" == "$HEALED_MD5" ]
+
+TEST force_umount $M0
+cleanup
diff --git a/tests/000-flaky/basic_changelog_changelog-snapshot.t b/tests/000-flaky/basic_changelog_changelog-snapshot.t
new file mode 100644
index 00000000000..f6cd0b04d47
--- /dev/null
+++ b/tests/000-flaky/basic_changelog_changelog-snapshot.t
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../snapshot.rc
+
+cleanup;
+ROLLOVER_TIME=3
+
+TEST verify_lvm_version;
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST setup_lvm 1
+
+TEST $CLI volume create $V0 $H0:$L1
+BRICK_LOG=$(echo "$L1" | tr / - | sed 's/^-//g')
+TEST $CLI volume start $V0
+
+#Enable changelog
+TEST $CLI volume set $V0 changelog.changelog on
+TEST $CLI volume set $V0 changelog.rollover-time $ROLLOVER_TIME
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+#Create snapshot
+S1="${V0}-snap1"
+
+mkdir $M0/RENAME
+mkdir $M0/LINK
+mkdir $M0/UNLINK
+mkdir $M0/RMDIR
+mkdir $M0/SYMLINK
+
+for i in {1..400} ; do touch $M0/RENAME/file$i; done
+for i in {1..400} ; do touch $M0/LINK/file$i; done
+for i in {1..400} ; do touch $M0/UNLINK/file$i; done
+for i in {1..400} ; do mkdir $M0/RMDIR/dir$i; done
+for i in {1..400} ; do touch $M0/SYMLINK/file$i; done
+
+#Write I/O in background
+for i in {1..400} ; do touch $M0/file$i 2>/dev/null; done &
+for i in {1..400} ; do mknod $M0/mknod-file$i p 2>/dev/null; done &
+for i in {1..400} ; do mkdir $M0/dir$i 2>/dev/null; done &
+for i in {1..400} ; do mv $M0/RENAME/file$i $M0/RENAME/rn-file$i 2>/dev/null; done &
+for i in {1..400} ; do ln $M0/LINK/file$i $M0/LINK/ln-file$i 2>/dev/null; done &
+for i in {1..400} ; do rm -f $M0/UNLINK/file$i 2>/dev/null; done &
+for i in {1..400} ; do rmdir $M0/RMDIR/dir$i 2>/dev/null; done &
+for i in {1..400} ; do ln -s $M0/SYMLINK/file$i $M0/SYMLINK/sym-file$i 2>/dev/null; done &
+
+sleep 1
+TEST $CLI snapshot create $S1 $V0 no-timestamp
+TEST snapshot_exists 0 $S1
+
+TEST grep '"Enabled changelog barrier"' /var/log/glusterfs/bricks/$BRICK_LOG.log
+TEST grep '"Disabled changelog barrier"' /var/log/glusterfs/bricks/$BRICK_LOG.log
+
+TEST glusterfs -s $H0 --volfile-id=/snaps/$S1/$V0 $M1
+
+#Clean up
+TEST $CLI volume stop $V0 force
+cleanup;
diff --git a/tests/000-flaky/basic_distribute_rebal-all-nodes-migrate.t b/tests/000-flaky/basic_distribute_rebal-all-nodes-migrate.t
new file mode 100644
index 00000000000..eb5d3305ac1
--- /dev/null
+++ b/tests/000-flaky/basic_distribute_rebal-all-nodes-migrate.t
@@ -0,0 +1,142 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../cluster.rc
+. $(dirname $0)/../dht.rc
+
+
+# Check if every single rebalance process migrated some files
+
+function cluster_rebal_all_nodes_migrated_files {
+ val=0
+ a=$($CLI_1 volume rebalance $V0 status | grep "completed" | awk '{print $2}');
+ b=($a)
+ for i in "${b[@]}"
+ do
+ if [ "$i" -eq "0" ]; then
+ echo "false";
+ val=1;
+ fi
+ done
+ echo $val
+}
+
+cleanup
+
+TEST launch_cluster 3;
+TEST $CLI_1 peer probe $H2;
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+
+#Start with a pure distribute volume (multiple bricks on the same node)
+TEST $CLI_1 volume create $V0 $H1:$B1/dist1 $H1:$B1/dist2 $H2:$B2/dist3 $H2:$B2/dist4
+
+TEST $CLI_1 volume start $V0
+$CLI_1 volume info $V0
+
+#TEST $CLI_1 volume set $V0 client-log-level DEBUG
+
+## Mount FUSE
+TEST glusterfs -s $H1 --volfile-id $V0 $M0;
+
+TEST mkdir $M0/dir1 2>/dev/null;
+TEST touch $M0/dir1/file-{1..500}
+
+## Add-brick and run rebalance to force file migration
+TEST $CLI_1 volume add-brick $V0 $H1:$B1/dist5 $H2:$B2/dist6
+
+#Start a rebalance
+TEST $CLI_1 volume rebalance $V0 start force
+
+#volume rebalance status should work
+#TEST $CLI_1 volume rebalance $V0 status
+#$CLI_1 volume rebalance $V0 status
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "0" cluster_rebalance_completed
+EXPECT "0" cluster_rebal_all_nodes_migrated_files
+$CLI_1 volume rebalance $V0 status
+
+
+TEST umount -f $M0
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume delete $V0
+
+
+##############################################################
+
+# Next, a dist-rep volume
+TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/drep1 $H2:$B2/drep1 $H1:$B1/drep2 $H2:$B2/drep2
+
+TEST $CLI_1 volume start $V0
+$CLI_1 volume info $V0
+
+#TEST $CLI_1 volume set $V0 client-log-level DEBUG
+
+## Mount FUSE
+TEST glusterfs -s $H1 --volfile-id $V0 $M0;
+
+TEST mkdir $M0/dir1 2>/dev/null;
+TEST touch $M0/dir1/file-{1..500}
+
+## Add-brick and run rebalance to force file migration
+TEST $CLI_1 volume add-brick $V0 replica 2 $H1:$B1/drep3 $H2:$B2/drep3
+
+#Start a rebalance
+TEST $CLI_1 volume rebalance $V0 start force
+
+#volume rebalance status should work
+#TEST $CLI_1 volume rebalance $V0 status
+#$CLI_1 volume rebalance $V0 status
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "0" cluster_rebalance_completed
+#EXPECT "0" cluster_rebal_all_nodes_migrated_files
+$CLI_1 volume rebalance $V0 status
+
+
+TEST umount -f $M0
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume delete $V0
+
+##############################################################
+
+# Next, a disperse volume
+TEST $CLI_1 volume create $V0 disperse 3 $H1:$B1/ec1 $H2:$B1/ec2 $H3:$B1/ec3 force
+
+TEST $CLI_1 volume start $V0
+$CLI_1 volume info $V0
+
+#TEST $CLI_1 volume set $V0 client-log-level DEBUG
+
+## Mount FUSE
+TEST glusterfs -s $H1 --volfile-id $V0 $M0;
+
+TEST mkdir $M0/dir1 2>/dev/null;
+TEST touch $M0/dir1/file-{1..500}
+
+## Add-brick and run rebalance to force file migration
+TEST $CLI_1 volume add-brick $V0 $H1:$B2/ec4 $H2:$B2/ec5 $H3:$B2/ec6
+
+#Start a rebalance
+TEST $CLI_1 volume rebalance $V0 start force
+
+#volume rebalance status should work
+#TEST $CLI_1 volume rebalance $V0 status
+#$CLI_1 volume rebalance $V0 status
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "0" cluster_rebalance_completed
+
+# this will not work unless EC is changed to return all node-uuids
+# uncomment the check below once that patch is ready
+#EXPECT "0" cluster_rebal_all_nodes_migrated_files
+$CLI_1 volume rebalance $V0 status
+
+
+TEST umount -f $M0
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume delete $V0
+
+##############################################################
+
+cleanup
+#G_TESTDEF_TEST_STATUS_NETBSD7=1501388
diff --git a/tests/000-flaky/basic_ec_ec-quorum-count-partial-failure.t b/tests/000-flaky/basic_ec_ec-quorum-count-partial-failure.t
new file mode 100644
index 00000000000..42808ce0c0e
--- /dev/null
+++ b/tests/000-flaky/basic_ec_ec-quorum-count-partial-failure.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+#This test checks that partial failure of fop results in main fop failure only
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+TEST $CLI volume create $V1 $H0:$B0/${V1}{0..5}
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=/$V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+
+TEST dd if=/dev/urandom of=$M0/a bs=12347 count=1
+TEST dd if=/dev/urandom of=$M0/b bs=12347 count=1
+TEST cp $M0/b $M0/c
+TEST fallocate -p -l 101 $M0/c
+TEST $CLI volume stop $V0
+TEST $CLI volume set $V0 debug.delay-gen posix;
+TEST $CLI volume set $V0 delay-gen.delay-duration 10000000;
+TEST $CLI volume set $V0 delay-gen.enable WRITE;
+TEST $CLI volume set $V0 delay-gen.delay-percentage 100
+TEST $CLI volume set $V0 disperse.quorum-count 6
+TEST $CLI volume start $V0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+cksum=$(dd if=$M0/a bs=12345 count=1 | md5sum | awk '{print $1}')
+truncate -s 12345 $M0/a & #While write is waiting for 5 seconds, introduce failure
+fallocate -p -l 101 $M0/b &
+sleep 1
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST wait
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}
+EXPECT "12345" stat --format=%s $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0;
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "4" ec_child_up_count $V0 0
+cksum_after_heal=$(dd if=$M0/a | md5sum | awk '{print $1}')
+TEST [[ $cksum == $cksum_after_heal ]]
+cksum=$(dd if=$M0/c | md5sum | awk '{print $1}')
+cksum_after_heal=$(dd if=$M0/b | md5sum | awk '{print $1}')
+TEST [[ $cksum == $cksum_after_heal ]]
+
+cleanup;
diff --git a/tests/000-flaky/basic_mount-nfs-auth.t b/tests/000-flaky/basic_mount-nfs-auth.t
new file mode 100644
index 00000000000..3d4a9cff00b
--- /dev/null
+++ b/tests/000-flaky/basic_mount-nfs-auth.t
@@ -0,0 +1,342 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+# Our mount timeout must be as long as the time for a regular configuration
+# change to be acted upon *plus* AUTH_REFRESH_INTERVAL, not one replacing the
+# other. Otherwise this process races vs. the one making the change we're
+# trying to test, which leads to spurious failures.
+MY_MOUNT_TIMEOUT=$((CONFIG_UPDATE_TIMEOUT+AUTH_REFRESH_INTERVAL))
+
+cleanup;
+## Check whether glusterd is running
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+H0IP=$(ip addr show |grep -w inet |grep -v 127.0.0.1|awk '{ print $2 }'| cut -d "/" -f 1)
+H0IP6=$(host $HOSTNAME | grep IPv6 | awk '{print $NF}')
+
+# Export variables for allow & deny
+EXPORT_ALLOW="/$V0 $H0(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)"
+EXPORT_ALLOW_SLASH="/$V0/ $H0(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)"
+EXPORT_DENY="/$V0 1.2.3.4(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)"
+
+# Netgroup variables for allow & deny
+NETGROUP_ALLOW="ngtop ng1000\nng1000 ng999\nng999 ng1\nng1 ng2\nng2 ($H0,,)"
+NETGROUP_DENY="ngtop ng1000\nng1000 ng999\nng999 ng1\nng1 ng2\nng2 (1.2.3.4,,)"
+
+V0L1="$V0/L1"
+V0L2="$V0L1/L2"
+V0L3="$V0L2/L3"
+
+# Other variations for allow & deny
+EXPORT_ALLOW_RO="/$V0 $H0(sec=sys,ro,anonuid=0) @ngtop(sec=sys,ro,anonuid=0)"
+EXPORT_ALLOW_L1="/$V0L1 $H0(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)"
+EXPORT_WILDCARD="/$V0 *(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)"
+
+function build_dirs () {
+ mkdir -p $B0/b{0,1,2}/L1/L2/L3
+}
+
+function export_allow_this_host_ipv6 () {
+ printf "$EXPORT_ALLOW6\n" > "$GLUSTERD_WORKDIR"/nfs/exports
+}
+
+function export_allow_this_host () {
+ printf "$EXPORT_ALLOW\n" > ${NFSDIR}/exports
+}
+
+function export_allow_this_host_with_slash () {
+ printf "$EXPORT_ALLOW_SLASH\n" > ${NFSDIR}/exports
+}
+
+function export_deny_this_host () {
+ printf "$EXPORT_DENY\n" > ${NFSDIR}/exports
+}
+
+function export_allow_this_host_l1 () {
+ printf "$EXPORT_ALLOW_L1\n" >> ${NFSDIR}/exports
+}
+
+function export_allow_wildcard () {
+ printf "$EXPORT_WILDCARD\n" > ${NFSDIR}/exports
+}
+
+function export_allow_this_host_ro () {
+ printf "$EXPORT_ALLOW_RO\n" > ${NFSDIR}/exports
+}
+
+function netgroup_allow_this_host () {
+ printf "$NETGROUP_ALLOW\n" > ${NFSDIR}/netgroups
+}
+
+function netgroup_deny_this_host () {
+ printf "$NETGROUP_DENY\n" > ${NFSDIR}/netgroups
+}
+
+function create_vol () {
+ $CLI vol create $V0 $H0:$B0/b0
+}
+
+function setup_cluster() {
+ build_dirs # Build directories
+ export_allow_this_host # Allow this host in the exports file
+ netgroup_allow_this_host # Allow this host in the netgroups file
+
+ glusterd
+ create_vol # Create the volume
+}
+
+function check_mount_success {
+ mount_nfs $H0:/$1 $N0 nolock
+ if [ $? -eq 0 ]; then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+function check_mount_failure {
+ mount_nfs $H0:/$1 $N0 nolock
+ if [ $? -ne 0 ]; then
+ echo "Y"
+ else
+ local timeout=$UMOUNT_TIMEOUT
+        while ! umount_nfs $N0 && [ $timeout -ne 0 ] ; do
+ timeout=$(( $timeout - 1 ))
+ sleep 1
+ done
+ fi
+}
+
+function small_write () {
+ dd if=/dev/zero of=$N0/test-small-write count=1 bs=1k 2>&1
+ if [ $? -ne 0 ]; then
+ echo "N"
+ else
+ echo "Y"
+ fi
+}
+
+function bg_write () {
+ dd if=/dev/zero of=$N0/test-bg-write count=1 bs=1k &
+ BG_WRITE_PID=$!
+}
+
+function big_write() {
+ dd if=/dev/zero of=$N0/test-big-write count=500 bs=1024k
+}
+
+function create () {
+ touch $N0/create-test
+}
+
+function stat_nfs () {
+ ls $N0/
+}
+
+# Restarts the NFS server
+function restart_nfs () {
+ local NFS_PID=$(cat $GLUSTERD_PIDFILEDIR/nfs/nfs.pid)
+
+ # kill the NFS-server if it is running
+ while ps -q ${NFS_PID} 2>&1 > /dev/null; do
+ kill ${NFS_PID}
+ sleep 0.5
+ done
+
+ # start-force starts the NFS-server again
+ $CLI vol start patchy force
+}
+
+setup_cluster
+
+# run preliminary tests
+TEST $CLI vol set $V0 nfs.disable off
+TEST $CLI vol start $V0
+
+# Get NFS state directory
+NFSDIR=$( $CLI volume get patchy nfs.mount-rmtab | \
+ awk '/^nfs.mount-rmtab/{print $2}' | \
+ xargs dirname )
+
+## Wait for volume to register with rpc.mountd
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
+
+## NFS server starts with auth disabled
+## Do some tests to verify that.
+
+EXPECT "Y" check_mount_success $V0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Disallow host
+TEST export_deny_this_host
+TEST netgroup_deny_this_host
+
+## Technically deauthorized this host, but since auth is disabled we should be
+## able to do mounts, writes, etc.
+EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0
+EXPECT "Y" small_write
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Reauthorize this host
+export_allow_this_host
+netgroup_allow_this_host
+
+## Restart NFS with auth enabled
+$CLI vol stop $V0
+TEST $CLI vol set $V0 nfs.exports-auth-enable on
+$CLI vol start $V0
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
+
+## Mount NFS
+EXPECT "Y" check_mount_success $V0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Mount NFS using the IPv6 export
+export_allow_this_host_ipv6
+EXPECT "Y" check_mount_success $V0
+
+## Disallow host
+TEST export_deny_this_host
+TEST netgroup_deny_this_host
+
+## Writes should not be allowed, host is not authorized
+EXPECT_WITHIN $AUTH_REFRESH_INTERVAL "N" small_write
+
+## Unmount so we can test mount
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Subsequent mounts should not be allowed, host is not authorized
+EXPECT "Y" check_mount_failure $V0
+
+## Reauthorize host
+TEST export_allow_this_host
+TEST netgroup_allow_this_host
+
+EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Allow host in netgroups but not in exports, host should be allowed
+TEST export_deny_this_host
+TEST netgroup_allow_this_host
+
+# wait for the mount authentication to rebuild
+sleep $((AUTH_REFRESH_INTERVAL + 1))
+
+EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0
+EXPECT "Y" small_write
+TEST big_write
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Allow host in exports but not in netgroups, host should be allowed
+TEST export_allow_this_host
+TEST netgroup_deny_this_host
+
+EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Finally, reauth the host in export and netgroup, test mount & write
+TEST export_allow_this_host_l1
+TEST netgroup_allow_this_host
+
+EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0L1
+EXPECT "Y" small_write
+
+## Failover test: Restarting NFS and then doing a write should pass
+bg_write
+TEST restart_nfs
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
+
+TEST wait $BG_WRITE_PID
+EXPECT "Y" small_write
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Test deep mounts
+EXPECT "Y" check_mount_success $V0L1
+EXPECT "Y" small_write
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+TEST export_allow_this_host_ro
+TEST netgroup_deny_this_host
+
+## Restart the nfs server to avoid a spurious failure (BZ1256352)
+restart_nfs
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
+
+EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0
+EXPECT "N" small_write # Writes should not be allowed
+TEST ! create # Create should not be allowed
+TEST stat_nfs # Stat should be allowed
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+TEST export_deny_this_host
+TEST netgroup_deny_this_host
+TEST export_allow_this_host_l1 # Allow this host at L1
+
+EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_failure $V0 #V0 shouldnt be allowed
+EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0L1 #V0L1 should be
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Test wildcard hosts
+TEST export_allow_wildcard
+
+# $MY_MOUNT_TIMEOUT might not be long enough here; restarting the NFS server should do.
+restart_nfs
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
+
+EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0
+EXPECT_WITHIN $AUTH_REFRESH_INTERVAL "Y" small_write
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Test if path is parsed correctly
+## by mounting host:vol/ instead of host:vol
+EXPECT "Y" check_mount_success $V0/
+EXPECT "Y" small_write
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+TEST export_allow_this_host_with_slash
+
+EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0
+EXPECT "Y" small_write
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+EXPECT "Y" check_mount_success $V0/
+EXPECT "Y" small_write
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+
+## Turn off exports authentication
+$CLI vol stop $V0
+TEST $CLI vol set $V0 nfs.exports-auth-enable off
+$CLI vol start $V0
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
+
+TEST export_deny_this_host # Deny the host
+TEST netgroup_deny_this_host
+
+EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0 # Do a mount & test
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Turn back on the exports authentication
+$CLI vol stop $V0
+TEST $CLI vol set $V0 nfs.exports-auth-enable on
+$CLI vol start $V0
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
+
+## Do a simple test to set the refresh time to 20 seconds
+TEST $CLI vol set $V0 nfs.auth-refresh-interval-sec 20
+
+## Do a simple test to see if the volume option exists
+TEST $CLI vol set $V0 nfs.auth-cache-ttl-sec 400
+
+## Finish up
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup
diff --git a/tests/000-flaky/bugs_core_multiplex-limit-issue-151.t b/tests/000-flaky/bugs_core_multiplex-limit-issue-151.t
new file mode 100644
index 00000000000..5a88f97d726
--- /dev/null
+++ b/tests/000-flaky/bugs_core_multiplex-limit-issue-151.t
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../traps.rc
+. $(dirname $0)/../volume.rc
+
+function count_up_bricks {
+ $CLI --xml volume status all | grep '<status>1' | wc -l
+}
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
+function count_brick_pids {
+ $CLI --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
+ | grep -v "N/A" | sort | uniq | wc -l
+}
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume set all cluster.brick-multiplex on
+TEST ! $CLI volume set all cluster.max-bricks-per-process -1
+TEST ! $CLI volume set all cluster.max-bricks-per-process foobar
+TEST $CLI volume set all cluster.max-bricks-per-process 3
+
+TEST $CLI volume create $V0 $H0:$B0/brick{0..5}
+TEST $CLI volume start $V0
+
+EXPECT 2 count_brick_processes
+EXPECT 2 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 count_up_bricks
+
+pkill gluster
+TEST glusterd
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_brick_processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 count_up_bricks
+
+TEST $CLI volume add-brick $V0 $H0:$B0/brick6
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_brick_processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 7 count_up_bricks
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/brick3 start
+TEST $CLI volume remove-brick $V0 $H0:$B0/brick3 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_brick_processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 count_up_bricks
+
+cleanup;
diff --git a/tests/000-flaky/bugs_distribute_bug-1117851.t b/tests/000-flaky/bugs_distribute_bug-1117851.t
new file mode 100644
index 00000000000..5980bf2fd4b
--- /dev/null
+++ b/tests/000-flaky/bugs_distribute_bug-1117851.t
@@ -0,0 +1,101 @@
+#!/bin/bash
+
+SCRIPT_TIMEOUT=250
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+create_files () {
+ for i in {1..1000}; do
+ orig=$(printf %s/abc%04d $1 $i)
+ real=$(printf %s/src%04d $1 $i)
+ # Make sure lots of these have linkfiles.
+ echo "This is file $i" > $orig
+ mv $orig $real
+ done
+ sync
+}
+
+move_files_inner () {
+ sfile=$M0/status_$(basename $1)
+ for i in {1..1000}; do
+ src=$(printf %s/src%04d $1 $i)
+ dst=$(printf %s/dst%04d $1 $i)
+ mv $src $dst 2> /dev/null
+ done
+ echo "done" > $sfile
+}
+
+move_files () {
+ #Create the status file here to prevent spurious failures
+ #caused by the file not being created in time by the
+ #background process
+ sfile=$M0/status_$(basename $1)
+ echo "running" > $sfile
+ move_files_inner $* &
+}
+
+check_files () {
+ errors=0
+ for i in {1..1000}; do
+ if [ ! -f $(printf %s/dst%04d $1 $i) ]; then
+ if [ -f $(printf %s/src%04d $1 $i) ]; then
+ echo "file $i didnt get moved" > /dev/stderr
+ else
+ echo "file $i is MISSING" > /dev/stderr
+ errors=$((errors+1))
+ fi
+ fi
+ done
+ if [ $((errors)) != 0 ]; then
+ : ls -l $1 > /dev/stderr
+ fi
+ return $errors
+}
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT '6' brick_count $V0
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Mount FUSE with caching disabled (read-write)
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+TEST create_files $M0
+
+## Mount FUSE with caching disabled (read-write) again
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M1;
+
+TEST move_files $M0
+TEST move_files $M1
+
+# It's regrettable that renaming 1000 files might take more than 30 seconds,
+# but on our test systems it sometimes does, so we double the time we'd use
+# otherwise. Even then, roughly 1 in 20 runs did not complete in time, so an
+# additional 60 seconds is added to keep those false failures out of test
+# runs, especially on slower test systems.
+EXPECT_WITHIN 120 "done" cat $M0/status_0
+EXPECT_WITHIN 120 "done" cat $M1/status_1
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+TEST check_files $M0
+
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/000-flaky/bugs_distribute_bug-1122443.t b/tests/000-flaky/bugs_distribute_bug-1122443.t
new file mode 100644
index 00000000000..abd37082b33
--- /dev/null
+++ b/tests/000-flaky/bugs_distribute_bug-1122443.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../dht.rc
+
+make_files() {
+ mkdir $1 && \
+ ln -s ../ $1/symlink && \
+ mknod $1/special_b b 1 2 && \
+ mknod $1/special_c c 3 4 && \
+ mknod $1/special_u u 5 6 && \
+ mknod $1/special_p p && \
+ touch -h --date=@1 $1/symlink && \
+ touch -h --date=@2 $1/special_b &&
+ touch -h --date=@3 $1/special_c &&
+ touch -h --date=@4 $1/special_u &&
+ touch -h --date=@5 $1/special_p
+}
+
+bug_1113050_workaround() {
+ # Test if graph change has settled (bug-1113050?)
+ test=$(stat -c "%n:%Y" $1 2>&1 | tr '\n' ',')
+ if [ $? -eq 0 ] ; then
+ echo RECONNECTED
+ else
+ echo WAITING
+ fi
+ return 0
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume start $V0
+
+# Mount FUSE and create symlink
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST make_files $M0/subdir
+
+# Get mtime before migration
+BEFORE="$(stat -c %n:%Y $M0/subdir/* | sort | tr '\n' ',')"
+echo $BEFORE
+# Migrate brick
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}1
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}0"
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}0 commit
+
+# Get mtime after migration
+EXPECT_WITHIN 30 RECONNECTED bug_1113050_workaround $M0/subdir/symlink
+sleep 3
+AFTER="$(stat -c %n:%Y $M0/subdir/* | sort | tr '\n' ',')"
+echo $AFTER
+# Check if mtime is unchanged
+TEST [ "$AFTER" == "$BEFORE" ]
+
+cleanup
diff --git a/tests/000-flaky/bugs_glusterd_bug-857330/common.rc b/tests/000-flaky/bugs_glusterd_bug-857330/common.rc
new file mode 100644
index 00000000000..bd122eff18c
--- /dev/null
+++ b/tests/000-flaky/bugs_glusterd_bug-857330/common.rc
@@ -0,0 +1,57 @@
+. $(dirname $0)/../../include.rc
+
+UUID_REGEX='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
+
+TASK_ID=""
+COMMAND=""
+PATTERN=""
+
+function check-and-store-task-id()
+{
+ TASK_ID=""
+
+ local task_id=$($CLI $COMMAND | grep $PATTERN | grep -o -E "$UUID_REGEX")
+
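+    # Note: ${task_id+asdf} expands to "asdf" whenever task_id is set (even
+    # when empty), so this check fails when no UUID was found in the output.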
+ if [ -z "$task_id" ] && [ "${task_id+asdf}" = "asdf" ]; then
+ return 1
+ fi
+
+ TASK_ID=$task_id
+ return 0;
+}
+
+function get-task-id()
+{
+ $CLI $COMMAND | grep $PATTERN | grep -o -E "$UUID_REGEX" | tail -n1
+
+}
+
+function check-and-store-task-id-xml()
+{
+ TASK_ID=""
+
+ local task_id=$($CLI $COMMAND --xml | xmllint --format - | grep $PATTERN | grep -o -E "$UUID_REGEX")
+
+ if [ -z "$task_id" ] && [ "${task_id+asdf}" = "asdf" ]; then
+ return 1
+ fi
+
+ TASK_ID=$task_id
+ return 0;
+}
+
+function get-task-id-xml()
+{
+ $CLI $COMMAND --xml | xmllint --format - | grep $PATTERN | grep -o -E "$UUID_REGEX"
+}
+
+function get-task-status()
+{
+ pattern=$1
+ val=1
+ test=$(gluster $COMMAND | grep -o $pattern 2>&1)
+ if [ $? -eq 0 ]; then
+ val=0
+ fi
+ echo $val
+}
diff --git a/tests/000-flaky/bugs_glusterd_bug-857330/normal.t b/tests/000-flaky/bugs_glusterd_bug-857330/normal.t
new file mode 100755
index 00000000000..6c1cf54ec3c
--- /dev/null
+++ b/tests/000-flaky/bugs_glusterd_bug-857330/normal.t
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+. $(dirname $0)/common.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}1 $H0:$B0/${V0}2;
+TEST $CLI volume info $V0;
+TEST $CLI volume start $V0;
+
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0;
+
+TEST $PYTHON $(dirname $0)/../../utils/create-files.py \
+ --multi -b 10 -d 10 -n 10 $M0;
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+###############
+## Rebalance ##
+###############
+TEST $CLI volume add-brick $V0 replica 2 $H0:$B0/${V0}3 $H0:$B0/${V0}4;
+
+COMMAND="volume rebalance $V0 start"
+PATTERN="ID:"
+TEST check-and-store-task-id
+
+COMMAND="volume status $V0"
+PATTERN="ID"
+EXPECT $TASK_ID get-task-id
+
+COMMAND="volume rebalance $V0 status"
+PATTERN="completed"
+EXPECT_WITHIN $REBALANCE_TIMEOUT "0" get-task-status $PATTERN
+
+###################
+## Replace-brick ##
+###################
+REP_BRICK_PAIR="$H0:$B0/${V0}2 $H0:$B0/${V0}5"
+
+TEST $CLI volume replace-brick $V0 $REP_BRICK_PAIR commit force;
+
+##################
+## Remove-brick ##
+##################
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}5
+
+COMMAND="volume remove-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}5 start"
+PATTERN="ID:"
+TEST check-and-store-task-id
+
+COMMAND="volume status $V0"
+PATTERN="ID"
+EXPECT $TASK_ID get-task-id
+
+COMMAND="volume remove-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}5 status"
+PATTERN="completed"
+EXPECT_WITHIN $REBALANCE_TIMEOUT "0" get-task-status $PATTERN
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}5 commit
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/000-flaky/bugs_glusterd_bug-857330/xml.t b/tests/000-flaky/bugs_glusterd_bug-857330/xml.t
new file mode 100755
index 00000000000..11785adacdb
--- /dev/null
+++ b/tests/000-flaky/bugs_glusterd_bug-857330/xml.t
@@ -0,0 +1,83 @@
+#!/bin/bash
+
+. $(dirname $0)/common.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}1 $H0:$B0/${V0}2;
+TEST $CLI volume info $V0;
+TEST $CLI volume start $V0;
+
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0;
+
+TEST $PYTHON $(dirname $0)/../../utils/create-files.py \
+ --multi -b 10 -d 10 -n 10 $M0;
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+
+###############
+## Rebalance ##
+###############
+TEST $CLI volume add-brick $V0 replica 2 $H0:$B0/${V0}3 $H0:$B0/${V0}4;
+
+COMMAND="volume rebalance $V0 start"
+PATTERN="task-id"
+TEST check-and-store-task-id-xml
+
+COMMAND="volume status $V0"
+PATTERN="id"
+EXPECT $TASK_ID get-task-id-xml
+
+COMMAND="volume rebalance $V0 status"
+PATTERN="task-id"
+EXPECT $TASK_ID get-task-id-xml
+
+## TODO: Add tests for rebalance stop
+
+COMMAND="volume rebalance $V0 status"
+PATTERN="completed"
+EXPECT_WITHIN $REBALANCE_TIMEOUT "0" get-task-status $PATTERN
+
+###################
+## Replace-brick ##
+###################
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}4 $H0:$B0/${V0}5 commit force
+
+##################
+## Remove-brick ##
+##################
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}5
+
+COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 $H0:$B0/${V0}5 start"
+PATTERN="task-id"
+TEST check-and-store-task-id-xml
+
+COMMAND="volume status $V0"
+PATTERN="id"
+EXPECT $TASK_ID get-task-id-xml
+
+COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 $H0:$B0/${V0}5 status"
+PATTERN="task-id"
+EXPECT $TASK_ID get-task-id-xml
+
+COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 $H0:$B0/${V0}5 status"
+PATTERN="completed"
+EXPECT_WITHIN $REBALANCE_TIMEOUT "0" get-task-status $PATTERN
+
+## TODO: Add tests for remove-brick stop
+
+COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 $H0:$B0/${V0}5 commit"
+PATTERN="task-id"
+EXPECT $TASK_ID get-task-id-xml
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/000-flaky/bugs_glusterd_quorum-value-check.t b/tests/000-flaky/bugs_glusterd_quorum-value-check.t
new file mode 100644
index 00000000000..a431b8c4fd4
--- /dev/null
+++ b/tests/000-flaky/bugs_glusterd_quorum-value-check.t
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+function check_quorum_nfs() {
+    local qnfs="$(grep "quorum-count" /var/lib/glusterd/nfs/nfs-server.vol | awk '{print $3}')"
+ local qinfo="$($CLI volume info $V0| grep "cluster.quorum-count"| awk '{print $2}')"
+
+ if [ $qnfs = $qinfo ]; then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 nfs.disable off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+TEST $CLI volume start $V0
+
+TEST $CLI volume set $V0 cluster.quorum-count 1
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "Y" check_quorum_nfs
+TEST $CLI volume set $V0 cluster.quorum-count 2
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "Y" check_quorum_nfs
+TEST $CLI volume set $V0 cluster.quorum-count 3
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "Y" check_quorum_nfs
+
+cleanup;
diff --git a/tests/000-flaky/bugs_nfs_bug-1116503.t b/tests/000-flaky/bugs_nfs_bug-1116503.t
new file mode 100644
index 00000000000..fc50021acc7
--- /dev/null
+++ b/tests/000-flaky/bugs_nfs_bug-1116503.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+#
+# Verify that mounting NFS over UDP (MOUNT service only) works.
+#
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume set $V0 nfs.mount-udp on
+
+TEST $CLI volume start $V0
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+
+TEST mount_nfs $H0:/$V0 $N0 nolock,mountproto=udp,proto=tcp;
+TEST mkdir -p $N0/foo/bar
+TEST ls $N0/foo
+TEST ls $N0/foo/bar
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0/foo $N0 nolock,mountproto=udp,proto=tcp;
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0/foo/bar $N0 nolock,mountproto=udp,proto=tcp;
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+TEST $CLI volume set $V0 nfs.addr-namelookup on
+TEST $CLI volume set $V0 nfs.rpc-auth-allow $H0
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0/foo/bar $N0 nolock,mountproto=udp,proto=tcp;
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+TEST $CLI volume set $V0 nfs.rpc-auth-reject $H0
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST ! mount_nfs $H0:/$V0/foo/bar $N0 nolock,mountproto=udp,proto=tcp;
+
+cleanup;
diff --git a/tests/000-flaky/features_lock-migration_lkmigration-set-option.t b/tests/000-flaky/features_lock-migration_lkmigration-set-option.t
new file mode 100644
index 00000000000..1327ef3579f
--- /dev/null
+++ b/tests/000-flaky/features_lock-migration_lkmigration-set-option.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+# Tests for the lock-migration volume option
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+#Check lock-migration set option sanity
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2
+TEST $CLI volume start $V0
+
+TEST $CLI volume set $V0 lock-migration on
+EXPECT "on" echo `$CLI volume info | grep lock-migration | awk '{print $2}'`
+TEST $CLI volume set $V0 lock-migration off
+EXPECT "off" echo `$CLI volume info | grep lock-migration | awk '{print $2}'`
+TEST ! $CLI volume set $V0 lock-migration garbage
+#make sure it is still off
+EXPECT "off" echo `$CLI volume info | grep lock-migration | awk '{print $2}'`
+
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+
+#create an afr volume and make sure option setting fails
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2
+TEST $CLI volume start $V0
+
+TEST ! $CLI volume set $V0 lock-migration on
+
+cleanup;