Diffstat (limited to 'tests/bugs/snapshot')
-rw-r--r--  tests/bugs/snapshot/bug-1109889.t                                                              |   4
-rwxr-xr-x  tests/bugs/snapshot/bug-1111041.t                                                              |  40
-rw-r--r--  tests/bugs/snapshot/bug-1140162-file-snapshot-features-encrypt-opts-validation.t               |  33
-rw-r--r--  tests/bugs/snapshot/bug-1155042-dont-display-deactivated-snapshots.t                           |   6
-rw-r--r--  tests/bugs/snapshot/bug-1164613.t                                                              |  35
-rwxr-xr-x  tests/bugs/snapshot/bug-1166197.t                                                              |   3
-rw-r--r--  tests/bugs/snapshot/bug-1167580-set-proper-uid-and-gid-during-nfs-access.t                     |   5
-rw-r--r--  tests/bugs/snapshot/bug-1202436-calculate-quota-cksum-during-snap-restore.t                    |   1
-rw-r--r--  tests/bugs/snapshot/bug-1227646.t                                                              |   1
-rw-r--r--  tests/bugs/snapshot/bug-1260848.t                                                              |  28
-rwxr-xr-x  tests/bugs/snapshot/bug-1275616.t                                                              |  50
-rw-r--r--  tests/bugs/snapshot/bug-1279327.t                                                              |  29
-rw-r--r--  tests/bugs/snapshot/bug-1316437.t                                                              |  29
-rw-r--r--  tests/bugs/snapshot/bug-1322772-real-path-fix-for-snapshot.t                                   |  56
-rwxr-xr-x  tests/bugs/snapshot/bug-1399598-uss-with-ssl.t                                                 | 108
-rw-r--r--  tests/bugs/snapshot/bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t   | 133
-rw-r--r--  tests/bugs/snapshot/bug-1512451-snapshot-creation-failed-after-brick-reset.t                   |  39
-rw-r--r--  tests/bugs/snapshot/bug-1597662.t                                                              |  58
-rw-r--r--  tests/bugs/snapshot/bug-1618004-fix-memory-corruption-in-snap-import.t                         |  48
19 files changed, 631 insertions(+), 75 deletions(-)
diff --git a/tests/bugs/snapshot/bug-1109889.t b/tests/bugs/snapshot/bug-1109889.t
index 6b29cdd9eb1..5fdc7dc9506 100644
--- a/tests/bugs/snapshot/bug-1109889.t
+++ b/tests/bugs/snapshot/bug-1109889.t
@@ -19,9 +19,9 @@ TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3;
TEST $CLI volume start $V0;
-TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
-MOUNT_PID=`ps ax |grep "glusterfs --volfile-sever $H0 --volfile-id=$V0 $M0" | grep -v grep | awk '{print $1}' | head -1`
+MOUNT_PID=$(get_mount_process_pid $V0 $M0)
for i in {1..10} ; do echo "file" > $M0/file$i ; done
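
The fix above swaps a fragile ps/grep pipeline (which greps for a mistyped "--volfile-sever $H0" command line and therefore never matched) for the get_mount_process_pid helper from the shared test harness. A minimal sketch of a helper in that spirit, assuming it resolves the FUSE client PID from the volume name and mount point (an illustration, not the actual volume.rc implementation):

    # Sketch: find the glusterfs client process serving volume $1 at mount point $2.
    function mount_process_pid_sketch {
        local vol=$1
        local mnt=$2
        ps auxww | grep "[g]lusterfs" | grep "volfile-id=$vol " | grep " $mnt" | awk '{print $2}' | head -1
    }
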
diff --git a/tests/bugs/snapshot/bug-1111041.t b/tests/bugs/snapshot/bug-1111041.t
new file mode 100755
index 00000000000..efda9688d8b
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1111041.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+. $(dirname $0)/../../nfs.rc
+
+cleanup;
+
+function is_snapd_running {
+ $CLI volume status $1 | grep "Snapshot Daemon" | wc -l;
+}
+
+function snapd_pid {
+ $CLI volume status $V0 | grep "Snapshot Daemon" | awk {'print $8'}
+}
+
+TEST glusterd;
+
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+
+TEST $CLI volume start $V0;
+
+EXPECT "0" is_snapd_running $v0
+
+TEST $CLI volume set $V0 features.uss enable;
+
+EXPECT "1" is_snapd_running $V0
+
+SNAPD_PID=$(snapd_pid);
+
+TEST [ $SNAPD_PID -gt 0 ]
+
+kill -9 $SNAPD_PID
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^N/A$" snapd_pid
+
+cleanup ;
diff --git a/tests/bugs/snapshot/bug-1140162-file-snapshot-features-encrypt-opts-validation.t b/tests/bugs/snapshot/bug-1140162-file-snapshot-features-encrypt-opts-validation.t
deleted file mode 100644
index f91093db4e7..00000000000
--- a/tests/bugs/snapshot/bug-1140162-file-snapshot-features-encrypt-opts-validation.t
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-## Test case for BZ-1140160 Volume option set <vol> <file-snapshot> and
-## <features.encryption> <value> command input should validate correctly.
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-## Start glusterd
-TEST glusterd;
-TEST pidof glusterd;
-
-## Lets create and start volume
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
-TEST $CLI volume start $V0
-
-## Set features.file-snapshot and features.encryption option with non-boolean
-## value. These options should fail.
-TEST ! $CLI volume set $V0 features.file-snapshot abcd
-TEST ! $CLI volume set $V0 features.encryption redhat
-
-## Set other options with valid value. These options should succeed.
-TEST $CLI volume set $V0 barrier enable
-TEST $CLI volume set $V0 ping-timeout 60
-
-## Set features.file-snapshot and features.encryption option with valid boolean
-## value. These options should succeed.
-TEST $CLI volume set $V0 features.file-snapshot on
-TEST $CLI volume set $V0 features.encryption on
-
-cleanup;
diff --git a/tests/bugs/snapshot/bug-1155042-dont-display-deactivated-snapshots.t b/tests/bugs/snapshot/bug-1155042-dont-display-deactivated-snapshots.t
index 6697c263ac1..c5a285eb775 100644
--- a/tests/bugs/snapshot/bug-1155042-dont-display-deactivated-snapshots.t
+++ b/tests/bugs/snapshot/bug-1155042-dont-display-deactivated-snapshots.t
@@ -21,16 +21,16 @@ TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
# in the USS world
gluster snapshot config activate-on-create enable
for i in {1..10}; do $CLI snapshot create snap$i $V0 no-timestamp; done
-EXPECT 10 uss_count_snap_displayed $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 10 uss_count_snap_displayed $M0
# snapshots should not be displayed after deactivation
for i in {1..10}; do $CLI snapshot deactivate snap$i --mode=script; done
-EXPECT 0 uss_count_snap_displayed $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 uss_count_snap_displayed $M0
# activate all the snapshots and check if all the activated snapshots
# are displayed again
for i in {1..10}; do $CLI snapshot activate snap$i --mode=script; done
-EXPECT 10 uss_count_snap_displayed $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 10 uss_count_snap_displayed $M0
cleanup;
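
Replacing EXPECT with EXPECT_WITHIN above matters because snapd and the client graph take a moment to reflect snapshot activation and deactivation, so the displayed-snapshot count has to be polled rather than checked once. A sketch of such a retry loop, assuming the harness's EXPECT_WITHIN behaves along these lines (illustrative only):

    # Sketch: re-run "$@" every second until it prints $expected or $timeout expires.
    function expect_within_sketch {
        local timeout=$1 expected=$2
        shift 2
        local deadline=$(( $(date +%s) + timeout ))
        while [ "$(date +%s)" -le "$deadline" ]; do
            [ "$("$@")" = "$expected" ] && return 0
            sleep 1
        done
        return 1
    }
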
diff --git a/tests/bugs/snapshot/bug-1164613.t b/tests/bugs/snapshot/bug-1164613.t
deleted file mode 100644
index 225234dc43e..00000000000
--- a/tests/bugs/snapshot/bug-1164613.t
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../snapshot.rc
-
-cleanup;
-TEST verify_lvm_version;
-TEST glusterd;
-TEST pidof glusterd;
-
-TEST setup_lvm 1
-
-TEST $CLI volume create $V0 $H0:$L1
-TEST $CLI volume start $V0
-TEST glusterfs -s $H0 --volfile-id=$V0 $M0
-
-TEST touch $M0/testfile
-
-TEST $CLI snapshot create snaps $V0 no-timestamp
-TEST $CLI snapshot activate snaps
-TEST $CLI volume set $V0 features.uss enable
-TEST $CLI volume set $V0 snapshot-directory snaps
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/snaps/snaps/testfile
-
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-
-#Clean up
-TEST $CLI snapshot delete snaps
-TEST $CLI volume stop $V0 force
-TEST $CLI volume delete $V0
-
-cleanup;
-
diff --git a/tests/bugs/snapshot/bug-1166197.t b/tests/bugs/snapshot/bug-1166197.t
index 739839fab81..b070ae271ba 100755
--- a/tests/bugs/snapshot/bug-1166197.t
+++ b/tests/bugs/snapshot/bug-1166197.t
@@ -5,6 +5,8 @@
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../nfs.rc
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
cleanup;
CURDIR=`pwd`
@@ -15,6 +17,7 @@ TEST pidof glusterd;
TEST setup_lvm 1
TEST $CLI volume create $V0 $H0:$L1
+TEST $CLI volume set $V0 nfs.disable false
TEST $CLI volume start $V0
TEST $CLI snapshot config activate-on-create enable
TEST $CLI volume set $V0 features.uss enable
diff --git a/tests/bugs/snapshot/bug-1167580-set-proper-uid-and-gid-during-nfs-access.t b/tests/bugs/snapshot/bug-1167580-set-proper-uid-and-gid-during-nfs-access.t
index 0893826b343..52a7a790b97 100644
--- a/tests/bugs/snapshot/bug-1167580-set-proper-uid-and-gid-during-nfs-access.t
+++ b/tests/bugs/snapshot/bug-1167580-set-proper-uid-and-gid-during-nfs-access.t
@@ -4,6 +4,8 @@
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../snapshot.rc
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
# This function returns a value "Y" if user can execute
# the given command. Else it will return "N"
# @arg-1 : Name of the user
@@ -72,6 +74,7 @@ TEST setup_lvm 1
TEST glusterd
TEST $CLI volume create $V0 $H0:$L1
+TEST $CLI volume set $V0 nfs.disable false
TEST $CLI volume start $V0
# Mount the volume as both fuse and nfs mount
@@ -198,3 +201,5 @@ TEST $CLI snapshot delete all
cleanup;
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
diff --git a/tests/bugs/snapshot/bug-1202436-calculate-quota-cksum-during-snap-restore.t b/tests/bugs/snapshot/bug-1202436-calculate-quota-cksum-during-snap-restore.t
index 366937245f2..addc05917d8 100644
--- a/tests/bugs/snapshot/bug-1202436-calculate-quota-cksum-during-snap-restore.t
+++ b/tests/bugs/snapshot/bug-1202436-calculate-quota-cksum-during-snap-restore.t
@@ -27,7 +27,6 @@ EXPECT '1' get_snap_count CLI_1 $V0
TEST $CLI_1 volume stop $V0
EXPECT 'Stopped' volinfo_field $V0 'Status'
-EXPECT "1" get_aux
TEST $CLI_1 snapshot restore $($CLI_1 snapshot list)
EXPECT '0' get_snap_count CLI_1 $V0
diff --git a/tests/bugs/snapshot/bug-1227646.t b/tests/bugs/snapshot/bug-1227646.t
index 643d814e2ee..9b73dfdb32f 100644
--- a/tests/bugs/snapshot/bug-1227646.t
+++ b/tests/bugs/snapshot/bug-1227646.t
@@ -20,7 +20,6 @@ TEST $CLI snapshot create snap1 $V0 no-timestamp;
TEST $CLI volume stop $V0
TEST $CLI snapshot restore snap1;
TEST $CLI volume start $V0
-TEST $CLI volume attach-tier $V0 $H0:$L1 $H0:$L2
TEST pkill gluster
TEST glusterd
diff --git a/tests/bugs/snapshot/bug-1260848.t b/tests/bugs/snapshot/bug-1260848.t
new file mode 100644
index 00000000000..6455d8297b2
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1260848.t
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../volume.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume set $V0 uss on
+TEST $CLI volume start $V0
+
+## Wait for volume to register with rpc.mountd
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+
+## Mount NFS
+TEST mount_nfs $H0:/$V0 $N0 nolock;
+
+TEST df -h $N0
+
+cleanup;
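
The EXPECT_WITHIN on is_nfs_export_available gives rpc.mountd time to pick up the export before the NFS mount is attempted. A hypothetical sketch of such a check, assuming the real helper in nfs.rc inspects the export list in a similar way:

    # Sketch: print the number of matching entries in the NFS export list
    # (1 once the volume is exported, 0 before that).
    function nfs_export_available_sketch {
        showmount -e $H0 2>/dev/null | grep -w "$V0" | wc -l
    }
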
diff --git a/tests/bugs/snapshot/bug-1275616.t b/tests/bugs/snapshot/bug-1275616.t
new file mode 100755
index 00000000000..dcaeae30f90
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1275616.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+TEST verify_lvm_version;
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST setup_lvm 1
+
+TEST $CLI volume create $V0 $H0:$L1
+TEST $CLI volume start $V0
+TEST $CLI snapshot config activate-on-create enable
+
+TEST $CLI snapshot config $V0 snap-max-hard-limit 100
+TEST $CLI snapshot create snap1 $V0 no-timestamp
+
+TEST $CLI snapshot config $V0 snap-max-hard-limit 150
+TEST $CLI snapshot create snap2 $V0 no-timestamp
+
+TEST $CLI snapshot config $V0 snap-max-hard-limit 200
+TEST $CLI snapshot create snap3 $V0 no-timestamp
+EXPECT '197' snap_info_volume CLI "Snaps Available" $V0;
+
+TEST $CLI volume stop $V0
+
+# Restore the snapshots and verify the snap-max-hard-limit
+# and the Snaps Available
+TEST $CLI snapshot restore snap1
+EXPECT '98' snap_info_volume CLI "Snaps Available" $V0;
+EXPECT '100' snap_config_volume CLI 'snap-max-hard-limit' $V0
+
+TEST $CLI snapshot restore snap2
+EXPECT '149' snap_info_volume CLI "Snaps Available" $V0;
+EXPECT '150' snap_config_volume CLI 'snap-max-hard-limit' $V0
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Yes" get_snap_brick_status snap3
+
+#Take a clone and verify it inherits snapshot's snap-max-hard-limit
+TEST $CLI snapshot clone clone1 snap3
+
+EXPECT '149' snap_info_volume CLI "Snaps Available" $V0;
+EXPECT '150' snap_config_volume CLI 'snap-max-hard-limit' $V0
+
+EXPECT '200' snap_info_volume CLI "Snaps Available" clone1
+EXPECT '200' snap_config_volume CLI 'snap-max-hard-limit' clone1
+
+cleanup;
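
Note that snap_info_volume and snap_config_volume are called with the literal string CLI rather than $CLI; helpers written this way usually dereference the variable name with bash indirect expansion, so the same function works for $CLI, $CLI_1, and so on. A hypothetical sketch of that pattern (not the actual snapshot.rc helpers):

    # Sketch: $1 is the *name* of the CLI variable, $2 the field, $3 the volume or snap.
    function snap_field_sketch {
        local cli=${!1}          # indirect expansion: "CLI" -> value of $CLI
        local field=$2
        local vol=$3
        $cli snapshot info volume $vol | grep "$field" | awk -F ':' '{print $2}' | tr -d ' '
    }
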
diff --git a/tests/bugs/snapshot/bug-1279327.t b/tests/bugs/snapshot/bug-1279327.t
new file mode 100644
index 00000000000..4e4be6eeea6
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1279327.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST verify_lvm_version;
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST init_n_bricks 3
+TEST setup_lvm 3
+
+TEST $CLI volume create $V0 $H0:$L1
+TEST $CLI volume start $V0
+TEST $CLI volume quota $V0 enable
+
+TEST $CLI snapshot create snap1 $V0 no-timestamp
+TEST $CLI snapshot activate snap1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Yes" get_snap_brick_status snap1
+
+#Take a clone of the snapshot and verify that the clone volume can be started
+TEST $CLI snapshot clone clone1 snap1
+TEST $CLI volume start clone1
+EXPECT 'Started' volinfo_field clone1 'Status';
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1316437.t b/tests/bugs/snapshot/bug-1316437.t
new file mode 100644
index 00000000000..300c03c97f5
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1316437.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+TEST glusterd
+
+# Intentionally not carving lvms for this as we will not be taking
+# snapshots in this testcase
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+TEST $CLI volume start $V0;
+
+TEST $CLI volume set $V0 features.uss enable;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
+
+killall glusterd glusterfsd glusterfs
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 'N' check_if_snapd_exist
+
+glusterd
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
+
+cleanup;
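
The test leans on check_if_snapd_exist from the shared harness to confirm that glusterd restarts snapd for USS-enabled volumes after a full restart. A rough equivalent, under the assumption that snapd is identifiable by "snapd" in its command line (not the actual helper):

    # Sketch: report Y if a snapd process appears to be running, N otherwise.
    function check_snapd_sketch {
        if pgrep -f "snapd" > /dev/null; then
            echo "Y"
        else
            echo "N"
        fi
    }
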
diff --git a/tests/bugs/snapshot/bug-1322772-real-path-fix-for-snapshot.t b/tests/bugs/snapshot/bug-1322772-real-path-fix-for-snapshot.t
new file mode 100644
index 00000000000..488bd462a01
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1322772-real-path-fix-for-snapshot.t
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../include.rc
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=2
+
+TEST verify_lvm_version
+TEST init_n_bricks 2
+TEST setup_lvm 2
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$L1
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI volume create $V1 $H0:$L2
+EXPECT 'Created' volinfo_field $V1 'Status'
+
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+TEST $CLI volume start $V1
+EXPECT 'Started' volinfo_field $V1 'Status'
+
+TEST $CLI snapshot config activate-on-create enable
+TEST $CLI snapshot create ${V0}_snap $V0 no-timestamp
+TEST $CLI snapshot create ${V1}_snap $V1 no-timestamp
+
+# Simulate a node reboot by unmounting the brick and the snap brick, followed by
+# deleting the brick. Once glusterd restarts, it should be able to reconstruct
+# and remount the snap brick
+snap_bricks=`gluster snap status | grep "Brick Path" | awk -F ":" '{print $3}'`
+
+TEST $CLI volume stop $V1
+TEST $CLI snapshot restore ${V1}_snap;
+
+pkill gluster
+for snap_brick in $snap_bricks
+do
+ echo "Unmounting snap brick" $snap_brick
+ EXPECT_WITHIN_TEST_IN_LOOP $UMOUNT_TIMEOUT "Y" force_umount $snap_brick
+done
+
+rm -rf $snap_brick
+
+TEST glusterd
+TEST pidof glusterd
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $L1
+
+cleanup
+
diff --git a/tests/bugs/snapshot/bug-1399598-uss-with-ssl.t b/tests/bugs/snapshot/bug-1399598-uss-with-ssl.t
new file mode 100755
index 00000000000..f4e4e6ec4d2
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1399598-uss-with-ssl.t
@@ -0,0 +1,108 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../traps.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../ssl.rc
+
+function file_exists
+{
+ if [ -f $1 ]; then echo "Y"; else echo "N"; fi
+}
+
+function volume_online_brick_count
+{
+ $CLI volume status $V0 | awk '$1 == "Brick" && $6 != "N/A" { print $6}' | wc -l;
+}
+
+function total_online_bricks
+{
+ # This will count snapd, which isn't really a brick, but callers can
+ # account for that so it's OK.
+ find $GLUSTERD_PIDFILEDIR -name '*.pid' | wc -l
+}
+
+cleanup;
+
+# Initialize the test setup
+TEST setup_lvm 1;
+
+TEST create_self_signed_certs
+
+# Start glusterd
+TEST glusterd
+TEST pidof glusterd;
+#EST $CLI volume set all cluster.brick-multiplex on
+
+# Create and start the volume
+TEST $CLI volume create $V0 $H0:$L1/b1;
+
+TEST $CLI volume start $V0;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" volume_online_brick_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" total_online_bricks
+
+# Mount the volume and create some files
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+TEST touch $M0/file;
+
+# Enable activate-on-create
+TEST $CLI snapshot config activate-on-create enable;
+
+# Create a snapshot
+TEST $CLI snapshot create snap1 $V0 no-timestamp;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" total_online_bricks
+
+TEST $CLI volume set $V0 features.uss enable;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" total_online_bricks
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" file_exists $M0/file
+# A volume set can trigger a graph switch, so there is a chance this request
+# is sent to the old graph, which will not have .snaps. Therefore we should
+# wait for some time.
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" file_exists $M0/.snaps/snap1/file
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+# Enable management encryption
+touch $GLUSTERD_WORKDIR/secure-access
+killall_gluster
+
+TEST glusterd
+TEST pidof glusterd;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" volume_online_brick_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" total_online_bricks
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
+
+# Mount the volume
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" file_exists $M0/file
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" file_exists $M0/.snaps/snap1/file
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+# Enable I/O encryption
+TEST $CLI volume set $V0 client.ssl on
+TEST $CLI volume set $V0 server.ssl on
+
+killall_gluster
+
+TEST glusterd
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" volume_online_brick_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" total_online_bricks
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
+
+# Mount the volume
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" file_exists $M0/file
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" file_exists $M0/.snaps/snap1/file
+
+TEST $CLI snapshot delete all
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t b/tests/bugs/snapshot/bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t
new file mode 100644
index 00000000000..04a85db0c1a
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t
@@ -0,0 +1,133 @@
+#!/bin/bash
+
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function create_snapshots() {
+ $CLI_1 snapshot create ${V0}_snap ${V0} no-timestamp &
+ PID_1=$!
+
+ $CLI_1 snapshot create ${V1}_snap ${V1} no-timestamp &
+ PID_2=$!
+
+ wait $PID_1 $PID_2
+}
+
+function activate_snapshots() {
+ $CLI_1 snapshot activate ${V0}_snap &
+ PID_1=$!
+
+ $CLI_1 snapshot activate ${V1}_snap &
+ PID_2=$!
+
+ wait $PID_1 $PID_2
+}
+
+function deactivate_snapshots() {
+ $CLI_1 snapshot deactivate ${V0}_snap &
+ PID_1=$!
+
+ $CLI_1 snapshot deactivate ${V1}_snap &
+ PID_2=$!
+
+ wait $PID_1 $PID_2
+}
+cleanup;
+
+TEST verify_lvm_version;
+# Create cluster with 3 nodes
+TEST launch_cluster 3;
+TEST setup_lvm 3
+
+TEST $CLI_1 peer probe $H2;
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count;
+
+# Create volumes
+TEST $CLI_1 volume create $V0 $H1:$L1
+TEST $CLI_2 volume create $V1 $H2:$L2 $H3:$L3
+
+# Start volumes
+TEST $CLI_1 volume start $V0
+TEST $CLI_2 volume start $V1
+
+TEST $CLI_1 snapshot config activate-on-create enable
+
+# Snapshot Operations
+create_snapshots
+
+EXPECT 'Started' snapshot_status ${V0}_snap;
+EXPECT 'Started' snapshot_status ${V1}_snap;
+
+deactivate_snapshots
+
+EXPECT 'Stopped' snapshot_status ${V0}_snap;
+EXPECT 'Stopped' snapshot_status ${V1}_snap;
+
+activate_snapshots
+
+EXPECT 'Started' snapshot_status ${V0}_snap;
+EXPECT 'Started' snapshot_status ${V1}_snap;
+
+# This function gets the snap id from the snap info command and
+# checks for a mount point on the system matching that snap id.
+function mounted_snaps
+{
+ snap_id=`$CLI_1 snap info $1_snap | grep "Snap Volume Name" |
+ awk -F ":" '{print $2}'`
+ echo `mount | grep $snap_id | wc -l`
+}
+
+EXPECT "1" mounted_snaps ${V0}
+EXPECT "2" mounted_snaps ${V1}
+
+deactivate_snapshots
+
+EXPECT "0" mounted_snaps ${V0}
+EXPECT "0" mounted_snaps ${V1}
+
+# This part of the test is designed to validate that updates are properly
+# handled during the handshake.
+
+activate_snapshots
+
+EXPECT 'Started' snapshot_status ${V0}_snap;
+EXPECT 'Started' snapshot_status ${V1}_snap;
+
+kill_glusterd 2
+
+deactivate_snapshots
+EXPECT 'Stopped' snapshot_status ${V0}_snap;
+EXPECT 'Stopped' snapshot_status ${V1}_snap;
+
+TEST start_glusterd 2
+
+# Updates from the friend should reflect that the snap was deactivated while the
+# glusterd process was down, and the mount point should no longer exist.
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" mounted_snaps ${V0}
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" mounted_snaps ${V1}
+
+# The snap import synctask might still be updating the data, so allow
+# some buffer time to be on the safer side.
+sleep 2
+
+kill_glusterd 2
+activate_snapshots
+EXPECT 'Started' snapshot_status ${V0}_snap;
+EXPECT 'Started' snapshot_status ${V1}_snap;
+TEST start_glusterd 2
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count;
+
+# Updates from the friend should reflect that the snap was activated while the
+# glusterd process was down, and the mount point should exist.
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" mounted_snaps ${V0}
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" mounted_snaps ${V1}
+
+cleanup;
+# run first!
+#G_TESTDEF_TEST_STATUS_CENTOS6=BRICK_MUX_BAD_TEST,BUG=1743069
diff --git a/tests/bugs/snapshot/bug-1512451-snapshot-creation-failed-after-brick-reset.t b/tests/bugs/snapshot/bug-1512451-snapshot-creation-failed-after-brick-reset.t
new file mode 100644
index 00000000000..53b274e8819
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1512451-snapshot-creation-failed-after-brick-reset.t
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+TEST verify_lvm_version
+TEST launch_cluster 2
+TEST setup_lvm 2
+
+TEST $CLI_1 peer probe $H2
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$L1/B1 $H2:$L2/B1
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+TEST $CLI_1 snapshot create ${V0}_snap1 ${V0} no-timestamp
+TEST snapshot_exists 1 ${V0}_snap1
+
+TEST $CLI_1 snapshot delete ${V0}_snap1
+TEST ! snapshot_exists 1 ${V0}_snap1
+
+TEST $CLI_1 volume reset-brick $V0 $H1:$L1/B1 start
+TEST $CLI_1 volume reset-brick $V0 $H1:$L1/B1 $H1:$L1/B1 commit force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $L1/B1
+
+TEST $CLI_1 snapshot create ${V0}_snap1 ${V0} no-timestamp
+TEST snapshot_exists 1 ${V0}_snap1
+
+TEST $CLI_1 snapshot delete ${V0}_snap1
+TEST ! snapshot_exists 1 ${V0}_snap1
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1597662.t b/tests/bugs/snapshot/bug-1597662.t
new file mode 100644
index 00000000000..f582930476a
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1597662.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+TEST init_n_bricks 3;
+TEST setup_lvm 3;
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3;
+TEST $CLI volume start $V0;
+
+snap_path=/var/run/gluster/snaps
+
+TEST $CLI snapshot create snap1 $V0 no-timestamp;
+
+$CLI snapshot activate snap1;
+
+EXPECT 'Started' snapshot_status snap1;
+
+# This function checks for an entry /var/run/gluster/snaps/<snap-name>
+# matching the snap name
+
+function is_snap_path
+{
+ echo `ls $snap_path | grep snap1 | wc -l`
+}
+
+# snap is active so snap_path should exist
+EXPECT "1" is_snap_path
+
+$CLI snapshot deactivate snap1;
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} 'Stopped' snapshot_status snap1
+# snap is deactivated so snap_path should not exist
+EXPECT "0" is_snap_path
+
+# activate snap again
+$CLI snapshot activate snap1;
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} 'Started' snapshot_status snap1
+
+# snap is active so snap_path should exist
+EXPECT "1" is_snap_path
+
+# delete snap now
+TEST $CLI snapshot delete snap1;
+
+# snap is deleted so snap_path should not exist
+EXPECT "0" is_snap_path
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
+
diff --git a/tests/bugs/snapshot/bug-1618004-fix-memory-corruption-in-snap-import.t b/tests/bugs/snapshot/bug-1618004-fix-memory-corruption-in-snap-import.t
new file mode 100644
index 00000000000..a2c004e435e
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1618004-fix-memory-corruption-in-snap-import.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../cluster.rc
+
+function get_volume_info ()
+{
+ local var=$1
+ $CLI_1 volume info $V0 | grep "^$var" | sed 's/.*: //'
+}
+
+cleanup;
+
+TEST verify_lvm_version
+TEST launch_cluster 2
+TEST setup_lvm 2
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
+
+TEST $CLI_1 volume create $V0 $H1:$L1 $H2:$L2
+EXPECT "$V0" get_volume_info 'Volume Name';
+EXPECT 'Created' get_volume_info 'Status';
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' get_volume_info 'Status';
+
+
+# Enable activation of snapshots on creation
+TEST $CLI_1 snapshot config activate-on-create enable
+
+TEST $CLI_1 snapshot create snap1 $V0 no-timestamp description "test"
+TEST kill_glusterd 1
+#Deactivate the snapshot to change the snap version, so that a handshake will
+#happen when glusterd is restarted
+TEST $CLI_2 snapshot deactivate snap1
+TEST start_glusterd 1
+
+#Wait till handshake complete
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} 'Stopped' snapshot_status snap1
+
+#Delete the snapshot; without this fix, the delete will lead to an assertion failure
+$CLI_1 snapshot delete all
+EXPECT '0' get_snap_count CLI_1;
+cleanup;
+