author     Avra Sengupta <asengupt@redhat.com>          2014-02-19 16:30:11 +0530
committer  Vijay Bellur <vbellur@redhat.com>            2014-04-11 16:29:17 -0700
commit     29bccc2ed18eedc40e83d2f0d35327037a322384 (patch)
tree       207829c5a0535af28cbad6de90497d2f48093d1a /tests
parent     2045c9ea1c7c3aac9d377070df6f0ee99619f421 (diff)
gluster: GlusterFS Volume Snapshot Feature
This is the initial patch for the Snapshot feature. Current patch includes following features:
* Snapshot create
* Snapshot delete
* Snapshot restore
* Snapshot list
* Snapshot info
* Snapshot status
* Snapshot config

Change-Id: I2f46920c0d61c515f6a60e0f8b46fff886d9f6a9
BUG: 1061685
Signed-off-by: shishir gowda <sgowda@redhat.com>
Signed-off-by: Sachin Pandit <spandit@redhat.com>
Signed-off-by: Vijaikumar M <vmallika@redhat.com>
Signed-off-by: Raghavendra Bhat <raghavendra@redhat.com>
Signed-off-by: Rajesh Joseph <rjoseph@redhat.com>
Signed-off-by: Joseph Fernandes <josferna@redhat.com>
Signed-off-by: Avra Sengupta <asengupt@redhat.com>
Reviewed-on: http://review.gluster.org/7128
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
Diffstat (limited to 'tests')
-rw-r--r-- [-rwxr-xr-x]  tests/basic/mgmt_v3-locks.t (renamed from tests/basic/volume-locks.t)    21
-rwxr-xr-x               tests/basic/volume-snapshot.t                                            95
-rwxr-xr-x               tests/bugs/bug-1045333.t                                                 51
-rwxr-xr-x               tests/bugs/bug-1049834.t                                                 40
-rw-r--r--               tests/bugs/bug-1064768.t                                                 20
-rwxr-xr-x [-rw-r--r--]  tests/bugs/bug-948686.t                                                   0
-rwxr-xr-x               tests/cluster.rc                                                          3
-rw-r--r--               tests/include.rc                                                          2
-rwxr-xr-x               tests/snapshot.rc                                                       290
-rw-r--r--               tests/volume.rc                                                          51
10 files changed, 570 insertions, 3 deletions
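The snapshot CLI surface these tests exercise, gathered in one place as an illustrative sketch (the volume name, snap name, and mount point below are placeholders; snapshot info/status are part of the patch but not driven by these tests):

    # assumes a started volume named patchy and an empty mount point /mnt/snapview
    gluster snapshot create patchy_snap patchy                       # take a snapshot of the volume
    gluster snapshot list                                            # list snap names (what snapshot_exists greps)
    gluster snapshot config patchy snap-max-hard-limit 100           # per-volume snapshot limit
    glusterfs -s localhost --volfile-id=/snaps/patchy_snap/patchy /mnt/snapview   # mount the snapshot
    umount -f /mnt/snapview
    gluster snapshot delete patchy_snap
    # restore path (volume-snapshot.t stops the volume first; the snap no longer exists after restore):
    # gluster volume stop patchy force
    # gluster snapshot restore patchy_snap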
diff --git a/tests/basic/volume-locks.t b/tests/basic/mgmt_v3-locks.t
index b9e94b7e166..22ca27b9f20 100755..100644
--- a/tests/basic/volume-locks.t
+++ b/tests/basic/mgmt_v3-locks.t
@@ -27,13 +27,23 @@ function volinfo_field()
function two_diff_vols_create {
# Both volume creates should be successful
$CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0 &
- $CLI_2 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1 $H3:$B3/$V1
+ PID_1=$!
+
+ $CLI_2 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1 $H3:$B3/$V1 &
+ PID_2=$!
+
+ wait $PID_1 $PID_2
}
function two_diff_vols_start {
# Both volume starts should be successful
$CLI_1 volume start $V0 &
- $CLI_2 volume start $V1
+ PID_1=$!
+
+ $CLI_2 volume start $V1 &
+ PID_2=$!
+
+ wait $PID_1 $PID_2
}
function two_diff_vols_stop_force {
@@ -42,7 +52,12 @@ function two_diff_vols_stop_force {
# still go ahead. Both volume stops should
# be successful
$CLI_1 volume stop $V0 force &
- $CLI_2 volume stop $V1 force
+ PID_1=$!
+
+ $CLI_2 volume stop $V1 force &
+ PID_2=$!
+
+ wait $PID_1 $PID_2
}
function same_vol_remove_brick {
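The change above backgrounds the second CLI call too, so both volume operations genuinely contend for the mgmt_v3 locks instead of running one after the other. A generic sketch of the pattern (the functions are stand-ins, not part of the test):

    first_op()  { sleep 1; }    # stands in for "$CLI_1 volume start $V0"
    second_op() { sleep 1; }    # stands in for "$CLI_2 volume start $V1"

    first_op &
    PID_1=$!
    second_op &
    PID_2=$!
    wait $PID_1 $PID_2          # bash: exit status is that of the last PID listed ($PID_2)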
diff --git a/tests/basic/volume-snapshot.t b/tests/basic/volume-snapshot.t
new file mode 100755
index 00000000000..c826631cad9
--- /dev/null
+++ b/tests/basic/volume-snapshot.t
@@ -0,0 +1,95 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../cluster.rc
+. $(dirname $0)/../snapshot.rc
+
+V1="patchy2"
+
+function create_volumes() {
+ $CLI_1 volume create $V0 $H1:$L1 &
+ PID_1=$!
+
+ $CLI_2 volume create $V1 $H2:$L2 $H3:$L3 &
+ PID_2=$!
+
+ wait $PID_1 $PID_2
+}
+
+function create_snapshots() {
+ $CLI_1 snapshot create ${V0}_snap ${V0}&
+ PID_1=$!
+
+ $CLI_1 snapshot create ${V1}_snap ${V1}&
+ PID_2=$!
+
+ wait $PID_1 $PID_2
+}
+
+function delete_snapshots() {
+ $CLI_1 snapshot delete ${V0}_snap &
+ PID_1=$!
+
+ $CLI_1 snapshot delete ${V1}_snap &
+ PID_2=$!
+
+ wait $PID_1 $PID_2
+}
+
+function restore_snapshots() {
+ $CLI_1 snapshot restore ${V0}_snap &
+ PID_1=$!
+
+ $CLI_1 snapshot restore ${V1}_snap &
+ PID_2=$!
+
+ wait $PID_1 $PID_2
+}
+cleanup;
+
+#Create cluster with 3 nodes
+TEST launch_cluster 3;
+TEST setup_lvm 3
+
+TEST $CLI_1 peer probe $H2;
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN 20 2 peer_count;
+
+create_volumes
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT 'Created' volinfo_field $V1 'Status';
+
+start_volumes 2
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT 'Started' volinfo_field $V1 'Status';
+
+#Snapshot Operations
+create_snapshots
+TEST snapshot_exists 1 ${V0}_snap
+TEST snapshot_exists 1 ${V1}_snap
+TEST $CLI_1 snapshot config $V0 snap-max-hard-limit 100
+TEST $CLI_1 snapshot config $V1 snap-max-hard-limit 100
+
+TEST glusterfs -s $H1 --volfile-id=/snaps/${V0}_snap/${V0} $M0
+sleep 2
+TEST umount -f $M0
+TEST glusterfs -s $H2 --volfile-id=/snaps/${V1}_snap/${V1} $M0
+sleep 2
+TEST umount -f $M0
+
+#Clean up
+stop_force_volumes 2
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+EXPECT 'Stopped' volinfo_field $V1 'Status';
+
+restore_snapshots
+TEST ! snapshot_exists 1 ${V0}_snap
+TEST ! snapshot_exists 1 ${V1}_snap
+
+delete_volumes 2
+TEST ! volume_exists $V0
+TEST ! volume_exists $V1
+
+cleanup;
+
diff --git a/tests/bugs/bug-1045333.t b/tests/bugs/bug-1045333.t
new file mode 100755
index 00000000000..8f4798ebc23
--- /dev/null
+++ b/tests/bugs/bug-1045333.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../snapshot.rc
+
+cleanup;
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST setup_lvm 1
+
+TEST $CLI volume create $V0 $H0:$L1
+TEST $CLI volume start $V0
+
+
+S1="${V0}-snap1" #Create snapshot with name contains hyphen(-)
+S2="-${V0}-snap2" #Create snapshot with name starts with hyphen(-)
+#Create snapshot with a long name
+S3="${V0}_single_gluster_volume_is_accessible_by_multiple_clients_offline_snapshot_is_a_long_name"
+
+TEST $CLI snapshot create $S1 $V0
+TEST snapshot_exists 0 $S1
+
+TEST $CLI snapshot create $S2 $V0
+TEST snapshot_exists 0 $S2
+
+TEST $CLI snapshot create $S3 $V0
+TEST snapshot_exists 0 $S3
+
+
+TEST glusterfs -s $H0 --volfile-id=/snaps/$S1/$V0 $M0
+sleep 2
+TEST umount -f $M0
+
+TEST glusterfs -s $H0 --volfile-id=/snaps/$S2/$V0 $M0
+sleep 2
+TEST umount -f $M0
+
+TEST glusterfs -s $H0 --volfile-id=/snaps/$S3/$V0 $M0
+sleep 2
+TEST umount -f $M0
+
+#Clean up
+#TEST $CLI snapshot delete $S1
+#TEST $CLI snapshot delete $S2
+#TEST $CLI snapshot delete $S3
+
+TEST $CLI volume stop $V0 force
+#TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/bug-1049834.t b/tests/bugs/bug-1049834.t
new file mode 100755
index 00000000000..c1b126ba1b1
--- /dev/null
+++ b/tests/bugs/bug-1049834.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../cluster.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../snapshot.rc
+
+cleanup;
+
+TEST launch_cluster 2
+TEST setup_lvm 2
+
+TEST $CLI_1 peer probe $H2
+EXPECT_WITHIN 20 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$L1 $H2:$L2
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+#Setting the snap-max-hard-limit to 4
+TEST $CLI_1 snapshot config $V0 snap-max-hard-limit 4
+PID_1=$!
+wait $PID_1
+
+#Creating 4 snapshots on the volume
+TEST create_n_snapshots $V0 4 ${V0}_snap
+TEST snapshot_n_exists $V0 4 ${V0}_snap
+
+#Creating a 5th snapshot on the volume and expecting it not to be created.
+TEST ! $CLI_1 snapshot create ${V0}_snap5 ${V0}
+TEST ! snapshot_exists 1 ${V0}_snap5
+TEST ! $CLI_1 snapshot delete ${V0}_snap5
+
+#Deleting the 4 snaps
+#TEST delete_n_snapshots $V0 4 ${V0}_snap
+#TEST ! snapshot_n_exists $V0 4 ${V0}_snap
+
+cleanup;
diff --git a/tests/bugs/bug-1064768.t b/tests/bugs/bug-1064768.t
new file mode 100644
index 00000000000..b87168150d2
--- /dev/null
+++ b/tests/bugs/bug-1064768.t
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1
+TEST $CLI volume start $V0
+EXPECT_WITHIN 15 'Started' volinfo_field $V0 'Status';
+
+TEST $CLI volume profile $V0 start
+TEST $CLI volume profile $V0 info
+TEST $CLI volume profile $V0 stop
+
+TEST $CLI volume status
+TEST $CLI volume stop $V0
+EXPECT_WITHIN 15 'Stopped' volinfo_field $V0 'Status';
+cleanup;
diff --git a/tests/bugs/bug-948686.t b/tests/bugs/bug-948686.t
index db9c198a96f..db9c198a96f 100644..100755
--- a/tests/bugs/bug-948686.t
+++ b/tests/bugs/bug-948686.t
diff --git a/tests/cluster.rc b/tests/cluster.rc
index 1e42426f630..efeaa35636d 100755
--- a/tests/cluster.rc
+++ b/tests/cluster.rc
@@ -106,3 +106,6 @@ function define_clis() {
done
}
+function peer_count() {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
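peer_count() is meant to be polled rather than checked once, since peers reach 'Peer in Cluster (Connected)' asynchronously after a probe. The new tests drive it through EXPECT_WITHIN, e.g. (from tests/basic/volume-snapshot.t):

    TEST $CLI_1 peer probe $H2
    TEST $CLI_1 peer probe $H3
    EXPECT_WITHIN 20 2 peer_count    # wait up to 20 seconds for 2 connected peers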
diff --git a/tests/include.rc b/tests/include.rc
index 250220efa9b..452e6d7ca2c 100644
--- a/tests/include.rc
+++ b/tests/include.rc
@@ -225,6 +225,7 @@ function cleanup()
umount $m
done
+ type cleanup_lvm &>/dev/null && cleanup_lvm
LOOPDEVICES=`losetup -a | grep "$B0/" | awk '{print $1}' | tr -d :`
for l in $LOOPDEVICES;
@@ -239,6 +240,7 @@ function cleanup()
umount -l $M1 2>/dev/null || true;
umount -l $N0 2>/dev/null || true;
umount -l $N1 2>/dev/null || true;
+
}
function volinfo_field()
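The extra cleanup() line only invokes cleanup_lvm when the function exists, so tests that never source snapshot.rc see no change in behaviour; `type` is what probes for it:

    type cleanup_lvm &>/dev/null && cleanup_lvm    # no-op unless snapshot.rc has been sourced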
diff --git a/tests/snapshot.rc b/tests/snapshot.rc
new file mode 100755
index 00000000000..440059fc1af
--- /dev/null
+++ b/tests/snapshot.rc
@@ -0,0 +1,290 @@
+#!/bin/bash
+
+LVM_DEFINED=0
+LVM_PREFIX="patchy_snap"
+LVM_COUNT=0
+VHD_SIZE="1G"
+
+function init_lvm() {
+ if [ "$1" == "" ]; then
+ echo "Error: Invalid argument supplied"
+ return 1
+ fi
+ LVM_COUNT=$1
+
+ if [ "$2" != "" ]; then
+ VHD_SIZE=$2
+ fi
+
+ local b
+ local i
+
+ if [ "$B1" = "" ]; then
+ B1=$B0
+ fi
+
+ for i in `seq 1 $LVM_COUNT`; do
+ b="B$i"
+ if [ "${!b}" = "" ]; then
+ echo "Error: $b not defined."
+            echo "Please run launch_cluster with at least $LVM_COUNT nodes"
+ return 1
+ fi
+
+ eval "L$i=${!b}/${LVM_PREFIX}_mnt"
+ l="L$i"
+ mkdir -p ${!l}
+ if [ $? -ne 0 ]; then
+ echo "Error: failed to create dir ${!l}"
+ return 1
+ fi
+
+ eval "VG$i=${LVM_PREFIX}_vg_${i}"
+ done
+
+ LVM_DEFINED=1
+ return 0
+}
+
+function setup_lvm() {
+ init_lvm $@ || return 1
+ _setup_lvm
+ return 0
+}
+
+function cleanup_lvm() {
+ pkill gluster
+ sleep 2
+
+ if [ "$LVM_DEFINED" = "1" ]; then
+ _cleanup_lvm >/dev/null 2>&1
+ fi
+
+ _cleanup_lvm_again >/dev/null 2>&1
+    # TODO: snapshot delete cleanup has an open bug;
+    # once that is fixed, remove this block
+ mount | grep "run/gluster/snaps" | awk '{print $3}' | xargs umount 2> /dev/null
+ mount | grep "patchy_snap" | awk '{print $3}' | xargs umount 2> /dev/null
+ \rm -rf /var/run/gluster/snaps/*
+ lvscan | grep "/dev/patchy_snap" | awk '{print $2}'| xargs lvremove -f 2> /dev/null
+ vgs | grep patchy_snap | awk '{print $1}' | xargs vgremove -f 2>/dev/null
+ \rm -rf /dev/patchy*
+ return 0
+}
+
+########################################################
+# Private Functions
+########################################################
+function _setup_lvm() {
+ local count=$LVM_COUNT
+ local b
+ local i
+
+ for i in `seq 1 $count`; do
+ b="B$i"
+
+ _create_vhd ${!b} $i
+ _create_lv ${!b} $i
+ _mount_lv $i
+ done
+}
+
+function _cleanup_lvm() {
+ local count=$LVM_COUNT
+ local b
+ local i
+
+ for i in `seq 1 $count`; do
+ b="B$i"
+ _umount_lv $i
+ _remove_lv $i
+ _remove_vhd ${!b}
+ done
+}
+
+function _cleanup_lvm_again() {
+ local file
+
+ mount | grep $LVM_PREFIX | awk '{print $3}' | xargs -r umount -f
+
+ /sbin/vgs | grep $LVM_PREFIX | awk '{print $1}' | xargs -r vgremove -f
+
+ find $B0 -name "${LVM_PREFIX}_loop" | xargs -r losetup -d
+
+ find $B0 -name "${LVM_PREFIX}*" | xargs -r rm -rf
+
+ find /run/gluster/snaps -name "*${LVM_PREFIX}*" | xargs -r rm -rf
+
+ for file in `ls /run/gluster/snaps`; do
+ find /run/gluster/snaps/$file -mmin -2 | xargs -r rm -rf
+ done
+}
+
+########################################################
+########################################################
+function _create_vhd() {
+ local dir=$1
+ local num=$2
+ local loop_num=`expr $2 + 8`
+
+ fallocate -l${VHD_SIZE} $dir/${LVM_PREFIX}_vhd
+ mknod -m660 $dir/${LVM_PREFIX}_loop b 7 $loop_num
+ /sbin/losetup $dir/${LVM_PREFIX}_loop $dir/${LVM_PREFIX}_vhd
+}
+
+function _create_lv() {
+ local dir=$1
+ local num=$2
+ local vg="VG$num"
+ local thinpoolsize="0.8G"
+ local virtualsize="0.6G"
+
+ /sbin/pvcreate $dir/${LVM_PREFIX}_loop
+ /sbin/vgcreate ${!vg} $dir/${LVM_PREFIX}_loop
+
+ /sbin/lvcreate -L ${thinpoolsize} -T /dev/${!vg}/thinpool
+ /sbin/lvcreate -V ${virtualsize} -T /dev/${!vg}/thinpool -n brick_lvm
+
+ mkfs.xfs -f /dev/${!vg}/brick_lvm
+}
+
+function _mount_lv() {
+ local num=$1
+ local vg="VG$num"
+ local l="L$num"
+
+ mount -t xfs -o nouuid /dev/${!vg}/brick_lvm ${!l}
+}
+
+function _umount_lv() {
+ local num=$1
+ local l="L$num"
+
+ umount -f ${!l} 2>/dev/null || true
+ rmdir ${!l} 2>/dev/null || true
+}
+
+function _remove_lv() {
+ local num=$1
+ local vg="VG$num"
+
+ vgremove -f ${!vg}
+}
+
+function _remove_vhd() {
+ local dir=$1
+
+ losetup -d $dir/${LVM_PREFIX}_loop
+ rm -f $dir/${LVM_PREFIX}_loop
+ rm -f $dir/${LVM_PREFIX}_vhd
+}
+
+########################################################
+# Utility Functions
+########################################################
+function snapshot_exists() {
+ local clitype=$1
+ local snapname=$2
+ local cli=$CLI
+ if [ "$clitype" == "1" ]; then
+ cli=$CLI_1;
+ fi
+ if [ "$clitype" == "2" ]; then
+ cli=$CLI_2;
+ fi
+ $cli snapshot list | egrep -q "^$snapname\$"
+ return $?
+}
+
+#Create N snapshots on a given volume
+#Arg1 : <Volume Name>
+#Arg2 : <Count of snaps to be created>
+#Arg3 : <Snap Name Pattern>
+#Return: Returns 0 if all snaps are created;
+#        otherwise returns the exit code of the last
+#        failed snap create command.
+function create_n_snapshots() {
+ local cli=$1
+ local vol=$1
+ local snap_count=$2
+ local snap_name=$3
+ local ret=0
+ for i in `seq 1 $snap_count`; do
+ $CLI_1 snapshot create $snap_name$i ${vol}&
+ PID_1=$!
+ wait $PID_1
+ ret=$?
+ if [ "$ret" != "0" ]; then
+ break
+ fi
+ done
+ return $ret
+}
+
+
+#Delete N snapshots on a given volume
+#Arg1 : <Volume Name>
+#Arg2 : <Count of snaps to be deleted>
+#Arg3 : <Snap Name Pattern>
+#Return: Returns 0 if all snaps are deleted;
+#        otherwise returns the exit code of the last
+#        failed snap delete command.
+function delete_n_snapshots() {
+ local vol=$1
+ local snap_count=$2
+ local snap_name=$3
+ local ret=0
+ for i in `seq 1 $snap_count`; do
+ $CLI_1 snapshot delete $snap_name$i &
+ PID_1=$!
+ wait $PID_1
+ temp=$?
+ if [ "$temp" != "0" ]; then
+ ret=$temp
+ fi
+ done
+ return $ret
+}
+
+#Check for the existence of N snapshots on a given volume
+#Arg1 : <Volume Name>
+#Arg2 : <Count of snaps to be checked>
+#Arg3 : <Snap Name Pattern>
+#Return: Returns 0 if all snaps exist;
+#        otherwise returns the exit code of the last
+#        failed snapshot_exists() call.
+function snapshot_n_exists() {
+ local vol=$1
+ local snap_count=$2
+ local snap_name=$3
+ local ret=0
+ for i in `seq 1 $snap_count`; do
+ snapshot_exists 1 $snap_name$i
+ ret=$?
+ if [ "$ret" != "0" ]; then
+ break
+ fi
+ done
+ return $ret
+}
+
+# TODO: Cleanup code duplication
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI_1 volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+
+function volume_exists() {
+ local volname=$1
+ $CLI_1 volume info $volname 2>&1 | grep -q 'does not exist'
+ if [ $? -eq 0 ]; then
+ return 1
+ else
+ return 0
+ fi
+}
+
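A condensed sketch of how a .t test drives these helpers end to end, assembled from tests/bugs/bug-1049834.t and tests/basic/volume-snapshot.t (counts and names are illustrative):

    . $(dirname $0)/../include.rc
    . $(dirname $0)/../cluster.rc
    . $(dirname $0)/../snapshot.rc

    cleanup;
    TEST launch_cluster 2                       # glusterd instances H1/H2 driven via $CLI_1/$CLI_2
    TEST setup_lvm 2                            # loop-backed thin LVs; exports $L1/$L2 as brick paths
    TEST $CLI_1 peer probe $H2
    EXPECT_WITHIN 20 1 peer_count

    TEST $CLI_1 volume create $V0 $H1:$L1 $H2:$L2
    TEST $CLI_1 volume start $V0

    TEST $CLI_1 snapshot config $V0 snap-max-hard-limit 4
    TEST create_n_snapshots $V0 4 ${V0}_snap
    TEST snapshot_n_exists $V0 4 ${V0}_snap
    # delete_n_snapshots $V0 4 ${V0}_snap       # kept commented in the bug tests; see the TODO in cleanup_lvm
    cleanup;                                    # include.rc calls cleanup_lvm to tear the LVs back down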
diff --git a/tests/volume.rc b/tests/volume.rc
index 9e4843e06c4..7d2494067e8 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -308,3 +308,54 @@ function has_holes {
echo "0"
fi
}
+
+function do_volume_operations() {
+ local operation=$1
+ local count=$2
+ local force=$3
+
+ local pids=()
+ local cli
+ local v
+
+ for i in `seq 1 $count`; do
+ cli="CLI_$i"
+ v="V`expr $i - 1`"
+ ${!cli} volume $operation ${!v} $force &
+ pids[$i]=$!
+ done
+
+ for i in `seq 1 $count`; do
+ wait ${pids[$i]}
+ done
+}
+
+function start_volumes() {
+ do_volume_operations start $1
+}
+
+function stop_volumes() {
+ do_volume_operations stop $1
+}
+
+function start_force_volumes() {
+ do_volume_operations start $1 force
+}
+
+function stop_force_volumes() {
+ do_volume_operations stop $1 force
+}
+
+function delete_volumes() {
+ do_volume_operations delete $1
+}
+
+function volume_exists() {
+ local volname=$1
+ $CLI volume info $volname 2>&1 | grep -q 'does not exist'
+ if [ $? -eq 0 ]; then
+ return 1
+ else
+ return 0
+ fi
+}
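These wrappers map a count onto parallel per-node CLI calls: start_volumes 2, for instance, runs "$CLI_1 volume start $V0" and "$CLI_2 volume start $V1" in the background and waits for both, which is how tests/basic/volume-snapshot.t uses them:

    start_volumes 2
    EXPECT 'Started' volinfo_field $V0 'Status'
    EXPECT 'Started' volinfo_field $V1 'Status'

    stop_force_volumes 2
    delete_volumes 2
    TEST ! volume_exists $V0
    TEST ! volume_exists $V1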