Diffstat (limited to 'tests')
| -rw-r--r-- | tests/README.md (renamed from tests/README) | 19 |
| -rwxr-xr-x | tests/basic/bd.t | 76 |
| -rwxr-xr-x | tests/basic/cdc.t | 135 |
| -rwxr-xr-x | tests/basic/mgmt_v3-locks.t | 121 |
| -rwxr-xr-x | tests/basic/rpm.t | 17 |
| -rwxr-xr-x | tests/basic/volume-snapshot.t | 83 |
| -rwxr-xr-x | tests/bugs/bug-1015990-rep.t | 81 |
| -rwxr-xr-x | tests/bugs/bug-1015990.t | 95 |
| -rwxr-xr-x | tests/bugs/bug-1022055.t | 26 |
| -rw-r--r-- | tests/bugs/bug-1022905.t | 39 |
| -rw-r--r-- | tests/bugs/bug-1030208.t | 35 |
| -rw-r--r-- | tests/bugs/bug-1040934.t | 37 |
| -rw-r--r-- | tests/bugs/bug-1045333.t | 48 |
| -rwxr-xr-x | tests/bugs/bug-1049834.t | 40 |
| -rwxr-xr-x | tests/bugs/bug-1064768.t | 20 |
| -rwxr-xr-x | tests/bugs/bug-830665.t | 1 |
| -rwxr-xr-x | tests/bugs/bug-853258.t | 2 |
| -rw-r--r-- | tests/bugs/bug-857330/common.rc | 2 |
| -rwxr-xr-x | tests/bugs/bug-857330/normal.t | 6 |
| -rw-r--r-- | tests/bugs/bug-948729/bug-948729-force.t | 17 |
| -rw-r--r-- | tests/bugs/bug-948729/bug-948729.t | 3 |
| -rwxr-xr-x | tests/cluster.rc | 16 |
| -rw-r--r-- | tests/include.rc | 5 |
| -rwxr-xr-x | tests/snapshot.rc | 251 |
| -rw-r--r-- | tests/volume.rc | 51 |
25 files changed, 1152 insertions, 74 deletions
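For context, the rewritten tests/README.md (the first file in the diff below) points contributors at the packaged harness rather than the source tree. A minimal sketch of running it, assuming the default install path named in that README and the DEBUG variable defined at the top of tests/include.rc; the single-test invocation is illustrative only, not something this patch prescribes:

```bash
# Run the whole regression suite as root (path per the new tests/README.md):
sudo /usr/share/glusterfs/run-tests.sh

# Re-run one failing case by hand with debugging turned on; DEBUG is the
# knob defined in tests/include.rc (illustrative invocation, not mandated
# by this patch):
sudo DEBUG=1 bash tests/basic/volume-snapshot.t
```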
diff --git a/tests/README b/tests/README.md
index 857230acb..2b5ed8dcd 100644
--- a/tests/README
+++ b/tests/README.md
@@ -1,26 +1,27 @@
-How to use test script framework.
-=================================
+Regression tests framework for GlusterFS
+========================================
 
+## Prereq
 - Build and install the version of glusterfs with your changes. Make sure
   the installed version is accessible from $PATH.
 
+## How-To
 - To mount glusterfs, NEVER use 'mount -t glusterfs', instead use
   'glusterfs -s ' method. This is because with the patch build setup doesnot
   install the /sbin/mount.glusterfs necessary, where as the glusterfs binary
   will be accessible with $PATH, and will pick the right version.
 
-
 - (optional) Set environment variables to specify location of export
   directories and mount points. Unless you have special requirements, the
   defaults should just work. The variables themselves can be found at the
   top of tests/include.rc. All of them can be overriden with environment
   variables.
 
-- Execute run-tests.sh in the top level directory as root.
+## Usage
+- Execute `/usr/share/glusterfs/run-tests.sh` as root.
 
-- If some test cases fail, you can execute the failed test case script
-  directly bypassing run-tests.sh. At this time it might be
-  useful to set the envrionment variable DEBUG=1 before running
-  the individual test script directly by hand.
+- If some test cases fail, report to GlusterFS community at
+  `gluster-devel@nongnu.org`.
 
-- BE WARNED THAT THE TEST CASES DELETE /var/lib/glusterd/* !!!
+## Reminder
+- BE WARNED THAT THE TEST CASES DELETE /var/lib/glusterd/* !!!
\ No newline at end of file diff --git a/tests/basic/bd.t b/tests/basic/bd.t index 3201b7460..eb6305414 100755 --- a/tests/basic/bd.t +++ b/tests/basic/bd.t @@ -2,9 +2,6 @@ . $(dirname $0)/../include.rc -cleanup; - - function execute() { cmd=$1 @@ -14,11 +11,10 @@ function execute() function bd_cleanup() { - execute vgremove -f ${VG} + execute vgremove -f ${V0} execute pvremove ${ld} execute losetup -d ${ld} execute rm ${BD_DISK} - execute $CLI volume delete ${V0} cleanup } @@ -31,9 +27,10 @@ function check() fi } -VG=__bd_vg SIZE=256 #in MB +bd_cleanup; + ## Configure environment needed for BD backend volumes ## Create a file with configured size and ## set it as a temporary loop device to create @@ -56,23 +53,28 @@ function configure() check losetup ${BD_DISK} execute pvcreate -f ${ld} check pvcreate ${ld} - execute vgcreate ${VG} ${ld} - check vgcreate ${VG} + execute vgcreate ${V0} ${ld} + check vgcreate ${V0} + execute lvcreate --thin ${V0}/pool --size 128M } function volinfo_field() { local vol=$1; local field=$2; - $CLI volume info $vol | grep "^$field: " | sed 's/.*: //'; } +function volume_type() +{ + getfattr -n volume.type $M0/. --only-values --absolute-names -e text +} + TEST glusterd TEST pidof glusterd configure -TEST $CLI volume create $V0 device vg ${H0}:/${VG} +TEST $CLI volume create $V0 ${H0}:/$B0/$V0?${V0} EXPECT "$V0" volinfo_field $V0 'Volume Name'; EXPECT 'Created' volinfo_field $V0 'Status'; @@ -80,38 +82,50 @@ EXPECT 'Created' volinfo_field $V0 'Status'; TEST $CLI volume start $V0; EXPECT 'Started' volinfo_field $V0 'Status' -TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 +EXPECT '1' volume_type -## Create file (LV) -TEST touch $M0/$VG/lv1 -TEST stat /dev/$VG/lv1 +## Create posix file +TEST touch $M0/posix -TEST rm $M0/$VG/lv1; -TEST ! stat $M0/$VG/lv1; +TEST touch $M0/lv +gfid=`getfattr -n glusterfs.gfid.string $M0/lv --only-values --absolute-names` +TEST setfattr -n user.glusterfs.bd -v "lv:4MB" $M0/lv +# Check if LV is created +TEST stat /dev/$V0/${gfid} -TEST touch $M0/$VG/lv1 -TEST truncate -s64M $M0/$VG/lv1 +## Create filesystem +sleep 1 +TEST mkfs.ext4 -qF $M0/lv +# Cloning +TEST touch $M0/lv_clone +gfid=`getfattr -n glusterfs.gfid.string $M0/lv_clone --only-values --absolute-names` +TEST setfattr -n clone -v ${gfid} $M0/lv +TEST stat /dev/$V0/${gfid} -TEST ln $M0/$VG/lv1 $M0/$VG/lv2 -TEST stat /dev/$VG/lv2 +sleep 1 +## Check mounting +TEST mount -o loop $M0/lv $M1 +umount $M1 -rm $M0/$VG/lv1 -rm $M0/$VG/lv2 +# Snapshot +TEST touch $M0/lv_sn +gfid=`getfattr -n glusterfs.gfid.string $M0/lv_sn --only-values --absolute-names` +TEST setfattr -n snapshot -v ${gfid} $M0/lv +TEST stat /dev/$V0/${gfid} -TEST $CLI bd create $V0:/$VG/lv1 4MB -TEST stat /dev/$VG/lv1 +# Merge +sleep 1 +TEST setfattr -n merge -v "$M0/lv_sn" $M0/lv_sn +TEST ! stat $M0/lv_sn +TEST ! stat /dev/$V0/${gfid} -TEST $CLI bd clone $V0:/$VG/lv1 lv2 -TEST stat /dev/$VG/lv2 -TEST $CLI bd delete $V0:/$VG/lv2 -TEST $CLI bd snapshot $V0:/$VG/lv1 lv2 1 -TEST stat /dev/$VG/lv2 -rm $M0/$VG/lv2 -rm $M0/$VG/lv1 +rm $M0/* -f TEST umount $M0 TEST $CLI volume stop ${V0} +EXPECT 'Stopped' volinfo_field $V0 'Status'; TEST $CLI volume delete ${V0} bd_cleanup diff --git a/tests/basic/cdc.t b/tests/basic/cdc.t new file mode 100755 index 000000000..4cd915aa9 --- /dev/null +++ b/tests/basic/cdc.t @@ -0,0 +1,135 @@ +#!/bin/bash + +. $(dirname $0)/../include.rc +. 
$(dirname $0)/../volume.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd + +## Create a volume with one brick +TEST $CLI volume create $V0 $H0:$B0/${V0}1; +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; +EXPECT '1' brick_count $V0 + +## Turn off performance translators +## This is required for testing readv calls +TEST $CLI volume set $V0 performance.io-cache off +EXPECT 'off' volinfo_field $V0 'performance.io-cache' +TEST $CLI volume set $V0 performance.quick-read off +EXPECT 'off' volinfo_field $V0 'performance.quick-read' + +TEST $CLI volume set $V0 strict-write-ordering on +EXPECT 'on' volinfo_field $V0 'performance.strict-write-ordering' + +## Turn on cdc xlator by setting features.compress to on +TEST $CLI volume set $V0 compress on +EXPECT 'on' volinfo_field $V0 'features.compress' +EXPECT 'server' volinfo_field $V0 'compress.mode' + +## Make sure that user cannot change compress.mode +## This would break the cdc xlator if allowed! +TEST $CLI volume set $V0 compress.mode client +EXPECT 'server' volinfo_field $V0 'compress.mode' + +## Turn on compress.debug option +## This will dump compressed data onto disk as gzip file +## This is used to check if compression actually happened +TEST $CLI volume set $V0 compress.debug on +EXPECT 'on' volinfo_field $V0 'compress.debug' + +## Start the volume +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Mount FUSE with caching disabled +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; + +#################### +## Testing writev ## +#################### + +## Create a 1K file locally and find the md5sum +TEST dd if=/dev/zero of=/tmp/cdc-orig count=1 bs=1K 2>/dev/null +checksum[original-file]=`md5sum /tmp/cdc-orig | cut -d' ' -f1` + +## Copy the file to mountpoint and find its md5sum on brick +TEST dd if=/tmp/cdc-orig of=$M0/cdc-server count=1 bs=1K 2>/dev/null +checksum[brick-file]=`md5sum $B0/${V0}1/cdc-server | cut -d' ' -f1` + +## Uncompress the gzip dump file and find its md5sum +EXPECT '/tmp/cdcdump.gz: application/x-gzip; charset=binary' file -i /tmp/cdcdump.gz +TEST gunzip -f /tmp/cdcdump.gz +checksum[dump-file-writev]=`md5sum /tmp/cdcdump | cut -d' ' -f1` + +## Check if all 3 checksums are same +TEST test ${checksum[original-file]} = ${checksum[brick-file]} +TEST test ${checksum[brick-file]} = ${checksum[dump-file-writev]} + +## Cleanup files +TEST rm -f /tmp/cdcdump.gz + +################### +## Testing readv ## +################### + +## Copy file from mount point to client and find checksum +TEST dd if=$M0/cdc-server of=/tmp/cdc-client count=1 bs=1K 2>/dev/null +checksum[client-file]=`md5sum /tmp/cdc-client | cut -d' ' -f1` + +## Uncompress the gzip dump file and find its md5sum +EXPECT '/tmp/cdcdump.gz: application/x-gzip; charset=binary' file -i /tmp/cdcdump.gz +TEST gunzip -f /tmp/cdcdump.gz +checksum[dump-file-readv]=`md5sum /tmp/cdcdump | cut -d' ' -f1` + +## Check if all 3 checksums are same +TEST test ${checksum[brick-file]} = ${checksum[client-file]} +TEST test ${checksum[client-file]} = ${checksum[dump-file-readv]} + +## Cleanup files and unmount +TEST rm -f /tmp/cdc* $M0/cdc* +TEST umount $M0 + +## Stop the volume +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +## Turn on compress.min-size and set it to 100 bytes +## Compression should not take place if file size +## is less than 100 bytes +TEST $CLI volume set $V0 compress.min-size 100 +EXPECT '100' volinfo_field $V0 
'compress.min-size' + +## Start the volume +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Mount FUSE with caching disabled +TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0; + +## Create a file of size 99 bytes on mountpoint +## This is should not be compressed +TEST dd if=/dev/zero of=$M0/cdc-small count=1 bs=99 2>/dev/null +TEST ! test -e /tmp/cdcdump.gz + +## Cleanup files and unmount +TEST rm -f /tmp/cdc* $M0/cdc* +TEST umount $M0 + +## Reset the compress options +TEST $CLI volume reset $V0 compress.debug +TEST $CLI volume reset $V0 compress.min-size +TEST $CLI volume reset $V0 compress.mode +TEST $CLI volume reset $V0 features.compress + +## Stop the volume +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +## Delete the volume +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/basic/mgmt_v3-locks.t b/tests/basic/mgmt_v3-locks.t new file mode 100755 index 000000000..22ca27b9f --- /dev/null +++ b/tests/basic/mgmt_v3-locks.t @@ -0,0 +1,121 @@ +#!/bin/bash + +. $(dirname $0)/../include.rc +. $(dirname $0)/../cluster.rc + +function check_peers { + $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l +} + +function volume_count { + local cli=$1; + if [ $cli -eq '1' ] ; then + $CLI_1 volume info | grep 'Volume Name' | wc -l; + else + $CLI_2 volume info | grep 'Volume Name' | wc -l; + fi +} + +function volinfo_field() +{ + local vol=$1; + local field=$2; + + $CLI_1 volume info $vol | grep "^$field: " | sed 's/.*: //'; +} + +function two_diff_vols_create { + # Both volume creates should be successful + $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0 & + PID_1=$! + + $CLI_2 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1 $H3:$B3/$V1 & + PID_2=$! + + wait $PID_1 $PID_2 +} + +function two_diff_vols_start { + # Both volume starts should be successful + $CLI_1 volume start $V0 & + PID_1=$! + + $CLI_2 volume start $V1 & + PID_2=$! + + wait $PID_1 $PID_2 +} + +function two_diff_vols_stop_force { + # Force stop, so that if rebalance from the + # remove bricks is in progress, stop can + # still go ahead. Both volume stops should + # be successful + $CLI_1 volume stop $V0 force & + PID_1=$! + + $CLI_2 volume stop $V1 force & + PID_2=$! + + wait $PID_1 $PID_2 +} + +function same_vol_remove_brick { + + # Running two same vol commands at the same time can result in + # two success', two failures, or one success and one failure, all + # of which are valid. The only thing that shouldn't happen is a + # glusterd crash. + + local vol=$1 + local brick=$2 + $CLI_1 volume remove-brick $1 $2 start & + $CLI_2 volume remove-brick $1 $2 start +} + +cleanup; + +TEST launch_cluster 3; +TEST $CLI_1 peer probe $H2; +TEST $CLI_1 peer probe $H3; + +EXPECT_WITHIN 20 2 check_peers + +two_diff_vols_create +EXPECT 'Created' volinfo_field $V0 'Status'; +EXPECT 'Created' volinfo_field $V1 'Status'; + +two_diff_vols_start +EXPECT 'Started' volinfo_field $V0 'Status'; +EXPECT 'Started' volinfo_field $V1 'Status'; + +same_vol_remove_brick $V0 $H2:$B2/$V0 +# Checking glusterd crashed or not after same volume remove brick +# on both nodes. +EXPECT_WITHIN 20 2 check_peers + +same_vol_remove_brick $V1 $H2:$B2/$V1 +# Checking glusterd crashed or not after same volume remove brick +# on both nodes. 
+EXPECT_WITHIN 20 2 check_peers + +$CLI_1 volume set $V0 diagnostics.client-log-level DEBUG & +$CLI_1 volume set $V1 diagnostics.client-log-level DEBUG +kill_glusterd 3 +$CLI_1 volume status $V0 +$CLI_2 volume status $V1 +$CLI_1 peer status +EXPECT_WITHIN 20 1 check_peers +EXPECT 'Started' volinfo_field $V0 'Status'; +EXPECT 'Started' volinfo_field $V1 'Status'; + +TEST $glusterd_3 +$CLI_1 volume status $V0 +$CLI_2 volume status $V1 +$CLI_1 peer status +#EXPECT_WITHIN 20 2 check_peers +#EXPECT 'Started' volinfo_field $V0 'Status'; +#EXPECT 'Started' volinfo_field $V1 'Status'; +#two_diff_vols_stop_force +#EXPECT_WITHIN 20 2 check_peers +cleanup; diff --git a/tests/basic/rpm.t b/tests/basic/rpm.t index e2c630f8e..a577726a8 100755 --- a/tests/basic/rpm.t +++ b/tests/basic/rpm.t @@ -35,18 +35,11 @@ else GIT_PARENT=$(git describe --abbrev=0) fi -# check for changed files -CHANGED_FILES=$(git diff --name-only ${GIT_PARENT}) -# if a commit changes this test, we should not skip it -SELFTEST=$(grep -e 'tests/basic/rpm.t' <<< "${CHANGED_FILES}") -# filter out any files not affecting the build itself -CHANGED_FILES=$(grep -E -v \ - -e '\.c$' \ - -e '\.h$' \ - -e '\.py$' \ - -e '^tests/' \ - <<< "${CHANGED_FILES}") -if [ -z "${CHANGED_FILES}" -a -z "${SELFTEST}" ] +# Filter out everything and what remains needs to be built +BUILD_FILES=$(git diff --name-status ${GIT_PARENT} | grep -Ev '^M.*\.(c|h|py)' | awk {'print $2'}) +SELFTEST=$(grep -e 'tests/basic/rpm.t' <<< "${BUILD_FILES}") +BUILD_FILES=$(grep -Ev '^tests/' <<< "${BUILD_FILES}") +if [ -z "${BUILD_FILES}" -a -z "${SELFTEST}" ] then # nothing affecting packaging changed, no need to retest rpmbuild SKIP_TESTS diff --git a/tests/basic/volume-snapshot.t b/tests/basic/volume-snapshot.t new file mode 100755 index 000000000..35c748372 --- /dev/null +++ b/tests/basic/volume-snapshot.t @@ -0,0 +1,83 @@ +#!/bin/bash + +. $(dirname $0)/../include.rc +. $(dirname $0)/../volume.rc +. $(dirname $0)/../cluster.rc +. $(dirname $0)/../snapshot.rc + +V1="patchy2" + +function create_volumes() { + $CLI_1 volume create $V0 $H1:$L1 & + PID_1=$! + + $CLI_2 volume create $V1 $H2:$L2 $H3:$L3 & + PID_2=$! + + wait $PID_1 $PID_2 +} + +function create_snapshots() { + $CLI_1 snapshot create ${V0}_snap ${V0}& + PID_1=$! + + $CLI_1 snapshot create ${V1}_snap ${V1}& + PID_2=$! + + wait $PID_1 $PID_2 +} + +function delete_snapshots() { + $CLI_1 snapshot delete ${V0}_snap & + PID_1=$! + + $CLI_1 snapshot delete ${V1}_snap & + PID_2=$! + + wait $PID_1 $PID_2 +} +cleanup; + +#Create cluster with 3 nodes +TEST launch_cluster 3; +TEST setup_lvm 3 + +TEST $CLI_1 peer probe $H2; +TEST $CLI_1 peer probe $H3; +EXPECT_WITHIN 20 2 peer_count; + +create_volumes +EXPECT 'Created' volinfo_field $V0 'Status'; +EXPECT 'Created' volinfo_field $V1 'Status'; + +start_volumes 2 +EXPECT 'Started' volinfo_field $V0 'Status'; +EXPECT 'Started' volinfo_field $V1 'Status'; + +#Snapshot Operations +create_snapshots +TEST snapshot_exists $V0 ${V0}_snap +TEST snapshot_exists $V1 ${V1}_snap + +TEST $CLI_1 snapshot config $V0 snap-max-hard-limit 100 +TEST $CLI_1 snapshot config $V1 snap-max-hard-limit 100 + +TEST mount -t glusterfs $H1:/snaps/${V0}_snap/${V0} $M0 +TEST umount -f $M0 +TEST mount -t glusterfs $H2:/snaps/${V1}_snap/${V1} $M0 +TEST umount -f $M0 + +#Clean up +delete_snapshots +TEST ! snapshot_exists $V0 ${V0}_snap +TEST ! 
snapshot_exists $V1 ${V1}_snap + +stop_force_volumes 2 +EXPECT 'Stopped' volinfo_field $V0 'Status'; +EXPECT 'Stopped' volinfo_field $V1 'Status'; + +delete_volumes 2 +TEST ! volume_exists $V0 +TEST ! volume_exists $V1 + +cleanup; diff --git a/tests/bugs/bug-1015990-rep.t b/tests/bugs/bug-1015990-rep.t new file mode 100755 index 000000000..f59bb2f75 --- /dev/null +++ b/tests/bugs/bug-1015990-rep.t @@ -0,0 +1,81 @@ +#!/bin/bash + +. $(dirname $0)/../include.rc +. $(dirname $0)/../volume.rc +. $(dirname $0)/../afr.rc +cleanup; + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}; + +## Verify volume is is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + + +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 + + + +TEST kill_brick $V0 $H0 $B0/$V0"1" +sleep 5 +TEST kill_brick $V0 $H0 $B0/$V0"3" +sleep 5 + +for i in {1..100}; do echo "STRING" > $M0/File$i; done + +brick_2_sh_entries=$(count_sh_entries $B0/$V0"2") +brick_4_sh_entries=$(count_sh_entries $B0/$V0"4") + + +command_output=$(gluster volume heal $V0 statistics heal-count replica $H0:$B0/$V0"1") + + +substring="Number of entries:" +count=0 +while read -r line; +do + if [[ "$line" == *$substring* ]] + then + value=$(echo $line | cut -f 2 -d :) + count=$(($count + $value)) + fi + +done <<< "$command_output" + +brick_2_entries_count=$(($count-$value)) + +EXPECT "0" echo $brick_2_entries_count + +brick_2_entries_count=$count + + +xattrop_count_brick_2=$(count_sh_entries $B0/$V0"2") +##Remove the count of the xattrop-gfid entry count as it does not contribute +##to the number of files to be healed + +sub_val=1 +xattrop_count_brick_2=$(($xattrop_count_brick_2-$sub_val)) + +ret=0 +if [ "$xattrop_count_brick_2" -eq "$brick_2_entries_count" ] + then + ret=$(($ret + $sub_val)) +fi + +EXPECT "1" echo $ret +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0 + +cleanup; diff --git a/tests/bugs/bug-1015990.t b/tests/bugs/bug-1015990.t new file mode 100755 index 000000000..165af5168 --- /dev/null +++ b/tests/bugs/bug-1015990.t @@ -0,0 +1,95 @@ +#!/bin/bash + +. $(dirname $0)/../include.rc +. $(dirname $0)/../volume.rc +. 
$(dirname $0)/../afr.rc +cleanup; + +## Start and create a volume +TEST glusterd; +TEST pidof glusterd; +TEST $CLI volume info; + +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}; + +## Verify volume is is created +EXPECT "$V0" volinfo_field $V0 'Volume Name'; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Start volume and verify +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + + +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 + + + +TEST kill_brick $V0 $H0 $B0/$V0"1" +sleep 5 +TEST kill_brick $V0 $H0 $B0/$V0"3" +sleep 5 + +for i in {1..100}; do echo "STRING" > $M0/File$i; done + +brick_2_sh_entries=$(count_sh_entries $B0/$V0"2") +brick_4_sh_entries=$(count_sh_entries $B0/$V0"4") + + +command_output=$(gluster volume heal $V0 statistics heal-count) + + +substring="Number of entries:" +count=0 +while read -r line; +do + if [[ "$line" == *$substring* ]] + then + value=$(echo $line | cut -f 2 -d :) + count=$(($count + $value)) + fi + +done <<< "$command_output" + +brick_2_entries_count=$(($count-$value)) +brick_4_entries_count=$value + + +xattrop_count_brick_2=$(count_sh_entries $B0/$V0"2") +##Remove the count of the xattrop-gfid entry count as it does not contribute +##to the number of files to be healed + +sub_val=1 +xattrop_count_brick_2=$(($xattrop_count_brick_2-$sub_val)) + +xattrop_count_brick_4=$(count_sh_entries $B0/$V0"4") +##Remove xattrop-gfid entry count + +xattrop_count_brick_4=$(($xattrop_count_brick_4-$sub_val)) + + +ret=0 +if [ "$xattrop_count_brick_2" -eq "$brick_2_entries_count" ] + then + ret=$(($ret + $sub_val)) +fi + +EXPECT "1" echo $ret + + +ret=0 +if [ "$xattrop_count_brick_4" -eq "$brick_4_entries_count" ] + then + ret=$(($ret + $sub_val)) +fi + +EXPECT "1" echo $ret + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0 + +cleanup; + diff --git a/tests/bugs/bug-1022055.t b/tests/bugs/bug-1022055.t new file mode 100755 index 000000000..c2f4218bb --- /dev/null +++ b/tests/bugs/bug-1022055.t @@ -0,0 +1,26 @@ +#!/bin/bash + +. $(dirname $0)/../include.rc +. $(dirname $0)/../cluster.rc + +function check_peers { + $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l +} + +cleanup; + +TEST launch_cluster 2; + +TEST $CLI_1 peer probe $H2; + +EXPECT_WITHIN 20 1 check_peers; + +TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0; + +TEST $CLI_1 volume start $V0; + +TEST $CLI_1 volume log rotate $V0; + +TEST $CLI_1 volume status; + +cleanup; diff --git a/tests/bugs/bug-1022905.t b/tests/bugs/bug-1022905.t new file mode 100644 index 000000000..aef3395dd --- /dev/null +++ b/tests/bugs/bug-1022905.t @@ -0,0 +1,39 @@ +#!/bin/bash + +. $(dirname $0)/../include.rc + +cleanup; + +## Create a volume +TEST glusterd; +TEST pidof glusterd +TEST $CLI volume info; + +TEST $CLI volume create $V0 $H0:$B0/${V0}{1}; +EXPECT 'Created' volinfo_field $V0 'Status'; + +## Volume start +TEST $CLI volume start $V0; +EXPECT 'Started' volinfo_field $V0 'Status'; + +## Enable a protected and a resettable/unprotected option +TEST $CLI volume quota $V0 enable +TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG + +## Reset cmd resets only unprotected option(s), succeeds. +TEST $CLI volume reset $V0; + +## Reset should fail +TEST ! 
$CLI volume reset $V0; + +## Set an unprotected option +TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG + +## Now 1 protected and 1 unprotected options are set +## Reset force should succeed +TEST $CLI volume reset $V0 force; + +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0 + +cleanup; diff --git a/tests/bugs/bug-1030208.t b/tests/bugs/bug-1030208.t new file mode 100644 index 000000000..866999692 --- /dev/null +++ b/tests/bugs/bug-1030208.t @@ -0,0 +1,35 @@ +#!/bin/bash + +#Test case: Hardlink test + +. $(dirname $0)/../include.rc +. $(dirname $0)/../volume.rc + +cleanup; + +#Basic checks +TEST glusterd +TEST pidof glusterd +TEST $CLI volume info + +#Create a distributed volume +TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2}; +TEST $CLI volume start $V0 + +# Mount FUSE +TEST glusterfs -s $H0 --volfile-id $V0 $M0 + +#Create a file and perform fop on a DIR +TEST touch $M0/foo +TEST ls $M0/ + +#Create hardlink +TEST ln $M0/foo $M0/bar + + +TEST umount $M0 +TEST $CLI volume stop $V0 +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/bug-1040934.t b/tests/bugs/bug-1040934.t new file mode 100644 index 000000000..3089d7ce1 --- /dev/null +++ b/tests/bugs/bug-1040934.t @@ -0,0 +1,37 @@ +#!/bin/bash + +. $(dirname $0)/../include.rc +. $(dirname $0)/../cluster.rc +. $(dirname $0)/../snapshot.rc +. $(dirname $0)/../volume.rc + +cleanup; + +TEST launch_cluster 2 +TEST setup_lvm 2 + +TEST $CLI_1 peer probe $H2 +EXPECT_WITHIN 20 1 peer_count + +TEST $CLI_1 volume create $V0 replica 2 $H1:$L1 $H2:$L2 +EXPECT 'Created' volinfo_field $V0 'Status' + +TEST $CLI_1 volume start $V0 +EXPECT 'Started' volinfo_field $V0 'Status' + +TEST $CLI_1 snapshot create ${V0}_snap ${V0} +PID_1=$! +wait $PID_1 + +TEST snapshot_exists ${V0}_snap +TEST mount -t glusterfs $H1:/snaps/${V0}_snap/$V0 $M0 +cd $M0 +TEST ! touch a + +TEST $CLI_1 snapshot delete ${V0}_snap +PID_1=$! +wait $PID_1 + +TEST ! snapshot_exists ${V0}_snap + +cleanup; diff --git a/tests/bugs/bug-1045333.t b/tests/bugs/bug-1045333.t new file mode 100644 index 000000000..d1f8069e8 --- /dev/null +++ b/tests/bugs/bug-1045333.t @@ -0,0 +1,48 @@ +#!/bin/bash + +. $(dirname $0)/../include.rc +. $(dirname $0)/../snapshot.rc + +cleanup; +TEST glusterd; +TEST pidof glusterd; + +TEST setup_lvm 1 + +TEST $CLI volume create $V0 $H0:$L1 +TEST $CLI volume start $V0 + + +S1="${V0}-snap1" #Create snapshot with name contains hyphen(-) +S2="-${V0}-snap2" #Create snapshot with name starts with hyphen(-) +#Create snapshot with a long name +S3="${V0}_single_gluster_volume_is_accessible_by_multiple_clients_offline_snapshot_is_a_long_name" + +TEST $CLI snapshot create $S1 $V0 +TEST snapshot_exists $S1 + +TEST $CLI snapshot create $S2 $V0 +TEST snapshot_exists $S2 + +TEST $CLI snapshot create $S3 $V0 +TEST snapshot_exists $S3 + + +TEST mount -t glusterfs $H0:/snaps/$S1/$V0 $M0 +TEST umount -f $M0 + +TEST mount -t glusterfs $H0:/snaps/$S2/$V0 $M0 +TEST umount -f $M0 + +TEST mount -t glusterfs $H0:/snaps/$S3/$V0 $M0 +TEST umount -f $M0 + +#Clean up +TEST $CLI snapshot delete $S1 +TEST $CLI snapshot delete $S2 +TEST $CLI snapshot delete $S3 + +TEST $CLI volume stop $V0 force +TEST $CLI volume delete $V0 + +cleanup; diff --git a/tests/bugs/bug-1049834.t b/tests/bugs/bug-1049834.t new file mode 100755 index 000000000..6019a561c --- /dev/null +++ b/tests/bugs/bug-1049834.t @@ -0,0 +1,40 @@ +#!/bin/bash + +. $(dirname $0)/../include.rc +. $(dirname $0)/../cluster.rc +. $(dirname $0)/../snapshot.rc +. 
$(dirname $0)/../volume.rc + +cleanup; + +TEST launch_cluster 2 +TEST setup_lvm 2 + +TEST $CLI_1 peer probe $H2 +EXPECT_WITHIN 20 1 peer_count + +TEST $CLI_1 volume create $V0 $H1:$L1 $H2:$L2 +EXPECT 'Created' volinfo_field $V0 'Status' + +TEST $CLI_1 volume start $V0 +EXPECT 'Started' volinfo_field $V0 'Status' + +#Setting the snap-max-hard-limit to 4 +TEST $CLI_1 snapshot config $V0 snap-max-hard-limit 4 +PID_1=$! +wait $PID_1 + +#Creating 4 snapshots on the volume +TEST create_n_snapshots $V0 4 $V0_snap +TEST snapshot_n_exists $V0 4 $V0_snap + +#Creating the 5th snapshots on the volume and expecting it not to be created. +TEST ! $CLI_1 snapshot create ${V0}_snap5 ${V0} +TEST ! snapshot_exists ${V0}_snap5 +TEST ! $CLI_1 snapshot delete ${V0}_snap5 + +#Deleting the 4 snaps +TEST delete_n_snapshots $V0 4 $V0_snap +TEST ! snapshot_n_exists $V0 4 $V0_snap + +cleanup; diff --git a/tests/bugs/bug-1064768.t b/tests/bugs/bug-1064768.t new file mode 100755 index 000000000..b87168150 --- /dev/null +++ b/tests/bugs/bug-1064768.t @@ -0,0 +1,20 @@ +#!/bin/bash + +. $(dirname $0)/../include.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1 +TEST $CLI volume start $V0 +EXPECT_WITHIN 15 'Started' volinfo_field $V0 'Status'; + +TEST $CLI volume profile $V0 start +TEST $CLI volume profile $V0 info +TEST $CLI volume profile $V0 stop + +TEST $CLI volume status +TEST $CLI volume stop $V0 +EXPECT_WITHIN 15 'Stopped' volinfo_field $V0 'Status'; +cleanup; diff --git a/tests/bugs/bug-830665.t b/tests/bugs/bug-830665.t index 0373ec6d7..0073ff1d9 100755 --- a/tests/bugs/bug-830665.t +++ b/tests/bugs/bug-830665.t @@ -35,6 +35,7 @@ TEST $CLI volume set $V0 performance.stat-prefetch off; ## Make sure automatic self-heal doesn't perturb our results. 
TEST $CLI volume set $V0 cluster.self-heal-daemon off +TEST $CLI volume set $V0 cluster.background-self-heal-count 0 ## Start volume and verify TEST $CLI volume start $V0; diff --git a/tests/bugs/bug-853258.t b/tests/bugs/bug-853258.t index 0ca995551..faa9d4465 100755 --- a/tests/bugs/bug-853258.t +++ b/tests/bugs/bug-853258.t @@ -38,7 +38,7 @@ for i in $(seq 0 3); do xattrs="$xattrs $(dht_get_layout $B0/${V0}$i)" done -overlap=$($(dirname $0)/overlap.py $xattrs) +overlap=$(python2 $(dirname $0)/overlap.py $xattrs) # 2863311531 = 0xaaaaaaab = 2/3 overlap TEST [ "$overlap" -ge 2863311531 ] diff --git a/tests/bugs/bug-857330/common.rc b/tests/bugs/bug-857330/common.rc index 4e5a73a0b..e5a7cd79a 100644 --- a/tests/bugs/bug-857330/common.rc +++ b/tests/bugs/bug-857330/common.rc @@ -22,7 +22,7 @@ function check-and-store-task-id() function get-task-id() { - $CLI $COMMAND | grep $PATTERN | grep -o -E "$UUID_REGEX" + $CLI $COMMAND | grep $PATTERN | grep -o -E "$UUID_REGEX" | tail -n1 } diff --git a/tests/bugs/bug-857330/normal.t b/tests/bugs/bug-857330/normal.t index 4455d1620..24dfe52c4 100755 --- a/tests/bugs/bug-857330/normal.t +++ b/tests/bugs/bug-857330/normal.t @@ -28,7 +28,7 @@ PATTERN="ID:" TEST check-and-store-task-id COMMAND="volume status $V0" -PATTERN="Rebalance" +PATTERN="ID" EXPECT $TASK_ID get-task-id COMMAND="volume rebalance $V0 status" @@ -45,7 +45,7 @@ PATTERN="ID:" TEST check-and-store-task-id COMMAND="volume status $V0" -PATTERN="Replace" +PATTERN="ID" EXPECT $TASK_ID get-task-id COMMAND="volume replace-brick $V0 $REP_BRICK_PAIR status" @@ -62,7 +62,7 @@ PATTERN="ID:" TEST check-and-store-task-id COMMAND="volume status $V0" -PATTERN="Remove" +PATTERN="ID" EXPECT $TASK_ID get-task-id COMMAND="volume remove-brick $V0 $H0:$B0/${V0}3 status" diff --git a/tests/bugs/bug-948729/bug-948729-force.t b/tests/bugs/bug-948729/bug-948729-force.t index 3a5df7916..d14e94061 100644 --- a/tests/bugs/bug-948729/bug-948729-force.t +++ b/tests/bugs/bug-948729/bug-948729-force.t @@ -16,7 +16,6 @@ V1=patchy1 V2=patchy2 TEST launch_cluster 2; -cli1="gluster --remote-host=$H1" TEST $CLI_1 peer probe $H2; @@ -59,22 +58,22 @@ TEST mount -t xfs $LD5 $B5/$V0 TEST mount -t xfs $LD6 $B6/$V0 #Case 0: Parent directory of the brick is absent -TEST ! $cli1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2 force +TEST ! $CLI1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2 force #Case 1: File system root is being used as brick directory -TEST $cli1 volume create $V0 $H1:$B5/$V0 $H2:$B6/$V0 force +TEST $CLI1 volume create $V0 $H1:$B5/$V0 $H2:$B6/$V0 force #Case 2: Brick directory contains only one component -TEST $cli1 volume create $V1 $H1:/$uuid1 $H2:/$uuid2 force +TEST $CLI1 volume create $V1 $H1:/$uuid1 $H2:/$uuid2 force #Case 3: Sub-directories of the backend FS being used as brick directory -TEST $cli1 volume create $V2 $H1:$B1/$V0/brick1 $H2:$B2/$V0/brick2 force +TEST $CLI1 volume create $V2 $H1:$B1/$V0/brick1 $H2:$B2/$V0/brick2 force #add-brick tests -TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0/nonexistent/brick3 force -TEST $cli1 volume add-brick $V0 $H1:$B3/$V0 force -TEST $cli1 volume add-brick $V1 $H1:/$uuid3 force -TEST $cli1 volume add-brick $V2 $H1:$B4/$V0/brick3 force +TEST ! 
$CLI1 volume add-brick $V0 $H1:$B3/$V0/nonexistent/brick3 force +TEST $CLI1 volume add-brick $V0 $H1:$B3/$V0 force +TEST $CLI1 volume add-brick $V1 $H1:/$uuid3 force +TEST $CLI1 volume add-brick $V2 $H1:$B4/$V0/brick3 force #####replace-brick tests #FIX-ME: replace-brick does not work with the newly introduced cluster test diff --git a/tests/bugs/bug-948729/bug-948729.t b/tests/bugs/bug-948729/bug-948729.t index 288ae2bef..f94db1ea0 100644 --- a/tests/bugs/bug-948729/bug-948729.t +++ b/tests/bugs/bug-948729/bug-948729.t @@ -14,7 +14,6 @@ uuid2=`uuidgen`; uuid3=`uuidgen`; TEST launch_cluster 2; -cli1="gluster --remote-host=$H1" TEST $CLI_1 peer probe $H2; @@ -42,7 +41,7 @@ TEST mount -t xfs $LD2 $B2/$V0 TEST mount -t xfs $LD3 $B3/$V0 #Tests without --mode=script option - +cli1=$(echo $CLI1 | sed 's/ --mode=script//') #Case 0: Parent directory of the brick is absent TEST ! $cli1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2 diff --git a/tests/cluster.rc b/tests/cluster.rc index 1c06bca47..3b10d19f7 100755 --- a/tests/cluster.rc +++ b/tests/cluster.rc @@ -45,8 +45,10 @@ function define_glusterds() { wopt="management.working-directory=${!b}/glusterd"; bopt="management.transport.socket.bind-address=${!h}"; popt="--pid-file=${!b}/glusterd.pid"; - eval "glusterd_$i='glusterd --xlator-option $wopt --xlator-option $bopt $popt'"; - eval "glusterd$i='glusterd --xlator-option $wopt --xlator-option $bopt $popt'"; + sopt="management.glusterd-sockfile=${!b}/glusterd/gd.sock" + lopt="--log-file=${!b}/glusterd.log" + eval "glusterd_$i='glusterd --xlator-option $wopt --xlator-option $bopt --xlator-option $sopt $lopt $popt'"; + eval "glusterd$i='glusterd --xlator-option $wopt --xlator-option $bopt --xlator-option $sopt $lopt $popt'"; done } @@ -98,9 +100,13 @@ function define_clis() { local h; for i in `seq 1 $count`; do - h="H$i"; - eval "CLI_$i='$CLI --remote-host=${!h}'"; - eval "CLI$i='$CLI --remote-host=${!h}'"; + b="B$i"; + eval "CLI_$i='$CLI --glusterd-sock=${!b}/glusterd/gd.sock'"; + eval "CLI$i='$CLI --glusterd-sock=${!b}/glusterd/gd.sock'"; done + CLI="$CLI_1" } +function peer_count() { + $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l +} diff --git a/tests/include.rc b/tests/include.rc index bb541eaa1..80457f124 100644 --- a/tests/include.rc +++ b/tests/include.rc @@ -3,6 +3,7 @@ M1=${M1:=/mnt/glusterfs/1}; # 1st mount point for FUSE N0=${N0:=/mnt/nfs/0}; # 0th mount point for NFS N1=${N1:=/mnt/nfs/1}; # 1st mount point for NFS V0=${V0:=patchy}; # volume name to use in tests +V1=${V1:=patchy1}; # volume name to use in tests B0=${B0:=/d/backends}; # top level of brick directories H0=${H0:=`hostname --fqdn`}; # hostname DEBUG=${DEBUG:=0} # turn on debugging? 
@@ -186,13 +187,14 @@ function cleanup() killall -15 glusterfs glusterfsd glusterd 2>/dev/null || true; killall -9 glusterfs glusterfsd glusterd 2>/dev/null || true; + type cleanup_lvm &>/dev/null && cleanup_lvm + MOUNTPOINTS=`mount | grep "$B0/" | awk '{print $3}'` for m in $MOUNTPOINTS; do umount $m done - LOOPDEVICES=`losetup -a | grep "$B0/" | awk '{print $1}' | tr -d :` for l in $LOOPDEVICES; do @@ -206,6 +208,7 @@ function cleanup() umount -l $M1 2>/dev/null || true; umount -l $N0 2>/dev/null || true; umount -l $N1 2>/dev/null || true; + } function volinfo_field() diff --git a/tests/snapshot.rc b/tests/snapshot.rc new file mode 100755 index 000000000..a5b86f674 --- /dev/null +++ b/tests/snapshot.rc @@ -0,0 +1,251 @@ +#!/bin/bash + +LVM_DEFINED=0 +LVM_PREFIX="patchy_snap" +LVM_COUNT=0 +VHD_SIZE="1G" + +function init_lvm() { + if [ "$1" == "" ]; then + echo "Error: Invalid argument supplied" + return 1 + fi + LVM_COUNT=$1 + + if [ "$2" != "" ]; then + VHD_SIZE=$2 + fi + + local b + local i + + if [ "$B1" = "" ]; then + B1=$B0 + fi + + for i in `seq 1 $LVM_COUNT`; do + b="B$i" + if [ "${!b}" = "" ]; then + echo "Error: $b not defined." + echo "Please run launch_cluster with atleast $LVM_COUNT nodes" + return 1 + fi + + eval "L$i=${!b}/${LVM_PREFIX}_mnt" + l="L$i" + mkdir -p ${!l} + if [ $? -ne 0 ]; then + echo "Error: failed to create dir ${!l}" + return 1 + fi + + eval "VG$i=${LVM_PREFIX}_vg_${i}" + done + + LVM_DEFINED=1 + return 0 +} + +function setup_lvm() { + init_lvm $@ || return 1 + _setup_lvm + return 0 +} + +function cleanup_lvm() { + pkill gluster + sleep 2 + + if [ "$LVM_DEFINED" = "1" ]; then + _cleanup_lvm + fi + + _cleanup_lvm_again >/dev/null 2>&1 + return 0 +} + +######################################################## +# Private Functions +######################################################## +function _setup_lvm() { + local count=$LVM_COUNT + local b + local i + + for i in `seq 1 $count`; do + b="B$i" + + _create_vhd ${!b} $i + _create_lv ${!b} $i + _mount_lv $i + done +} + +function _cleanup_lvm() { + local count=$LVM_COUNT + local b + local i + + for i in `seq 1 $count`; do + b="B$i" + _umount_lv $i + _remove_lv $i + _remove_vhd ${!b} + done +} + +function _cleanup_lvm_again() { + local file + + mount | grep $LVM_PREFIX | awk '{print $3}' | xargs -r umount -f + + /sbin/vgs | grep $LVM_PREFIX | awk '{print $1}' | xargs -r vgremove -f + + find $B0 -name "${LVM_PREFIX}_loop" | xargs -r losetup -d + + find $B0 -name "${LVM_PREFIX}*" | xargs -r rm -rf + + find /run/gluster/snaps -name "*${LVM_PREFIX}*" | xargs -r rm -rf + + for file in `ls /run/gluster/snaps`; do + find /run/gluster/snaps/$file -mmin -2 | xargs -r rm -rf + done +} + +######################################################## +######################################################## +function _create_vhd() { + local dir=$1 + local num=$2 + local loop_num=`expr $2 + 8` + + fallocate -l${VHD_SIZE} $dir/${LVM_PREFIX}_vhd + mknod -m660 $dir/${LVM_PREFIX}_loop b 7 $loop_num + /sbin/losetup $dir/${LVM_PREFIX}_loop $dir/${LVM_PREFIX}_vhd +} + +function _create_lv() { + local dir=$1 + local num=$2 + local vg="VG$num" + + /sbin/pvcreate $dir/${LVM_PREFIX}_loop + /sbin/vgcreate ${!vg} $dir/${LVM_PREFIX}_loop + + /sbin/lvcreate -l 100%FREE -T /dev/${!vg}/thinpool + /sbin/lvcreate -V $VHD_SIZE -T /dev/${!vg}/thinpool -n brick_lvm + + mkfs.xfs -f /dev/${!vg}/brick_lvm +} + +function _mount_lv() { + local num=$1 + local vg="VG$num" + local l="L$num" + + mount -t xfs -o nouuid /dev/${!vg}/brick_lvm ${!l} +} + +function 
_umount_lv() { + local num=$1 + local l="L$num" + + umount -f ${!l} 2>/dev/null || true + rmdir ${!l} 2>/dev/null || true +} + +function _remove_lv() { + local num=$1 + local vg="VG$num" + + vgremove -f ${!vg} +} + +function _remove_vhd() { + local dir=$1 + + losetup -d $dir/${LVM_PREFIX}_loop + rm -f $dir/${LVM_PREFIX}_loop + rm -f $dir/${LVM_PREFIX}_vhd +} + +######################################################## +# Utility Functions +######################################################## +function snapshot_exists() { + local volname=$1 + local snapname=$2 + $CLI snapshot list $volname | egrep -q "^$snapname\$" + return $? +} + +#Create N number of snaps in a given volume +#Arg1 : <Volume Name> +#Arg2 : <Count of snaps to be created> +#Arg3 : <Snap Name Pattern> +#Return: Returns 0 if all snaps are created , +# if not will return exit code of last failed +# snap create command. +function create_n_snapshots() { + local vol=$1 + local snap_count=$2 + local snap_name=$3 + local ret=0 + for i in `seq 1 $snap_count`; do + $CLI_1 snapshot create $snap_name$i ${vol}& + PID_1=$! + wait $PID_1 + ret=$? + if [ "$ret" != "0" ]; then + break + fi + done + return $ret +} + + +#Delete N number of snaps in a given volume +#Arg1 : <Volume Name> +#Arg2 : <Count of snaps to be deleted> +#Arg3 : <Snap Name Pattern> +#Return: Returns 0 if all snaps are Delete, +# if not will return exit code of last failed +# snap delete command. +function delete_n_snapshots() { + local vol=$1 + local snap_count=$2 + local snap_name=$3 + local ret=0 + for i in `seq 1 $snap_count`; do + $CLI_1 snapshot delete $snap_name$i & + PID_1=$! + wait $PID_1 + temp=$? + if [ "$temp" != "0" ]; then + ret=$temp + fi + done + return $ret +} + +#Check for the existance of N number of snaps in a given volume +#Arg1 : <Volume Name> +#Arg2 : <Count of snaps to be checked> +#Arg3 : <Snap Name Pattern> +#Return: Returns 0 if all snaps exists, +# if not will return exit code of last failed +# snapshot_exists(). +function snapshot_n_exists() { + local vol=$1 + local snap_count=$2 + local snap_name=$3 + local ret=0 + for i in `seq 1 $snap_count`; do + snapshot_exists $snap_name$i + ret=$? + if [ "$ret" != "0" ]; then + break + fi + done + return $ret +} diff --git a/tests/volume.rc b/tests/volume.rc index 987122420..171f8d709 100644 --- a/tests/volume.rc +++ b/tests/volume.rc @@ -272,3 +272,54 @@ function get_backend_paths { getfattr -m . -n trusted.glusterfs.pathinfo $path | tr ' ' '\n' | sed -n 's/<POSIX.*:.*:\(.*\)>.*/\1/p' } + +function do_volume_operations() { + local operation=$1 + local count=$2 + local force=$3 + + local pids=() + local cli + local v + + for i in `seq 1 $count`; do + cli="CLI_$i" + v="V`expr $i - 1`" + ${!cli} volume $operation ${!v} $force & + pids[$i]=$! + done + + for i in `seq 1 $count`; do + wait ${pids[$i]} + done +} + +function start_volumes() { + do_volume_operations start $1 +} + +function stop_volumes() { + do_volume_operations stop $1 +} + +function start_force_volumes() { + do_volume_operations start $1 force +} + +function stop_force_volumes() { + do_volume_operations stop $1 force +} + +function delete_volumes() { + do_volume_operations delete $1 +} + +function volume_exists() { + local volname=$1 + $CLI volume info $volname 2>&1 | grep -q 'does not exist' + if [ $? -eq 0 ]; then + return 1 + else + return 0 + fi +} |
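The helper libraries added above (tests/cluster.rc, tests/snapshot.rc, tests/volume.rc) are what the new .t cases in this patch build on. As a hedged sketch only, mirroring tests/basic/volume-snapshot.t and tests/bugs/bug-1040934.t rather than defining anything new, a two-node test combining these helpers could look like the following; the volume, host, and brick names are the framework defaults from include.rc and cluster.rc, not requirements:

```bash
#!/bin/bash
# Illustrative sketch, not part of this patch: exercises launch_cluster and
# peer_count (cluster.rc), setup_lvm and snapshot_exists (snapshot.rc), and
# stop_force_volumes/delete_volumes/volume_exists (volume.rc).

. $(dirname $0)/../include.rc
. $(dirname $0)/../cluster.rc
. $(dirname $0)/../snapshot.rc
. $(dirname $0)/../volume.rc

cleanup;

TEST launch_cluster 2          # two glusterds, each with its own socket and log
TEST setup_lvm 2               # thin-LV backed XFS bricks mounted at $L1 and $L2

TEST $CLI_1 peer probe $H2
EXPECT_WITHIN 20 1 peer_count  # counts 'Peer in Cluster (Connected)' entries

TEST $CLI_1 volume create $V0 $H1:$L1 $H2:$L2
TEST $CLI_1 volume start $V0
EXPECT 'Started' volinfo_field $V0 'Status'

TEST $CLI_1 snapshot create ${V0}_snap $V0
TEST snapshot_exists $V0 ${V0}_snap
TEST $CLI_1 snapshot delete ${V0}_snap
TEST ! snapshot_exists $V0 ${V0}_snap

stop_force_volumes 1           # runs 'volume stop $V0 force' through $CLI_1
delete_volumes 1
TEST ! volume_exists $V0

cleanup;
```

The *_volumes helpers take a count rather than a volume name because do_volume_operations derives $V0, $V1, ... and $CLI_1, $CLI_2, ... from the index, which is how tests/basic/volume-snapshot.t drives both of its volumes in parallel.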
