From 64954eb3c58f4ef077e54e8a3726fd2d27419b12 Mon Sep 17 00:00:00 2001
From: Niels de Vos
Date: Fri, 26 Dec 2014 12:57:48 +0100
Subject: tests: move all test-cases into component subdirectories

There are around 300 regression tests, 250 of them in tests/bugs. Running
a partial set of tests/bugs is not easy, because it is a flat directory
with almost all the tests inside. It would be valuable to make partial
runs of tests/bugs easier, and to allow the use of multiple build hosts
for a single commit, each running a subset of the tests for a quicker
result.

Additional changes made:
- correct the include path for *.rc shell libraries and *.py utils
- make the testcases pass checkpatch
- arequal-checksum in afr/self-heal.t was never executed, now it is
- include.rc now complains loudly if it fails to find env.rc

Change-Id: I26ffd067e9853d3be1fd63b2f37d8aa0fd1b4fea
BUG: 1178685
Reported-by: Emmanuel Dreyfus
Reported-by: Atin Mukherjee
URL: http://www.gluster.org/pipermail/gluster-devel/2014-December/043414.html
Signed-off-by: Niels de Vos
Reviewed-on: http://review.gluster.org/9353
Reviewed-by: Kaleb KEITHLEY
Reviewed-by: Emmanuel Dreyfus
Tested-by: Gluster Build System
Reviewed-by: Vijay Bellur
---
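Note: with the tests grouped per component, a single directory can now be
run on its own. A minimal sketch of such a partial run (assuming the
prove(1) TAP harness that run-tests.sh is built on; the paths and options
here are illustrative only, not part of this change):

    # Run only the glusterfs-server regression tests from a built tree.
    ./autogen.sh && ./configure && make   # assumed build steps (generate env.rc)
    prove -r -v tests/bugs/glusterfs-server/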
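The include.rc change mentioned above is not visible in this diff (it is
limited to tests/bugs/glusterfs-server). A guard of roughly the following
shape would give the "complains loudly" behaviour; the path and message
below are assumptions, not the actual implementation:

    # hypothetical sketch of the env.rc check in include.rc
    env_rc="$(dirname $0)/../../env.rc"   # assumed location, generated by ./configure
    if [ ! -f "$env_rc" ]; then
            echo "FATAL: $env_rc not found, run ./configure first" >&2
            exit 1
    fi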
 tests/bugs/glusterfs-server/bug-852147.t | 85 ++++++++++++++++++++++++++++++
 tests/bugs/glusterfs-server/bug-861542.t | 50 ++++++++++++++++++
 tests/bugs/glusterfs-server/bug-864222.t | 27 ++++++++++
 tests/bugs/glusterfs-server/bug-873549.t | 17 ++++++
 tests/bugs/glusterfs-server/bug-877992.t | 61 ++++++++++++++++++++++
 tests/bugs/glusterfs-server/bug-887145.t | 88 ++++++++++++++++++++++++++++++++
 tests/bugs/glusterfs-server/bug-889996.t | 19 +++++++
 tests/bugs/glusterfs-server/bug-904300.t | 62 ++++++++++++++++++++++
 tests/bugs/glusterfs-server/bug-905864.c | 82 +++++++++++++++++++++++++++++
 tests/bugs/glusterfs-server/bug-905864.t | 32 ++++++++++++
 tests/bugs/glusterfs-server/bug-912297.t | 44 ++++++++++++++++
 11 files changed, 567 insertions(+)
 create mode 100755 tests/bugs/glusterfs-server/bug-852147.t
 create mode 100755 tests/bugs/glusterfs-server/bug-861542.t
 create mode 100755 tests/bugs/glusterfs-server/bug-864222.t
 create mode 100644 tests/bugs/glusterfs-server/bug-873549.t
 create mode 100755 tests/bugs/glusterfs-server/bug-877992.t
 create mode 100755 tests/bugs/glusterfs-server/bug-887145.t
 create mode 100644 tests/bugs/glusterfs-server/bug-889996.t
 create mode 100755 tests/bugs/glusterfs-server/bug-904300.t
 create mode 100644 tests/bugs/glusterfs-server/bug-905864.c
 create mode 100644 tests/bugs/glusterfs-server/bug-905864.t
 create mode 100755 tests/bugs/glusterfs-server/bug-912297.t

(limited to 'tests/bugs/glusterfs-server')

diff --git a/tests/bugs/glusterfs-server/bug-852147.t b/tests/bugs/glusterfs-server/bug-852147.t
new file mode 100755
index 00000000000..8cb5fd13f85
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-852147.t
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+logdir=`gluster --print-logdir`"/bricks"
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0
+touch $M0/file1;
+
+TEST $CLI volume set $V0 performance.cache-max-file-size 20MB
+TEST $CLI volume set $V0 performance.cache-min-file-size 10MB
+
+EXPECT "20MB" volinfo_field $V0 'performance.cache-max-file-size';
+EXPECT "10MB" volinfo_field $V0 'performance.cache-min-file-size';
+
+#Performing volume reset and verifying.
+TEST $CLI volume reset $V0
+EXPECT "" volinfo_field $V0 'performance.cache-max-file-size';
+EXPECT "" volinfo_field $V0 'performance.cache-min-file-size';
+
+#Verifying volume-profile start, info and stop
+EXPECT "Starting volume profile on $V0 has been successful " $CLI volume profile $V0 start
+
+function vol_prof_info()
+{
+        $CLI volume profile $V0 info | grep Brick | wc -l
+}
+EXPECT "8" vol_prof_info
+
+EXPECT "Stopping volume profile on $V0 has been successful " $CLI volume profile $V0 stop
+
+function log-file-name()
+{
+        logfilename=$B0"/"$V0"1.log"
+        echo ${logfilename:1} | tr / -
+}
+
+function file-size()
+{
+        ls -lrt $1 | awk '{print $5}'
+}
+
+#Finding the current log file's size
+log_file=$logdir"/"`log-file-name`
+log_file_size=`file-size $log_file`
+
+#Removing the old backup log files
+ren_file=$log_file".*"
+rm -rf $ren_file
+
+#Initiating log rotate
+TEST $CLI volume log rotate $V0
+
+#Capturing the new log file's size
+new_file_size=`file-size $log_file`
+
+#Verifying the size of the new log file and the creation of the backup log file
+TEST ! [ $new_file_size -eq $log_file_size ]
+TEST ls -lrt $ren_file
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterfs-server/bug-861542.t b/tests/bugs/glusterfs-server/bug-861542.t
new file mode 100755
index 00000000000..ab572963bb0
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-861542.t
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+# A distributed volume with a single brick was chosen solely for the ease of
+# implementing the test case (to be precise, for the ease of extracting the port number).
+TEST $CLI volume create $V0 $H0:$B0/brick0;
+
+TEST $CLI volume start $V0;
+
+function port_field()
+{
+        local vol=$1;
+        local opt=$2;
+        if [ $opt -eq '0' ]; then
+                $CLI volume status $vol | grep "brick0" | awk '{print $3}';
+        else
+                $CLI volume status $vol detail | grep "^Port " | awk '{print $3}';
+        fi
+}
+
+function xml_port_field()
+{
+        local vol=$1;
+        local opt=$2;
+        $CLI --xml volume status $vol $opt | tr -d '\n' |\
+# Find the first occurrence of the string between <port> and </port>
+        sed -rn 's/<port>/&###/;s/<\/port>/###&/;s/^.*###(.*)###.*$/\1/p'
+}
+
+TEST $CLI volume status $V0;
+TEST $CLI volume status $V0 detail;
+TEST $CLI --xml volume status $V0;
+TEST $CLI --xml volume status $V0 detail;
+
+# Kill the brick process. After this, the port number for the killed process (in this case a brick) must be "N/A".
+kill `cat $GLUSTERD_WORKDIR/vols/$V0/run/$H0-d-backends-brick0.pid`
+
+EXPECT "N/A" port_field $V0 '0'; # volume status
+EXPECT "N/A" port_field $V0 '1'; # volume status detail
+
+EXPECT "N/A" xml_port_field $V0 '';
+EXPECT "N/A" xml_port_field $V0 'detail';
+
+cleanup;
diff --git a/tests/bugs/glusterfs-server/bug-864222.t b/tests/bugs/glusterfs-server/bug-864222.t
new file mode 100755
index 00000000000..cbda7d27f38
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-864222.t
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/brick0
+TEST $CLI volume start $V0
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 nolock
+cd $N0
+
+TEST ls
+
+TEST $CLI volume set $V0 nfs.enable-ino32 on
+# Main test. This should pass.
+TEST ls
+
+cd
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+cleanup
+
diff --git a/tests/bugs/glusterfs-server/bug-873549.t b/tests/bugs/glusterfs-server/bug-873549.t
new file mode 100644
index 00000000000..a3b2f9c9bf7
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-873549.t
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd -LDEBUG;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+TEST $CLI volume set $V0 performance.cache-size 512MB
+TEST $CLI volume start $V0
+TEST $CLI volume statedump $V0 all
+
+cleanup;
diff --git a/tests/bugs/glusterfs-server/bug-877992.t b/tests/bugs/glusterfs-server/bug-877992.t
new file mode 100755
index 00000000000..c0287e7594a
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-877992.t
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+
+## Start and create a volume
+TEST glusterd -LDEBUG
+TEST pidof glusterd
+
+
+function volinfo_field()
+{
+        local vol=$1;
+        local field=$2;
+
+        $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+
+function hooks_prep ()
+{
+        local event=$1
+        touch /tmp/pre.out /tmp/post.out
+        touch $GLUSTERD_WORKDIR/hooks/1/"$event"/pre/Spre.sh
+        touch $GLUSTERD_WORKDIR/hooks/1/"$event"/post/Spost.sh
+
+        printf "#! /bin/bash\necho "$event"Pre > /tmp/pre.out\n" > $GLUSTERD_WORKDIR/hooks/1/"$event"/pre/Spre.sh
+        printf "#! /bin/bash\necho "$event"Post > /tmp/post.out\n" > $GLUSTERD_WORKDIR/hooks/1/"$event"/post/Spost.sh
+        chmod a+x $GLUSTERD_WORKDIR/hooks/1/"$event"/pre/Spre.sh
+        chmod a+x $GLUSTERD_WORKDIR/hooks/1/"$event"/post/Spost.sh
+}
+
+function hooks_cleanup ()
+{
+        local event=$1
+        rm /tmp/pre.out /tmp/post.out
+        rm $GLUSTERD_WORKDIR/hooks/1/"$event"/pre/Spre.sh
+        rm $GLUSTERD_WORKDIR/hooks/1/"$event"/post/Spost.sh
+}
+
+## Verify the volume is created and that its hook scripts ran
+hooks_prep 'create'
+TEST $CLI volume create $V0 $H0:$B0/${V0}1;
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT 'createPre' cat /tmp/pre.out;
+EXPECT 'createPost' cat /tmp/post.out;
+hooks_cleanup 'create'
+
+
+## Start the volume and verify that its hook scripts ran
+hooks_prep 'start'
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT 'startPre' cat /tmp/pre.out;
+EXPECT 'startPost' cat /tmp/post.out;
+hooks_cleanup 'start'
+
+cleanup;
diff --git a/tests/bugs/glusterfs-server/bug-887145.t b/tests/bugs/glusterfs-server/bug-887145.t
new file mode 100755
index 00000000000..35e1c928390
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-887145.t
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 performance.open-behind off;
+TEST $CLI volume start $V0
+
+## Mount FUSE with caching disabled
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+
+
+useradd tmp_user 2>/dev/null 1>/dev/null;
+mkdir $M0/dir;
+mkdir $M0/other;
+cp /etc/passwd $M0/;
+cp $M0/passwd $M0/file;
+chmod 600 $M0/file;
+
+TEST mount_nfs $H0:/$V0 $N0 nolock;
+
+chown -R nfsnobody:nfsnobody $M0/dir;
+chown -R tmp_user:tmp_user $M0/other;
+
+TEST $CLI volume set $V0 server.root-squash on;
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+
+# Create files and directories in the root of the glusterfs and nfs mounts.
+# The root is owned by root, so the right behaviour is to get EACCES,
+# as the fops are executed as nfsnobody.
+touch $M0/foo 2>/dev/null;
+TEST [ $? -ne 0 ]
+touch $N0/foo 2>/dev/null;
+TEST [ $? -ne 0 ]
+mkdir $M0/new 2>/dev/null;
+TEST [ $? -ne 0 ]
+mkdir $N0/new 2>/dev/null;
+TEST [ $? -ne 0 ]
+cp $M0/file $M0/tmp_file 2>/dev/null;
+TEST [ $? -ne 0 ]
+cp $N0/file $N0/tmp_file 2>/dev/null;
+TEST [ $? -ne 0 ]
+cat $M0/file 2>/dev/null;
+TEST [ $? -ne 0 ]
+# Here the read should be allowed: even though the file "passwd" is owned
+# by root, its permissions allow other users to read it.
+cat $M0/passwd 1>/dev/null;
+TEST [ $? -eq 0 ]
+cat $N0/passwd 1>/dev/null;
+TEST [ $? -eq 0 ]
+
+# Creating files and directories should succeed, as the fops are executed
+# inside the directory owned by nfsnobody.
+TEST touch $M0/dir/file;
+TEST touch $N0/dir/foo;
+TEST mkdir $M0/dir/new;
+TEST mkdir $N0/dir/other;
+TEST rm -f $M0/dir/file $M0/dir/foo;
+TEST rmdir $N0/dir/*;
+
+# Creating files and directories here should fail, as the "other" directory
+# is owned by tmp_user.
+touch $M0/other/foo 2>/dev/null;
+TEST [ $? -ne 0 ]
+touch $N0/other/foo 2>/dev/null;
+TEST [ $? -ne 0 ]
+mkdir $M0/other/new 2>/dev/null;
+TEST [ $? -ne 0 ]
+mkdir $N0/other/new 2>/dev/null;
+TEST [ $? -ne 0 ]
+
+userdel tmp_user;
+rm -rf /home/tmp_user;
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterfs-server/bug-889996.t b/tests/bugs/glusterfs-server/bug-889996.t
new file mode 100644
index 00000000000..d7d25c42933
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-889996.t
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+rm -rf $B0/${V0}1;
+
+TEST ! $CLI volume start $V0;
+EXPECT 0 online_brick_count;
+
+cleanup;
diff --git a/tests/bugs/glusterfs-server/bug-904300.t b/tests/bugs/glusterfs-server/bug-904300.t
new file mode 100755
index 00000000000..8ce805cfcdd
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-904300.t
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+# 1-8
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0;
+TEST $CLI volume start $V0
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+TEST mount_nfs $H0:/$V0 $N0 nolock
+TEST mkdir $N0/dir1
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+#
+# Case 1: Allow "dir1" to be mounted only from 127.0.0.1
+# 9-12
+TEST $CLI volume set $V0 export-dir \""/dir1(127.0.0.1)"\"
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 2 is_nfs_export_available
+
+TEST mount_nfs localhost:/$V0/dir1 $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+#
+# Case 2: Allow "dir1" to be mounted only from 8.8.8.8. This is
+# a negative test case, therefore the mount should fail.
+# 13-16
+TEST $CLI volume set $V0 export-dir \""/dir1(8.8.8.8)"\"
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 2 is_nfs_export_available
+
+TEST ! mount_nfs $H0:/$V0/dir1 $N0 nolock
+TEST ! umount $N0
+
+
+# Case 3: Variation of test case 1. Here we are checking with the hostname
+# instead of the IP address.
+# 17-20
+TEST $CLI volume set $V0 export-dir \""/dir1($H0)"\"
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 2 is_nfs_export_available
+
+TEST mount_nfs $H0:/$V0/dir1 $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+# Case 4: Variation of test case 1. Here we are checking with an IP range.
+# 21-24
+TEST $CLI volume set $V0 export-dir \""/dir1(127.0.0.0/24)"\"
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 2 is_nfs_export_available
+
+TEST mount_nfs localhost:/$V0/dir1 $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+## Finish up
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterfs-server/bug-905864.c b/tests/bugs/glusterfs-server/bug-905864.c
new file mode 100644
index 00000000000..3cc4cc5d232
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-905864.c
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <pthread.h>
+
+
+pthread_t th[5] = {0};
+void
+flock_init (struct flock *f, short int type, off_t start, off_t len)
+{
+        f->l_type = type;
+        f->l_start = start;
+        f->l_len = len;
+}
+
+int
+flock_range_in_steps (int fd, int is_set, short l_type,
+                      int start, int end, int step)
+{
+        int ret = 0;
+        int i = 0;
+        struct flock f = {0,};
+
+        for (i = start; i+step < end; i += step) {
+                flock_init (&f, l_type, i, step);
+                ret = fcntl (fd, (is_set) ? F_SETLKW : F_GETLK, &f);
+                if (ret) {
+                        perror ("fcntl");
+                        goto out;
+                }
+        }
+out:
+        return ret;
+}
+
+void *
+random_locker (void *arg)
+{
+        int fd = *(int *)arg;
+        int i = 0;
+        int is_set = 0;
+
+        /* use the thread id to choose a GETLK or SETLK operation */
+        is_set = pthread_self () % 2;
+        (void)flock_range_in_steps (fd, is_set, F_WRLCK, 0, 400, 1);
+
+        return NULL;
+}
+
+
+int main (int argc, char **argv)
+{
+        int fd = -1;
+        int ret = 1;
+        int i = 0;
+        char *fname = NULL;
+
+        if (argc < 2)
+                goto out;
+
+        fname = argv[1];
+        fd = open (fname, O_RDWR);
+        if (fd == -1) {
+                perror ("open");
+                goto out;
+        }
+
+        ret = flock_range_in_steps (fd, 1, F_WRLCK, 0, 2000, 2);
+        for (i = 0; i < 5; i++) {
+                pthread_create (&th[i], NULL, random_locker, (void *) &fd);
+        }
+        ret = flock_range_in_steps (fd, 1, F_WRLCK, 0, 2000, 2);
+        for (i = 0; i < 5; i++) {
+                pthread_join (th[i], NULL);
+        }
+out:
+        if (fd != -1)
+                close (fd);
+
+        return ret;
+}
diff --git a/tests/bugs/glusterfs-server/bug-905864.t b/tests/bugs/glusterfs-server/bug-905864.t
new file mode 100644
index 00000000000..44923a85333
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-905864.t
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
+TEST $CLI volume start $V0;
+
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M1;
+
+TEST touch $M0/file1;
+
+# The following C program tries to open up race(s), if any, in the
+# F_GETLK/F_SETLKW codepaths of the locks xlator.
+TEST $CC -pthread -g3 $(dirname $0)/bug-905864.c -o $(dirname $0)/bug-905864
+
+$(dirname $0)/bug-905864 $M0/file1 &
+$(dirname $0)/bug-905864 $M1/file1;
+wait
+
+TEST rm -f $(dirname $0)/bug-905864
+EXPECT $(brick_count $V0) online_brick_count
+
+cleanup
diff --git a/tests/bugs/glusterfs-server/bug-912297.t b/tests/bugs/glusterfs-server/bug-912297.t
new file mode 100755
index 00000000000..f1f4147e6aa
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-912297.t
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting owner-uid as -12 must fail
+TEST ! $CLI volume set $V0 owner-uid -12
+EXPECT '' volinfo_field $V0 'storage.owner-uid'
+
+## Setting owner-gid as -5 must fail
+TEST ! $CLI volume set $V0 owner-gid -5
+EXPECT '' volinfo_field $V0 'storage.owner-gid'
+
+## Setting owner-uid as 36
+TEST $CLI volume set $V0 owner-uid 36
+EXPECT '36' volinfo_field $V0 'storage.owner-uid'
+
+## Setting owner-gid as 36
+TEST $CLI volume set $V0 owner-gid 36
+EXPECT '36' volinfo_field $V0 'storage.owner-gid'
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
--
cgit