From 64954eb3c58f4ef077e54e8a3726fd2d27419b12 Mon Sep 17 00:00:00 2001
From: Niels de Vos
Date: Fri, 26 Dec 2014 12:57:48 +0100
Subject: tests: move all test-cases into component subdirectories

There are around 300 regression tests, 250 being in tests/bugs. Running
a partial set of tests/bugs is not easy because this is a flat directory
with almost all tests inside. It would be valuable to make running a
partial set of tests/bugs easier, and to allow the use of multiple build
hosts for a single commit, each running a subset of the tests for a
quicker result.

Additional changes made:
- correct the include path for *.rc shell libraries and *.py utils
- make the testcases pass checkpatch
- arequal-checksum in afr/self-heal.t was never executed, now it is
- include.rc now complains loudly if it fails to find env.rc

Change-Id: I26ffd067e9853d3be1fd63b2f37d8aa0fd1b4fea
BUG: 1178685
Reported-by: Emmanuel Dreyfus
Reported-by: Atin Mukherjee
URL: http://www.gluster.org/pipermail/gluster-devel/2014-December/043414.html
Signed-off-by: Niels de Vos
Reviewed-on: http://review.gluster.org/9353
Reviewed-by: Kaleb KEITHLEY
Reviewed-by: Emmanuel Dreyfus
Tested-by: Gluster Build System
Reviewed-by: Vijay Bellur
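A note on the env.rc item above: the commit only says include.rc now fails
loudly when env.rc is missing. A minimal sketch of such a guard (the variable
name, path, and message here are assumptions for illustration, not the
committed code):

    # hypothetical guard at the top of tests/include.rc
    env_rc="$(dirname ${BASH_SOURCE[0]})/env.rc"   # env.rc is generated by ./configure
    if [ ! -f "$env_rc" ]; then
        echo "FATAL: $env_rc not found; run ./configure to generate it" >&2
        exit 1
    fi
    . "$env_rc"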
---
 tests/bugs/replicate/886998/strict-readdir.t |  52 +++++++++
 tests/bugs/replicate/bug-1015990-rep.t       |  80 ++++++++++++++
 tests/bugs/replicate/bug-1015990.t           |  95 ++++++++++++++++
 tests/bugs/replicate/bug-1032927.t           |  32 ++++++
 tests/bugs/replicate/bug-1037501.t           | 104 ++++++++++++++++++
 tests/bugs/replicate/bug-1046624.t           |  46 ++++++++
 tests/bugs/replicate/bug-1058797.t           |  45 ++++++++
 tests/bugs/replicate/bug-1101647.t           |  29 +++++
 tests/bugs/replicate/bug-1130892.t           |  60 +++++++++++
 tests/bugs/replicate/bug-1132102.t           |  28 +++++
 .../bug-1134691-afr-lookup-metadata-heal.t   |  50 +++++++++
 tests/bugs/replicate/bug-1139230.t           |  58 ++++++++++
 tests/bugs/replicate/bug-765564.t            |  86 +++++++++++++++
 tests/bugs/replicate/bug-767585-gfid.t       |  42 ++++++++
 tests/bugs/replicate/bug-802417.t            | 108 +++++++++++++++++++
 tests/bugs/replicate/bug-821056.t            |  52 +++++++++
 tests/bugs/replicate/bug-830665.t            | 120 +++++++++++++++++++++
 tests/bugs/replicate/bug-853680.t            |  53 +++++++++
 tests/bugs/replicate/bug-859581.t            |  53 +++++++++
 tests/bugs/replicate/bug-865825.t            |  82 ++++++++++++++
 tests/bugs/replicate/bug-880898.t            |  23 ++++
 tests/bugs/replicate/bug-884328.t            |  12 +++
 tests/bugs/replicate/bug-886998.t            |  52 +++++++++
 tests/bugs/replicate/bug-888174.t            |  62 +++++++++++
 tests/bugs/replicate/bug-913051.t            |  67 ++++++++++++
 tests/bugs/replicate/bug-916226.t            |  26 +++++
 tests/bugs/replicate/bug-918437-sh-mtime.t   |  71 ++++++++++++
 tests/bugs/replicate/bug-921231.t            |  31 ++++++
 tests/bugs/replicate/bug-957877.t            |  33 ++++++
 tests/bugs/replicate/bug-966018.t            |  35 ++++++
 tests/bugs/replicate/bug-976800.t            |  28 +++++
 tests/bugs/replicate/bug-977797.t            |  95 ++++++++++++++++
 tests/bugs/replicate/bug-978794.t            |  29 +++++
 tests/bugs/replicate/bug-979365.t            |  47 ++++++++
 tests/bugs/replicate/bug-986905.t            |  27 +++++
 35 files changed, 1913 insertions(+)
 create mode 100644 tests/bugs/replicate/886998/strict-readdir.t
 create mode 100755 tests/bugs/replicate/bug-1015990-rep.t
 create mode 100755 tests/bugs/replicate/bug-1015990.t
 create mode 100644 tests/bugs/replicate/bug-1032927.t
 create mode 100755 tests/bugs/replicate/bug-1037501.t
 create mode 100755 tests/bugs/replicate/bug-1046624.t
 create mode 100644 tests/bugs/replicate/bug-1058797.t
 create mode 100644 tests/bugs/replicate/bug-1101647.t
 create mode 100644 tests/bugs/replicate/bug-1130892.t
 create mode 100644 tests/bugs/replicate/bug-1132102.t
 create mode 100644 tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t
 create mode 100644 tests/bugs/replicate/bug-1139230.t
 create mode 100644 tests/bugs/replicate/bug-765564.t
 create mode 100755 tests/bugs/replicate/bug-767585-gfid.t
 create mode 100755 tests/bugs/replicate/bug-802417.t
 create mode 100644 tests/bugs/replicate/bug-821056.t
 create mode 100755 tests/bugs/replicate/bug-830665.t
 create mode 100755 tests/bugs/replicate/bug-853680.t
 create mode 100755 tests/bugs/replicate/bug-859581.t
 create mode 100755 tests/bugs/replicate/bug-865825.t
 create mode 100644 tests/bugs/replicate/bug-880898.t
 create mode 100644 tests/bugs/replicate/bug-884328.t
 create mode 100644 tests/bugs/replicate/bug-886998.t
 create mode 100644 tests/bugs/replicate/bug-888174.t
 create mode 100644 tests/bugs/replicate/bug-913051.t
 create mode 100644 tests/bugs/replicate/bug-916226.t
 create mode 100644 tests/bugs/replicate/bug-918437-sh-mtime.t
 create mode 100644 tests/bugs/replicate/bug-921231.t
 create mode 100644 tests/bugs/replicate/bug-957877.t
 create mode 100644 tests/bugs/replicate/bug-966018.t
 create mode 100644 tests/bugs/replicate/bug-976800.t
 create mode 100755 tests/bugs/replicate/bug-977797.t
 create mode 100644 tests/bugs/replicate/bug-978794.t
 create mode 100755 tests/bugs/replicate/bug-979365.t
 create mode 100755 tests/bugs/replicate/bug-986905.t

(limited to 'tests/bugs/replicate')

diff --git a/tests/bugs/replicate/886998/strict-readdir.t b/tests/bugs/replicate/886998/strict-readdir.t
new file mode 100644
index 00000000000..63fe313b201
--- /dev/null
+++ b/tests/bugs/replicate/886998/strict-readdir.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+
+function num_files_in_dir {
+        d=$1
+        ls $d | sort | uniq | wc -l
+}
+
+#Basic sanity tests for readdir functionality
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/r2d2_0 $H0:$B0/r2d2_1 $H0:$B0/r2d2_2 $H0:$B0/r2d2_3
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-server=$H0 --volfile-id=/$V0 $M0
+
+TEST touch $M0/{1..100}
+EXPECT "100" num_files_in_dir $M0
+
+TEST kill_brick $V0 $H0 $B0/r2d2_0
+TEST kill_brick $V0 $H0 $B0/r2d2_2
+EXPECT "100" num_files_in_dir $M0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST kill_brick $V0 $H0 $B0/r2d2_1
+TEST kill_brick $V0 $H0 $B0/r2d2_3
+EXPECT "100" num_files_in_dir $M0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 3
+
+TEST $CLI volume set $V0 cluster.strict-readdir on
+EXPECT "on" volinfo_field $V0 cluster.strict-readdir
+TEST kill_brick $V0 $H0 $B0/r2d2_0
+TEST kill_brick $V0 $H0 $B0/r2d2_2
+EXPECT "100" num_files_in_dir $M0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST kill_brick $V0 $H0 $B0/r2d2_1
+TEST kill_brick $V0 $H0 $B0/r2d2_3
+EXPECT "100" num_files_in_dir $M0
+cleanup;
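Note how the include prefix tracks directory depth: this test lives one level
deeper (tests/bugs/replicate/886998/), so it needs three "../" components where
its siblings use two — exactly the include-path corrections the commit message
mentions. A depth-independent alternative (a hypothetical sketch, not what the
framework uses) would anchor on the repository root instead:

    # hypothetical; the real tests rely on paths relative to $0
    TESTS_DIR=${TESTS_DIR:-$(cd $(dirname $0) && git rev-parse --show-toplevel)/tests}
    . $TESTS_DIR/include.rc
    . $TESTS_DIR/volume.rc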
diff --git a/tests/bugs/replicate/bug-1015990-rep.t b/tests/bugs/replicate/bug-1015990-rep.t
new file mode 100755
index 00000000000..4e959e6e70e
--- /dev/null
+++ b/tests/bugs/replicate/bug-1015990-rep.t
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+
+
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+sleep 5
+TEST kill_brick $V0 $H0 $B0/$V0"3"
+sleep 5
+
+for i in {1..100}; do echo "STRING" > $M0/File$i; done
+
+brick_2_sh_entries=$(count_sh_entries $B0/$V0"2")
+brick_4_sh_entries=$(count_sh_entries $B0/$V0"4")
+
+command_output=$(gluster volume heal $V0 statistics heal-count replica $H0:$B0/$V0"1")
+
+
+substring="Number of entries:"
+count=0
+while read -r line;
+do
+        if [[ "$line" == *$substring* ]]
+        then
+                value=$(echo $line | cut -f 2 -d :)
+                count=$(($count + $value))
+        fi
+
+done <<< "$command_output"
+
+brick_2_entries_count=$(($count-$value))
+
+EXPECT "0" echo $brick_2_entries_count
+
+brick_2_entries_count=$count
+
+
+xattrop_count_brick_2=$(count_sh_entries $B0/$V0"2")
+##Remove the count of the xattrop-gfid entry count as it does not contribute
+##to the number of files to be healed
+
+sub_val=1
+xattrop_count_brick_2=$(($xattrop_count_brick_2-$sub_val))
+
+ret=0
+if [ "$xattrop_count_brick_2" -eq "$brick_2_entries_count" ]
+        then
+                ret=$(($ret + $sub_val))
+fi
+
+EXPECT "1" echo $ret
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0
+
+cleanup;
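The while/read loop above tallies the per-brick "Number of entries:" lines
emitted by the heal-count CLI output. For reference, the same sum can be done
in one awk pass (a sketch, equivalent to the loop under the same output
format):

    count=$(gluster volume heal $V0 statistics heal-count replica $H0:$B0/$V0"1" |
            awk -F: '/Number of entries:/ {sum += $2} END {print sum}')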
diff --git a/tests/bugs/replicate/bug-1015990.t b/tests/bugs/replicate/bug-1015990.t
new file mode 100755
index 00000000000..48181c00329
--- /dev/null
+++ b/tests/bugs/replicate/bug-1015990.t
@@ -0,0 +1,95 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+
+
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+sleep 5
+TEST kill_brick $V0 $H0 $B0/$V0"3"
+sleep 5
+
+for i in {1..100}; do echo "STRING" > $M0/File$i; done
+
+brick_2_sh_entries=$(count_sh_entries $B0/$V0"2")
+brick_4_sh_entries=$(count_sh_entries $B0/$V0"4")
+
+
+command_output=$(gluster volume heal $V0 statistics heal-count)
+
+
+substring="Number of entries:"
+count=0
+while read -r line;
+do
+        if [[ "$line" == *$substring* ]]
+        then
+                value=$(echo $line | cut -f 2 -d :)
+                count=$(($count + $value))
+        fi
+
+done <<< "$command_output"
+
+brick_2_entries_count=$(($count-$value))
+brick_4_entries_count=$value
+
+
+xattrop_count_brick_2=$(count_sh_entries $B0/$V0"2")
+##Remove the count of the xattrop-gfid entry count as it does not contribute
+##to the number of files to be healed
+
+sub_val=1
+xattrop_count_brick_2=$(($xattrop_count_brick_2-$sub_val))
+
+xattrop_count_brick_4=$(count_sh_entries $B0/$V0"4")
+##Remove xattrop-gfid entry count
+
+xattrop_count_brick_4=$(($xattrop_count_brick_4-$sub_val))
+
+
+ret=0
+if [ "$xattrop_count_brick_2" -eq "$brick_2_entries_count" ]
+        then
+                ret=$(($ret + $sub_val))
+fi
+
+EXPECT "1" echo $ret
+
+
+ret=0
+if [ "$xattrop_count_brick_4" -eq "$brick_4_entries_count" ]
+        then
+                ret=$(($ret + $sub_val))
+fi
+
+EXPECT "1" echo $ret
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0
+
+cleanup;
+
diff --git a/tests/bugs/replicate/bug-1032927.t b/tests/bugs/replicate/bug-1032927.t
new file mode 100644
index 00000000000..eb663d03fed
--- /dev/null
+++ b/tests/bugs/replicate/bug-1032927.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This tests if pathinfo getxattr fails when one of the bricks is down
+#Let's hope it doesn't
+
+cleanup;
+function get_pathinfo_in_loop {
+        failed=0
+        for i in {1..1000}
+        do
+                getfattr -n trusted.glusterfs.pathinfo $M0 2>/dev/null
+                if [ $? -ne 0 ]; then failed=1;break; fi
+        done
+        return $failed
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+cd $M0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+#when one of the bricks is down getfattr of pathinfo should not fail
+#Let's just run the test 1000 times to see if we hit the race
+TEST get_pathinfo_in_loop
+
+cleanup
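For reference, trusted.glusterfs.pathinfo is a virtual xattr answered by the
client graph rather than stored on disk; querying it by hand looks roughly
like this (the output format below is abbreviated from memory, so treat it as
illustrative only):

    getfattr -n trusted.glusterfs.pathinfo $M0
    # trusted.glusterfs.pathinfo="(<REPLICATE:...> <POSIX(...):host:/d/backends/...> ...)"

The test only cares that the getxattr call keeps succeeding while a brick is
down, not about the exact string returned.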
diff --git a/tests/bugs/replicate/bug-1037501.t b/tests/bugs/replicate/bug-1037501.t
new file mode 100755
index 00000000000..ce079555b50
--- /dev/null
+++ b/tests/bugs/replicate/bug-1037501.t
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function write_file()
+{
+        path="$1"; shift
+        echo "$*" > "$path"
+}
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+## Start and create a volume
+mkdir -p ${B0}/${V0}-0
+mkdir -p ${B0}/${V0}-1
+mkdir -p ${B0}/${V0}-2
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}-{0,1,2}
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Mount native
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+TEST `echo "TEST-FILE" > $M0/File`
+TEST `mkdir $M0/Dir`
+TEST `ln $M0/File $M0/Link`
+TEST `mknod $M0/FIFO p`
+
+TEST $CLI volume add-brick $V0 replica 4 $H0:$B0/$V0-3 force
+TEST $CLI volume add-brick $V0 replica 5 $H0:$B0/$V0-4 force
+TEST $CLI volume add-brick $V0 replica 6 $H0:$B0/$V0-5 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 5
+TEST gluster volume heal $V0 full
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-0/File
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-1/File
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-2/File
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-3/File
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-4/File
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-5/File
+
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-0/Link
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-1/Link
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-2/Link
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-3/Link
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-4/Link
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-5/Link
+
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-0/Dir
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-1/Dir
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-2/Dir
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-3/Dir
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-4/Dir
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-5/Dir
+
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-0/FIFO
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-1/FIFO
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-2/FIFO
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-3/FIFO
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-4/FIFO
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-5/FIFO
+
+EXPECT 10 stat -c '%s' $B0/$V0-0/File
+EXPECT 10 stat -c '%s' $B0/$V0-1/File
+EXPECT 10 stat -c '%s' $B0/$V0-2/File
+EXPECT 10 stat -c '%s' $B0/$V0-3/File
+EXPECT 10 stat -c '%s' $B0/$V0-4/File
+EXPECT 10 stat -c '%s' $B0/$V0-5/File
+
+EXPECT 3 stat -c '%h' $B0/$V0-0/Link
+EXPECT 3 stat -c '%h' $B0/$V0-1/Link
+EXPECT 3 stat -c '%h' $B0/$V0-2/Link
+EXPECT 3 stat -c '%h' $B0/$V0-3/Link
+EXPECT 3 stat -c '%h' $B0/$V0-4/Link
+EXPECT 3 stat -c '%h' $B0/$V0-5/Link
+
+EXPECT 'directory' stat -c '%F' $B0/$V0-0/Dir
+EXPECT 'directory' stat -c '%F' $B0/$V0-1/Dir
+EXPECT 'directory' stat -c '%F' $B0/$V0-2/Dir
+EXPECT 'directory' stat -c '%F' $B0/$V0-3/Dir
+EXPECT 'directory' stat -c '%F' $B0/$V0-4/Dir
+EXPECT 'directory' stat -c '%F' $B0/$V0-5/Dir
+EXPECT 'fifo' stat -c '%F' $B0/$V0-0/FIFO
+EXPECT 'fifo' stat -c '%F' $B0/$V0-1/FIFO
+EXPECT 'fifo' stat -c '%F' $B0/$V0-2/FIFO
+EXPECT 'fifo' stat -c '%F' $B0/$V0-3/FIFO
+EXPECT 'fifo' stat -c '%F' $B0/$V0-4/FIFO
+EXPECT 'fifo' stat -c '%F' $B0/$V0-5/FIFO
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1046624.t b/tests/bugs/replicate/bug-1046624.t
new file mode 100755
index 00000000000..9ae40879228
--- /dev/null
+++ b/tests/bugs/replicate/bug-1046624.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+## Start and create a volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+
+## Make sure automatic self-heal doesn't perturb our results.
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 stat-prefetch off
+TEST $CLI volume set $V0 background-self-heal-count 0
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Mount native
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 --use-readdirp=no
+
+TEST `echo "TEST-FILE" > $M0/File`
+TEST `mkdir $M0/Dir`
+TEST kill_brick $V0 $H0 $B0/${V0}-0
+
+TEST `ln -s $M0/File $M0/Link1`
+TEST `ln -s $M0/Dir $M0/Link2`
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+TEST `find $M0/ 2>/dev/null 1>/dev/null`
+TEST `find $M0/ | xargs stat 2>/dev/null 1>/dev/null`
+
+TEST stat $B0/${V0}-0/Link1
+TEST stat $B0/${V0}-0/Link2
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1058797.t b/tests/bugs/replicate/bug-1058797.t
new file mode 100644
index 00000000000..99ab3eb3a66
--- /dev/null
+++ b/tests/bugs/replicate/bug-1058797.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+#Test that the setuid bit is healed correctly.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+#Basic checks
+TEST glusterd
+
+#Create a 1x2 replica volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1};
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+
+# FUSE mount; create a file
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST touch $M0/file
+
+#Kill brick1 and set S_ISUID and S_ISGID bits from mount point
+kill_brick $V0 $H0 $B0/brick1
+TEST chmod +x,+s $M0/file
+
+#Get file permissions from backend brick0 and verify that S_ISUID is indeed set
+file_permissions1=`ls -l $B0/brick0/file | awk '{print $1}'| cut -d. -f1 | cut -d- -f2,3,4,5,6`
+setuid_bit1=`echo $file_permissions1 | cut -b3`
+EXPECT "s" echo $setuid_bit1
+
+#Restart volume and do lookup from mount to trigger heal
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST dd if=$M0/file of=/dev/null
+
+#Get file permissions from healed brick1 and verify that S_ISUID is indeed set
+file_permissions2=`ls -l $B0/brick1/file | awk '{print $1}' | cut -d. -f1 | cut -d- -f2,3,4,5,6`
+setuid_bit2=`echo $file_permissions2 | cut -b3`
+EXPECT "s" echo $setuid_bit2
+
+#Also compare the entire permission string, just to be sure
+EXPECT $file_permissions1 echo $file_permissions2
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0;
+
+cleanup;
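The ls/awk/cut pipeline above scrapes the mode string out of a directory
listing; the same permission check can be written more directly with stat (a
sketch, equivalent under GNU coreutils, not what the test ships):

    # "-rwsr-xr-x" -> "rwsr-xr-x"; byte 3 is "s" when S_ISUID is set
    file_permissions1=$(stat -c '%A' $B0/brick0/file | cut -b2-10)
    setuid_bit1=$(echo $file_permissions1 | cut -b3)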
diff --git a/tests/bugs/replicate/bug-1101647.t b/tests/bugs/replicate/bug-1101647.t
new file mode 100644
index 00000000000..148af987f20
--- /dev/null
+++ b/tests/bugs/replicate/bug-1101647.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume start $V0;
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+
+#Create base entry in indices/xattrop and indices/base_indices_holder
+echo "Data">$M0/file
+
+TEST $CLI volume heal $V0
+#Entries from indices/xattrop and indices/base_indices_holder should not be cleared after a heal.
+EXPECT 1 count_sh_entries $B0/$V0"1"
+EXPECT 1 count_sh_entries $B0/$V0"2"
+
+TEST kill_brick $V0 $H0 $B0/${V0}2
+echo "More data">>$M0/file
+
+EXPECT 1 echo `$CLI volume heal $V0 statistics heal-count|grep "Number of entries:"|head -n1|awk '{print $4}'`
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1130892.t b/tests/bugs/replicate/bug-1130892.t
new file mode 100644
index 00000000000..0840ffbb0b9
--- /dev/null
+++ b/tests/bugs/replicate/bug-1130892.t
@@ -0,0 +1,60 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+# Create a 1X2 replica
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+# Disable self-heal daemon
+TEST gluster volume set $V0 self-heal-daemon off
+
+# Disable all perf-xlators
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+
+# Volume start
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+# FUSE Mount
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+# Create files and dirs
+TEST mkdir -p $M0/one/two/
+TEST `echo "Carpe diem" > $M0/one/two/three`
+
+# Simulate disk-replacement
+TEST kill_brick $V0 $H0 $B0/${V0}-1
+TEST rm -rf $B0/${V0}-1/one
+TEST rm -rf $B0/${V0}-1/.glusterfs
+
+# Start force
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST stat $M0/one
+
+# Check pending xattrs
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 data
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 entry
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 metadata
+
+TEST gluster volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one/two
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_file_heal_done $B0/${V0}-0 $B0/${V0}-1 one/two/three
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1132102.t b/tests/bugs/replicate/bug-1132102.t
new file mode 100644
index 00000000000..c7dbbf818aa
--- /dev/null
+++ b/tests/bugs/replicate/bug-1132102.t
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+#This tests that mknod and create fops mark necessary pending changelog
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+cd $M0
+TEST mkfifo fifo
+TEST mknod block b 0 0
+TEST touch a
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/fifo trusted.afr.$V0-client-0 data
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/fifo trusted.afr.$V0-client-0 entry
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/fifo trusted.afr.$V0-client-0 metadata
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/block trusted.afr.$V0-client-0 data
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/block trusted.afr.$V0-client-0 entry
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/block trusted.afr.$V0-client-0 metadata
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/a trusted.afr.$V0-client-0 data
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/a trusted.afr.$V0-client-0 entry
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/a trusted.afr.$V0-client-0 metadata
+cleanup
diff --git a/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t b/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t
new file mode 100644
index 00000000000..f43c7cea551
--- /dev/null
+++ b/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+#### Test iatt and user xattr heal from lookup code path ####
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0,1,2}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+cd $M0
+TEST touch file
+TEST setfattr -n user.attribute1 -v "value" $B0/brick0/file
+TEST kill_brick $V0 $H0 $B0/brick2
+TEST chmod +x file
+iatt=$(stat -c "%g:%u:%A" file)
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+#Trigger metadataheal
+TEST stat file
+
+#iattrs must be matching
+iatt1=$(stat -c "%g:%u:%A" $B0/brick0/file)
+iatt2=$(stat -c "%g:%u:%A" $B0/brick1/file)
+iatt3=$(stat -c "%g:%u:%A" $B0/brick2/file)
+EXPECT $iatt echo $iatt1
+EXPECT $iatt echo $iatt2
+EXPECT $iatt echo $iatt3
+
+#xattrs must be matching
+xatt1_cnt=$(getfattr -d $B0/brick0/file|wc|awk '{print $1}')
+xatt2_cnt=$(getfattr -d $B0/brick1/file|wc|awk '{print $1}')
+xatt3_cnt=$(getfattr -d $B0/brick2/file|wc|awk '{print $1}')
+EXPECT "$xatt1_cnt" echo $xatt2_cnt
+EXPECT "$xatt1_cnt" echo $xatt3_cnt
+
+#changelogs must be zero
+xattr1=$(get_hex_xattr trusted.afr.$V0-client-2 $B0/brick0/file)
+xattr2=$(get_hex_xattr trusted.afr.$V0-client-2 $B0/brick1/file)
+EXPECT "000000000000000000000000" echo $xattr1
+EXPECT "000000000000000000000000" echo $xattr2
+
+cd -
+cleanup;
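A note on the trusted.afr.<volume>-client-<N> values these tests keep
checking: the 12-byte changelog is, as far as the AFR design documents
describe it, three network-byte-order 32-bit counters in the order
data/metadata/entry, which is why the helper takes data, entry, or metadata
as a selector. Inspecting one by hand looks like this (the value shown is an
example, not taken from a real run):

    getfattr -d -m . -e hex $B0/brick0/file | grep trusted.afr
    # trusted.afr.patchy-client-2=0x000000010000000000000000
    #   first 8 hex digits: data, next 8: metadata, last 8: entry;
    #   all-zero means no pending operations against that brick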
diff --git a/tests/bugs/replicate/bug-1139230.t b/tests/bugs/replicate/bug-1139230.t
new file mode 100644
index 00000000000..9ceac6c4f4e
--- /dev/null
+++ b/tests/bugs/replicate/bug-1139230.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+# Create a 1X2 replica
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+# Volume start
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+# FUSE Mount
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+TEST mkdir -p $M0/one
+
+# Kill a brick
+TEST kill_brick $V0 $H0 $B0/${V0}-1
+
+TEST `echo "A long" > $M0/one/two`
+
+# Start force
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_file_heal_done $B0/${V0}-0 $B0/${V0}-1 one/two
+
+# Pending xattrs should be set for all the bricks once self-heal is done
+# Check pending xattrs
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-0
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one trusted.afr.$V0-client-0
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one trusted.afr.$V0-client-1
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one trusted.afr.dirty
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one trusted.afr.dirty
+
+TEST `echo "time ago" > $M0/one/three`
+
+# Pending xattrs should be set for all the bricks once transaction is done
+# Check pending xattrs
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one/three trusted.afr.$V0-client-0
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one/three trusted.afr.$V0-client-1
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one/three trusted.afr.$V0-client-0
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one/three trusted.afr.$V0-client-1
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one/three trusted.afr.dirty
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one/three trusted.afr.dirty
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-765564.t b/tests/bugs/replicate/bug-765564.t
new file mode 100644
index 00000000000..098d225018f
--- /dev/null
+++ b/tests/bugs/replicate/bug-765564.t
@@ -0,0 +1,86 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+## Start and create a volume
+mkdir -p ${B0}/${V0}-0
+mkdir -p ${B0}/${V0}-1
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+
+TEST $CLI volume set $V0 performance.io-cache off;
+TEST $CLI volume set $V0 performance.write-behind off;
+TEST $CLI volume set $V0 performance.stat-prefetch off
+
+TEST $CLI volume start $V0;
+
+## Mount native
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+#returns success (0) if 'olddir' is absent
+#'olddir' must be absent in both replicas
+function rm_succeeded () {
+    local dir1=$1
+    [[ -d $B0/${V0}-0/$dir1 || -d $B0/${V0}-1/$dir1 ]] && return 1
+    return 0
+}
+
+#returns success (0) if 'newdir' is present
+#'newdir' must be present in both replicas
+function mv_succeeded () {
+    local dir1=$1
+    [[ -d $B0/${V0}-0/$dir1 && -d $B0/${V0}-1/$dir1 ]] && return 0
+    return 1
+}
+
+# returns zero on success: 'olddir' must be gone from both replicas, and
+# 'newdir' must be in the same state on both (the rmdir/mv race has one winner)
+function chk_backend_consistency(){
+    local dir1=$1
+    local dir2=$2
+    rm_succeeded $dir1 || return 1
+    mv_succeeded $dir2 && return 0
+    [[ -d $B0/${V0}-0/$dir2 || -d $B0/${V0}-1/$dir2 ]] && return 1
+    return 0
+}
+
+#concurrent removal/rename of dirs
+function rm_mv_correctness () {
+    ret=0
+    for i in {1..100}; do
+        mkdir $M0/"dir"$i
+        rmdir $M0/"dir"$i &
+        mv $M0/"dir"$i $M0/"adir"$i &
+        wait
+        chk_backend_consistency "dir"$i "adir"$i; tmp_ret=$?
+        (( ret += tmp_ret ))
+        rm -rf $M0/"dir"$i
+        rm -rf $M0/"adir"$i
+    done
+    return $ret
+}
+
+TEST touch $M0/a;
+TEST mv $M0/a $M0/b;
+
+#test rename fop when one of the bricks is down
+kill_brick ${V0} ${H0} ${B0}/${V0}-1;
+TEST touch $M0/h;
+TEST mv $M0/h $M0/1;
+
+TEST $CLI volume start $V0 force;
+
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1;
+find $M0 2>/dev/null 1>/dev/null;
+find $M0 | xargs stat 2>/dev/null 1>/dev/null;
+
+TEST rm_mv_correctness;
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup;
+
diff --git a/tests/bugs/replicate/bug-767585-gfid.t b/tests/bugs/replicate/bug-767585-gfid.t
new file mode 100755
index 00000000000..4176aabb544
--- /dev/null
+++ b/tests/bugs/replicate/bug-767585-gfid.t
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#Test cases to perform gfid-self-heal
+#file 'a' should be assigned a fresh gfid
+#file 'b' should be healed with gfid1 from brick1
+#file 'c' should be healed with gfid2 from brick2
+
+gfid1="0x8428b7193a764bf8be8046fb860b8993"
+gfid2="0x85ad91afa2f74694bf52c3326d048209"
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --direct-io-mode=enable
+touch $B0/${V0}0/a $B0/${V0}1/a
+touch $B0/${V0}0/b $B0/${V0}1/b
+touch $B0/${V0}0/c $B0/${V0}1/c
+
+TEST setfattr -n trusted.gfid -v $gfid1 $B0/${V0}0/b
+TEST setfattr -n trusted.gfid -v $gfid2 $B0/${V0}1/c
+
+sleep 2
+
+TEST stat $M0/a
+TEST stat $M0/b
+TEST stat $M0/c
+
+TEST gf_get_gfid_xattr $B0/${V0}0/a
+TEST gf_get_gfid_xattr $B0/${V0}1/a
+
+EXPECT "$gfid1" gf_get_gfid_xattr $B0/${V0}0/b
+EXPECT "$gfid1" gf_get_gfid_xattr $B0/${V0}1/b
+
+EXPECT "$gfid2" gf_get_gfid_xattr $B0/${V0}0/c
+EXPECT "$gfid2" gf_get_gfid_xattr $B0/${V0}1/c
+
+cleanup;
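trusted.gfid is the 16-byte file identity that gfid self-heal assigns or
copies here. Reading it back by hand is roughly what the gf_get_gfid_xattr
helper wraps (a sketch of the idea, not the helper's exact code):

    # prints e.g. 0x8428b7193a764bf8be8046fb860b8993
    getfattr -n trusted.gfid -e hex --only-values $B0/${V0}0/b 2>/dev/null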
diff --git a/tests/bugs/replicate/bug-802417.t b/tests/bugs/replicate/bug-802417.t
new file mode 100755
index 00000000000..ad411005ced
--- /dev/null
+++ b/tests/bugs/replicate/bug-802417.t
@@ -0,0 +1,108 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function write_file()
+{
+        path="$1"; shift
+        echo "$*" > "$path"
+}
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+## Start and create a volume
+mkdir -p ${B0}/${V0}-0
+mkdir -p ${B0}/${V0}-1
+mkdir -p ${B0}/${V0}-2
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}-{0,1,2}
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Make sure io-cache and write-behind don't interfere.
+TEST $CLI volume set $V0 performance.io-cache off;
+TEST $CLI volume set $V0 performance.write-behind off;
+TEST $CLI volume set $V0 performance.stat-prefetch off
+
+## Make sure automatic self-heal doesn't perturb our results.
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Mount native
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+## Create a file with some recognizably stale data.
+TEST write_file $M0/a_file "old_data"
+
+## Kill two of the bricks and write some newer data.
+TEST kill_brick ${V0} ${H0} ${B0}/${V0}-1
+TEST kill_brick ${V0} ${H0} ${B0}/${V0}-2
+TEST write_file $M0/a_file "new_data"
+
+## Bring all the bricks up and kill one so we do a partial self-heal.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+TEST kill_brick ${V0} ${H0} ${B0}/${V0}-2
+TEST dd if=${M0}/a_file of=/dev/null
+
+
+obs_path_0=${B0}/${V0}-0/a_file
+obs_path_1=${B0}/${V0}-1/a_file
+obs_path_2=${B0}/${V0}-2/a_file
+
+tgt_xattr_0="trusted.afr.${V0}-client-0"
+tgt_xattr_1="trusted.afr.${V0}-client-1"
+tgt_xattr_2="trusted.afr.${V0}-client-2"
+
+actual=$(afr_get_changelog_xattr $obs_path_0 $tgt_xattr_0)
+EXPECT "0x000000000000000000000000|^\$" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_0 $tgt_xattr_1)
+EXPECT "0x000000000000000000000000|^\$" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_0 $tgt_xattr_2)
+EXPECT "0x000000030000000000000000" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_1 $tgt_xattr_0)
+EXPECT "0x000000000000000000000000|^\$" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_1 $tgt_xattr_1)
+EXPECT "0x000000000000000000000000|^\$" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_1 $tgt_xattr_2)
+EXPECT "0x000000010000000000000000" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_2 $tgt_xattr_0)
+EXPECT "0x000000000000000000000000|^\$" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_2 $tgt_xattr_1)
+EXPECT "0x000000000000000000000000|^\$" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_2 $tgt_xattr_2)
+EXPECT "0x000000000000000000000000|^\$" echo $actual
+
+if [ "$EXIT_EARLY" = "1" ]; then
+        exit 0;
+fi
+
+## Finish up
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-821056.t b/tests/bugs/replicate/bug-821056.t
new file mode 100644
index 00000000000..02a9c78b6f0
--- /dev/null
+++ b/tests/bugs/replicate/bug-821056.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 eager-lock off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind on
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
+touch $M0/a
+
+#Open the file on fd 5
+exec 5>$M0/a
+realpath=$(gf_get_gfid_backend_file_path $B0/${V0}0 "a")
+
+kill_brick $V0 $H0 $B0/${V0}0
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+EXPECT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath"
+
+kill_brick $V0 $H0 $B0/${V0}0
+TEST gf_rm_file_and_gfid_link $B0/${V0}0 "a"
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+ls -l $M0/a 2>&1 > /dev/null #Make sure the file is re-created
+EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath"
+EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/a
+
+for i in {1..1024}; do
+        echo "open sesame" >&5
+done
+
+EXPECT_WITHIN $REOPEN_TIMEOUT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath"
+#close the fd
+exec 5>&-
+
+#Check that anon-fd based file is not leaking.
+EXPECT_WITHIN $REOPEN_TIMEOUT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath"
+cleanup;
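The exec idioms in that test are plain bash file-descriptor plumbing,
independent of gluster: exec 5>file opens descriptor 5 for writing, echo >&5
writes through it without reopening the file, and exec 5>&- closes it. A
standalone illustration:

    exec 5>/tmp/demo.$$              # open fd 5 on a scratch file
    for i in 1 2 3; do
            echo "line $i" >&5       # three writes over the same open fd
    done
    exec 5>&-                        # close fd 5
    wc -l </tmp/demo.$$              # prints 3
    rm -f /tmp/demo.$$

Keeping the fd open across brick restarts is exactly what forces AFR down the
anonymous-fd/reopen path the test is probing.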
diff --git a/tests/bugs/replicate/bug-830665.t b/tests/bugs/replicate/bug-830665.t
new file mode 100755
index 00000000000..3d2ec1145da
--- /dev/null
+++ b/tests/bugs/replicate/bug-830665.t
@@ -0,0 +1,120 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+function recreate {
+        rm -rf $1 && mkdir -p $1
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+## Start and create a volume
+recreate ${B0}/${V0}-0
+recreate ${B0}/${V0}-1
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+
+function volinfo_field()
+{
+        local vol=$1;
+        local field=$2;
+
+        $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+#EXPECT_WITHIN fails the test if the command it executes fails. This function
+#returns "" when the file doesn't exist
+function friendly_cat {
+        if [ ! -f $1 ];
+        then
+                echo "";
+        else
+                cat $1;
+        fi
+}
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Make sure stat-prefetch doesn't prevent self-heal checks.
+TEST $CLI volume set $V0 performance.stat-prefetch off;
+
+## Make sure automatic self-heal doesn't perturb our results.
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+## Mount NFS
+TEST mount_nfs $H0:/$V0 $N0 nolock;
+
+## Create some files and directories
+echo "test_data" > $N0/a_file;
+mkdir $N0/a_dir;
+echo "more_test_data" > $N0/a_dir/another_file;
+
+## Unmount and stop the volume.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+TEST $CLI volume stop $V0;
+
+# Recreate the brick. Note that because of http://review.gluster.org/#change,4202
+# we need to preserve and restore the volume ID or else the brick (and thus the
+# entire not-very-HA-any-more volume) won't start. When that bug is fixed, we can
+# remove the [gs]etxattr calls.
+volid=$(getfattr -e hex -n trusted.glusterfs.volume-id $B0/${V0}-0 2> /dev/null \
+        | grep = | cut -d= -f2)
+rm -rf $B0/${V0}-0;
+mkdir $B0/${V0}-0;
+setfattr -n trusted.glusterfs.volume-id -v $volid $B0/${V0}-0
+
+## Restart and remount. Note that we use actimeo=0 so that the stat calls
+## we need for self-heal don't get blocked by the NFS client.
+TEST $CLI volume start $V0;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 nolock,actimeo=0;
+
+## The Linux NFS client has a really charming habit of caching stuff right
+## after mount, even though we set actimeo=0 above. Life would be much easier
+## if NFS developers cared as much about correctness as they do about shaving
+## a few seconds off of benchmarks.
+ls -l $N0 &> /dev/null;
+sleep 5;
+
+## Force entry self-heal.
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST gluster volume heal $V0 full
+#ls -lR $N0 > /dev/null;
+
+## Do NOT check through the NFS mount here. That will force a new self-heal
+## check, but we want to test whether self-heal already happened.
+
+## Make sure everything's in order on the recreated brick.
+EXPECT_WITHIN $HEAL_TIMEOUT 'test_data' friendly_cat $B0/${V0}-0/a_file;
+EXPECT_WITHIN $HEAL_TIMEOUT 'more_test_data' friendly_cat $B0/${V0}-0/a_dir/another_file;
+
+if [ "$EXIT_EARLY" = "1" ]; then
+        exit 0;
+fi
+
+## Finish up
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-853680.t b/tests/bugs/replicate/bug-853680.t
new file mode 100755
index 00000000000..806c3d142a1
--- /dev/null
+++ b/tests/bugs/replicate/bug-853680.t
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# Bug 853680
+#
+# Test that io-threads least-rate-limit throttling functions as expected. Set
+# a limit, perform a few operations with a least-priority mount and verify
+# said operations take a minimum amount of time according to the limit.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+
+#Accept min val
+TEST $CLI volume set $V0 performance.least-rate-limit 0
+#Accept some value in between
+TEST $CLI volume set $V0 performance.least-rate-limit 1035
+#Accept max val INT_MAX
+TEST $CLI volume set $V0 performance.least-rate-limit 2147483647
+
+#Reject other values
+TEST ! $CLI volume set $V0 performance.least-rate-limit 2147483648
+TEST ! $CLI volume set $V0 performance.least-rate-limit -8
+TEST ! $CLI volume set $V0 performance.least-rate-limit abc
+TEST ! $CLI volume set $V0 performance.least-rate-limit 0.0
+TEST ! $CLI volume set $V0 performance.least-rate-limit -10.0
+TEST ! $CLI volume set $V0 performance.least-rate-limit 1%
+
+# set rate limit to 1 operation/sec
+TEST $CLI volume set $V0 performance.least-rate-limit 1
+
+# use client-pid=-1 for least priority mount
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --client-pid=-1
+
+# create a few files and verify this takes more than a few seconds
+date1=`date +%s`
+TEST touch $M0/file{0..2}
+date2=`date +%s`
+
+optime=$(($date2 - $date1))
+TEST [ $optime -ge 3 ]
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-859581.t b/tests/bugs/replicate/bug-859581.t
new file mode 100755
index 00000000000..d8b45a257a1
--- /dev/null
+++ b/tests/bugs/replicate/bug-859581.t
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs --direct-io-mode=yes --use-readdirp=no --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+mkdir -p $M0/dir1/dir2
+
+TEST rm -f $(gf_get_gfid_backend_file_path $B0/${V0}1 "dir1")
+TEST rmdir $B0/${V0}1/dir1/dir2
+
+TEST stat $M0/dir1/dir2
+
+TEST [ -d $B0/${V0}1/dir1/dir2 ]
+TEST [ ! -d $(gf_get_gfid_backend_file_path $B0/${V0}1 "dir1") ]
+
+# Stop the volume to flush caches and force symlink recreation
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+# Till now, protocol/server was not doing inode linking as part of readdirp.
+# But as part of the user-serviceable snapshots patch, changes were introduced
+# to do inode linking in protocol/server in readdirp. So now, to make sure
+# the gfid handle of dir1 is healed, an explicit lookup has to be sent on it.
+# Otherwise, when ls -l is done just on the mount point $M0, no lookup is sent
+# on the entries received as part of readdirp, because the inodes for those
+# entries were already linked by readdirp itself. I.e. instead of doing
+# "ls -l $M0", it has to be the command below.
+ls -l $M0/dir1;
+
+TEST [ -h $(gf_get_gfid_backend_file_path $B0/${V0}1 "dir1") ]
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
+
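Both of the preceding tests poke at the gfid handles kept under a brick's
.glusterfs directory: the layout is <brick>/.glusterfs/<aa>/<bb>/<full-gfid>,
where aa and bb are the first two byte pairs of the gfid, and the handle is a
symlink for directories (hence the -h test) and a hard link for regular
files. A rough bash equivalent of what gf_get_gfid_backend_file_path
resolves (a sketch, not the helper's actual code):

    function gfid_path {
        local brick=$1 rel=$2
        local gfid=$(getfattr -n trusted.gfid -e hex $brick/$rel 2>/dev/null |
                     awk -F0x '/trusted.gfid/ {print $2}')
        # 0xc2e75dde... -> .glusterfs/c2/e7/c2e75dde-97f3-46e7-842d-1076a8e699f8
        echo $brick/.glusterfs/${gfid:0:2}/${gfid:2:2}/${gfid:0:8}-${gfid:8:4}-${gfid:12:4}-${gfid:16:4}-${gfid:20:12}
    }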
diff --git a/tests/bugs/replicate/bug-865825.t b/tests/bugs/replicate/bug-865825.t
new file mode 100755
index 00000000000..ffb2e0f6437
--- /dev/null
+++ b/tests/bugs/replicate/bug-865825.t
@@ -0,0 +1,82 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+## Start and create a volume
+mkdir -p ${B0}/${V0}-0
+mkdir -p ${B0}/${V0}-1
+mkdir -p ${B0}/${V0}-2
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}-{0,1,2}
+
+function volinfo_field()
+{
+        local vol=$1;
+        local field=$2;
+
+        $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Make sure io-cache and write-behind don't interfere.
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 performance.io-cache off;
+TEST $CLI volume set $V0 performance.quick-read off;
+TEST $CLI volume set $V0 performance.write-behind off;
+TEST $CLI volume set $V0 performance.stat-prefetch off
+
+## Make sure automatic self-heal doesn't perturb our results.
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Mount native
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+## Create a file with some recognizable contents.
+echo "test_data" > $M0/a_file;
+
+## Unmount.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+## Mess with the flags as though brick-0 accuses brick-2 while brick-1 is
+## missing its brick-2 changelog altogether.
+value=0x000000010000000000000000
+setfattr -n trusted.afr.${V0}-client-2 -v $value $B0/${V0}-0/a_file
+setfattr -x trusted.afr.${V0}-client-2 $B0/${V0}-1/a_file
+echo "wrong_data" > $B0/${V0}-2/a_file
+
+gluster volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+gluster volume heal $V0 full
+
+## Make sure brick 2 now has the correct contents.
+EXPECT_WITHIN $HEAL_TIMEOUT "test_data" cat $B0/${V0}-2/a_file
+
+if [ "$EXIT_EARLY" = "1" ]; then
+        exit 0;
+fi
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-880898.t b/tests/bugs/replicate/bug-880898.t
new file mode 100644
index 00000000000..123e7e16425
--- /dev/null
+++ b/tests/bugs/replicate/bug-880898.t
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2
+TEST $CLI volume start $V0
+pkill glusterfs
+uuid=""
+for line in $(cat $GLUSTERD_WORKDIR/glusterd.info)
+do
+        if [[ $line == UUID* ]]
+        then
+                uuid=`echo $line | sed -r 's/^.{5}//'`
+        fi
+done
+
+#Command execution should fail reporting that the bricks are not running.
+TEST ! $CLI volume heal $V0 info
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-884328.t b/tests/bugs/replicate/bug-884328.t
new file mode 100644
index 00000000000..acc8e542240
--- /dev/null
+++ b/tests/bugs/replicate/bug-884328.t
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST check_option_help_presence "cluster.quorum-type"
+TEST check_option_help_presence "cluster.quorum-count"
+cleanup;
diff --git a/tests/bugs/replicate/bug-886998.t b/tests/bugs/replicate/bug-886998.t
new file mode 100644
index 00000000000..bcac235ff09
--- /dev/null
+++ b/tests/bugs/replicate/bug-886998.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+# This tests that the replicate trash directory (.landfill) has the following
+# properties.
+# Note: This is to have backward compatibility with 3.3 glusterfs
+# In the latest releases this dir is present inside .glusterfs of brick.
+# 1) lookup of trash dir fails
+# 2) readdir does not show this directory
+# 3) Self-heal does not do any self-heal of these directories.
+gfid1="0xc2e75dde97f346e7842d1076a8e699f8"
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --direct-io-mode=enable
+
+TEST mkdir $B0/${V0}1/.landfill
+TEST setfattr -n trusted.gfid -v $gfid1 $B0/${V0}1/.landfill
+TEST mkdir $B0/${V0}0/.landfill
+TEST setfattr -n trusted.gfid -v $gfid1 $B0/${V0}0/.landfill
+
+TEST ! stat $M0/.landfill
+EXPECT "" echo $(ls -a $M0 | grep ".landfill")
+
+TEST rmdir $B0/${V0}0/.landfill
+#Force a conservative merge and it should not create .landfill
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}0/
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/
+
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}1/
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/
+
+EXPECT "" echo $(ls -a $M0 | grep ".landfill")
+TEST ! stat $B0/${V0}0/.landfill
+TEST stat $B0/${V0}1/.landfill
+
+#TEST that the dir is not deleted even when xattrs suggest to delete
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}0/
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/
+
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}1/
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/
+
+EXPECT "" echo $(ls -a $M0 | grep ".landfill")
+TEST ! stat $B0/${V0}0/.landfill
+TEST stat $B0/${V0}1/.landfill
+cleanup;
diff --git a/tests/bugs/replicate/bug-888174.t b/tests/bugs/replicate/bug-888174.t
new file mode 100644
index 00000000000..8c70265513d
--- /dev/null
+++ b/tests/bugs/replicate/bug-888174.t
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This tests if flush, fsync wakes up the delayed post-op or not.
+#If it is not woken up, INODELK from the next command waits
+#for post-op-delay secs. There would be pending changelog even after the command
+#completes.
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/r2_0 $H0:$B0/r2_1
+
+TEST $CLI volume set $V0 cluster.eager-lock on
+
+TEST $CLI volume set $V0 performance.flush-behind off
+EXPECT "off" volume_option $V0 performance.flush-behind
+
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 3
+EXPECT "3" volume_option $V0 cluster.post-op-delay-secs
+
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0
+
+#Check that INODELK MAX latency is not in the order of seconds
+TEST gluster volume profile $V0 start
+for i in {1..5}
+do
+        echo hi > $M0/a
+done
+#Test if the MAX INODELK fop latency is of the order of seconds.
+inodelk_max_latency=$($CLI volume profile $V0 info | grep INODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}")
+
+TEST [ -z $inodelk_max_latency ]
+
+TEST dd of=$M0/a if=/dev/urandom bs=1024k count=10 conv=fsync
+#Check for no trace of pending changelog. Flush should make sure of it.
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_0/a trusted.afr.dirty
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_1/a trusted.afr.dirty
+
+
+dd of=$M0/a if=/dev/urandom bs=1024k count=1024 2>/dev/null &
+p=$!
+#trigger graph switches, tests for fsync not leaving any pending flags
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+
+kill -TERM $p
+#wait for dd to exit
+wait > /dev/null 2>&1
+
+#Goal is to check that no stale pending changelog is left behind
+sleep 5
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_0/a trusted.afr.dirty
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_1/a trusted.afr.dirty
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-913051.t b/tests/bugs/replicate/bug-913051.t
new file mode 100644
index 00000000000..1c218397276
--- /dev/null
+++ b/tests/bugs/replicate/bug-913051.t
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+
+cleanup;
+
+#Test that afr opens the file on the bricks that were offline at the time of
+# open after the brick comes online. This tests for writev, readv triggering
+# open-fd-fix in afr.
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+TEST mkdir $M0/dir
+TEST touch $M0/dir/a
+TEST touch $M0/dir/b
+echo abc > $M0/dir/b
+
+TEST wfd=`fd_available`
+TEST fd_open $wfd "w" $M0/dir/a
+TEST rfd=`fd_available`
+TEST fd_open $rfd "r" $M0/dir/b
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+#check that the files are not opened on brick-0
+TEST stat $M0/dir/a
+realpatha=$(gf_get_gfid_backend_file_path $B0/${V0}0 "dir/a")
+EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpatha"
+EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/dir/a
+
+TEST stat $M0/dir/b
+realpathb=$(gf_get_gfid_backend_file_path $B0/${V0}0 "dir/b")
+EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpathb"
+EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/dir/b
+
+#attempt self-heal so that the files are created on brick-0
+
+TEST dd if=$M0/dir/a of=/dev/null bs=1024k
+TEST dd if=$M0/dir/b of=/dev/null bs=1024k
+
+#trigger writev for attempting open-fd-fix in afr
+TEST fd_write $wfd "open sesame"
+
+#trigger readv for attempting open-fd-fix in afr
+TEST fd_cat $rfd
+
+EXPECT_WITHIN $REOPEN_TIMEOUT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpatha"
+EXPECT_WITHIN $REOPEN_TIMEOUT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpathb"
+
+TEST fd_close $wfd
+TEST fd_close $rfd
+cleanup;
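A gloss on the inodelk_max_latency extraction used in several of these tests:
volume profile reports latencies in microseconds, so egrep "[0-9]{7,}" only
matches when the MAX latency has at least seven digits, i.e. one second or
more, and TEST [ -z ... ] then asserts no such match occurred. A toy run of
the same filter:

    echo "12345678" | egrep "[0-9]{7,}"   # matches: ~12.3 seconds, test would fail
    echo "54321"    | egrep "[0-9]{7,}"   # no match: sub-second, test passes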
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}3
+TEST $CLI volume set $V0 cluster.eager-lock on
+TEST $CLI volume start $V0
+
+## Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+TEST mkdir $M0/dir{1..10};
+TEST touch $M0/dir{1..10}/files{1..10};
+
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}4 $H0:$B0/${V0}5
+
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-918437-sh-mtime.t b/tests/bugs/replicate/bug-918437-sh-mtime.t
new file mode 100644
index 00000000000..04ac02f6337
--- /dev/null
+++ b/tests/bugs/replicate/bug-918437-sh-mtime.t
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function get_mtime {
+        local f=$1
+        stat $f | grep Modify | awk '{print $2 $3}' | cut -f1 -d'.'
+}
+
+function file_exists {
+        if [ -f $1 ]; then echo "Y"; else echo "N"; fi
+}
+cleanup;
+
+## Tests if mtime is correct after self-heal.
+TEST glusterd
+TEST pidof glusterd
+TEST mkdir -p $B0/gfs0/brick0{1,2}
+TEST $CLI volume create $V0 replica 2 transport tcp $H0:$B0/gfs0/brick01 $H0:$B0/gfs0/brick02
+TEST $CLI volume set $V0 nfs.disable on
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --direct-io-mode=enable
+# file 'a' is healed from brick02 to brick01, whereas file 'b' is healed from
+# brick01 to brick02
+
+TEST cp -p /etc/passwd $M0/a
+TEST cp -p /etc/passwd $M0/b
+
+#Store mtimes before self-heals
+TEST modify_atstamp=$(get_mtime $B0/gfs0/brick02/a)
+TEST modify_btstamp=$(get_mtime $B0/gfs0/brick02/b)
+
+TEST $CLI volume stop $V0
+TEST gf_rm_file_and_gfid_link $B0/gfs0/brick01 a
+TEST gf_rm_file_and_gfid_link $B0/gfs0/brick02 b
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+#TODO remove these 2 lines once heal-full is fixed in v2.
+TEST stat $M0/a
+TEST stat $M0/b
+
+TEST gluster volume heal $V0 full
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" file_exists $B0/gfs0/brick01/a
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" file_exists $B0/gfs0/brick02/b
+EXPECT_WITHIN $HEAL_TIMEOUT 0 afr_get_pending_heal_count $V0
+
+size=`stat -c '%s' /etc/passwd`
+EXPECT $size stat -c '%s' $B0/gfs0/brick01/a
+
+TEST modify_atstamp1=$(get_mtime $B0/gfs0/brick01/a)
+TEST modify_atstamp2=$(get_mtime $B0/gfs0/brick02/a)
+EXPECT $modify_atstamp echo $modify_atstamp1
+EXPECT $modify_atstamp echo $modify_atstamp2
+
+TEST modify_btstamp1=$(get_mtime $B0/gfs0/brick01/b)
+TEST modify_btstamp2=$(get_mtime $B0/gfs0/brick02/b)
+EXPECT $modify_btstamp echo $modify_btstamp1
+EXPECT $modify_btstamp echo $modify_btstamp2
+cleanup;
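The get_mtime helper above parses human-readable stat output and drops the
sub-second part with cut. A terser equivalent (a sketch, not part of the
patch; it assumes GNU coreutils stat, which the script already relies on
for 'stat -c %s') reads the epoch mtime directly:

        function get_mtime {
                # %Y prints the modification time in whole seconds since
                # the epoch, avoiding locale-sensitive "Modify:" parsing
                stat -c '%Y' $1
        }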
diff --git a/tests/bugs/replicate/bug-921231.t b/tests/bugs/replicate/bug-921231.t
new file mode 100644
index 00000000000..93c642beb1e
--- /dev/null
+++ b/tests/bugs/replicate/bug-921231.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# This test writes to the same file with 2 fds and checks that eager-lock
+# does not cause extra delay because of post-op-delay-secs
+cleanup;
+
+function write_to_file {
+        dd of=$M0/1 if=/dev/zero bs=1024k count=128 oflag=append >/dev/null 2>&1
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume set $V0 eager-lock on
+TEST $CLI volume set $V0 post-op-delay-secs 3
+TEST $CLI volume set $V0 client-log-level DEBUG
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+TEST $CLI volume set $V0 ensure-durability off
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+write_to_file &
+write_to_file &
+wait
+#Test whether the MAX [F]INODELK fop latency is of the order of seconds.
+inodelk_max_latency=$($CLI volume profile $V0 info | grep INODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}")
+TEST [ -z $inodelk_max_latency ]
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-957877.t b/tests/bugs/replicate/bug-957877.t
new file mode 100644
index 00000000000..12901723880
--- /dev/null
+++ b/tests/bugs/replicate/bug-957877.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0;
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+kill_brick $V0 $H0 $B0/${V0}0
+TEST touch $M0/f1
+TEST setfattr -n "user.foo" -v "test" $M0/f1
+
+BRICK=$B0"/${V0}1"
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+
+# Wait for self-heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT '1' count_sh_entries $BRICK;
+
+TEST getfattr -n "user.foo" $B0/${V0}0/f1;
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-966018.t b/tests/bugs/replicate/bug-966018.t
new file mode 100644
index 00000000000..be4d0b97b88
--- /dev/null
+++ b/tests/bugs/replicate/bug-966018.t
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#This tests whether eager-lock blocks metadata operations on nfs/fuse mounts.
+#If the delayed post-op is not woken up, the INODELK from the next command
+#waits for post-op-delay secs.
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/r2_0 $H0:$B0/r2_1
+TEST $CLI volume set $V0 ensure-durability off
+TEST $CLI volume set $V0 cluster.eager-lock on
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 3
+
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 nolock;
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0
+echo 1 > $N0/1 && chmod +x $N0/1
+echo 1 > $M0/1 && chmod +x $M0/1
+
+#Check that the INODELK MAX latency is not of the order of seconds.
+#Test whether the MAX INODELK fop latency is of the order of seconds.
+inodelk_max_latency=$($CLI volume profile $V0 info | grep INODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}")
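+#($6 is the Max-Latency column of 'volume profile info', in microseconds;
+#a value of 7 or more digits, i.e. one second or longer, makes the egrep
+#match and the TEST below fail)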
+
+TEST [ -z $inodelk_max_latency ]
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-976800.t b/tests/bugs/replicate/bug-976800.t
new file mode 100644
index 00000000000..35a40a3c72e
--- /dev/null
+++ b/tests/bugs/replicate/bug-976800.t
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# This test checks that no fds remain open on the brick
+# after the file is closed on the mount. It runs dd with
+# conv=fsync to exercise afr's fsync codepath.
+cleanup;
+
+function is_fd_open {
+        local v=$1
+        local h=$2
+        local b=$3
+        local bpid=$(get_brick_pid $v $h $b)
+        ls -l /proc/$bpid/fd | grep -w "\-> $b/1"
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 ensure-durability off
+TEST $CLI volume set $V0 eager-lock off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST dd of=$M0/1 if=/dev/zero bs=1k count=1 conv=fsync
+TEST ! is_fd_open $V0 $H0 $B0/${V0}0
+cleanup;
diff --git a/tests/bugs/replicate/bug-977797.t b/tests/bugs/replicate/bug-977797.t
new file mode 100755
index 00000000000..3ff14ecf3d5
--- /dev/null
+++ b/tests/bugs/replicate/bug-977797.t
@@ -0,0 +1,95 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 open-behind off
+TEST $CLI volume set $V0 quick-read off
+TEST $CLI volume set $V0 read-ahead off
+TEST $CLI volume set $V0 write-behind off
+TEST $CLI volume set $V0 io-cache off
+TEST $CLI volume set $V0 background-self-heal-count 0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+
+TEST mkdir -p $M0/a
+TEST `echo "GLUSTERFS" > $M0/a/file`
+
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+
+TEST chown root $M0/a
+TEST chown root $M0/a/file
+TEST `echo "GLUSTER-FILE-SYSTEM" > $M0/a/file`
+TEST mkdir $M0/a/b
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0;
+
+
+
+TEST kill_brick $V0 $H0 $B0/$V0"2"
+
+TEST chmod 757 $M0/a
+TEST chmod 757 $M0/a/file
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1;
+
+TEST dd if=$M0/a/file of=/dev/null bs=1024k
+
+b1c0dir=$(afr_get_specific_changelog_xattr $B0/$V0"1"/a \
+          trusted.afr.$V0-client-0 "entry")
+b1c1dir=$(afr_get_specific_changelog_xattr $B0/$V0"1"/a \
+          trusted.afr.$V0-client-1 "entry")
+b2c0dir=$(afr_get_specific_changelog_xattr \
+          $B0/$V0"2"/a trusted.afr.$V0-client-0 "entry")
+b2c1dir=$(afr_get_specific_changelog_xattr \
+          $B0/$V0"2"/a trusted.afr.$V0-client-1 "entry")
+
+
+b1c0f=$(afr_get_specific_changelog_xattr $B0/$V0"1"/a/file \
+        trusted.afr.$V0-client-0 "data")
+b1c1f=$(afr_get_specific_changelog_xattr $B0/$V0"1"/a/file \
+        trusted.afr.$V0-client-1 "data")
+b2c0f=$(afr_get_specific_changelog_xattr $B0/$V0"2"/a/file \
+        trusted.afr.$V0-client-0 "data")
+b2c1f=$(afr_get_specific_changelog_xattr $B0/$V0"2"/a/file \
+        trusted.afr.$V0-client-1 "data")
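+#After the read above heals the files, each changelog segment should read
+#all-zero, or the xattr may be absent entirely, hence "00000000|^$" below.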
$B0/$V0"2"/a/file \ + trusted.afr.$V0-client-1 "data") + +EXPECT "00000000|^$" echo $b1c0f +EXPECT "00000000|^$" echo $b1c1f +EXPECT "00000000|^$" echo $b2c0f +EXPECT "00000000|^$" echo $b2c1f + +EXPECT "00000000|^$" echo $b1c0dir +EXPECT "00000000|^$" echo $b1c1dir +EXPECT "00000000|^$" echo $b2c0dir +EXPECT "00000000|^$" echo $b2c1dir + +## Finish up +TEST $CLI volume stop $V0; +EXPECT 'Stopped' volinfo_field $V0 'Status'; + +TEST $CLI volume delete $V0; +TEST ! $CLI volume info $V0; + +cleanup; diff --git a/tests/bugs/replicate/bug-978794.t b/tests/bugs/replicate/bug-978794.t new file mode 100644 index 00000000000..8e43e74bf79 --- /dev/null +++ b/tests/bugs/replicate/bug-978794.t @@ -0,0 +1,29 @@ +#!/bin/bash +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../fileio.rc + + +# This test opens 100 fds and triggers graph switches to check if fsync +# as part of graph-switch causes crash or not. + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} +TEST $CLI volume start $V0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 +TEST touch $M0/{1..100} +for i in {1..100}; do fd[$i]=`fd_available`; fd_open ${fd[$i]} 'w' $M0/$i; done +TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{2,3} +TEST $CLI volume rebalance $V0 start force +EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0 +TEST cat $M0/{1..100} +for i in {1..100}; do fd_write ${fd[$i]} 'abc'; done +TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{4,5} +TEST $CLI volume rebalance $V0 start force +EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0 +for i in {1..100}; do fd_write ${fd[$i]} 'abc'; done +TEST cat $M0/{1..100} +cleanup diff --git a/tests/bugs/replicate/bug-979365.t b/tests/bugs/replicate/bug-979365.t new file mode 100755 index 00000000000..b1396c23348 --- /dev/null +++ b/tests/bugs/replicate/bug-979365.t @@ -0,0 +1,47 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc + +#This script checks that ensure-durability option enables/disables afr +#sending fsyncs +cleanup; + +function num_fsyncs { + $CLI volume profile $V0 info | grep -w FSYNC | wc -l +} + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1} +TEST $CLI volume set $V0 ensure-durability on +TEST $CLI volume set $V0 eager-lock off +TEST $CLI volume start $V0 +TEST $CLI volume profile $V0 start +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 +TEST kill_brick $V0 $H0 $B0/${V0}0 +TEST dd of=$M0/a if=/dev/zero bs=1024k count=10 +#fsyncs take a while to complete. +sleep 5 + +# There can be zero or more fsyncs, depending on the order +# in which the writes reached the server, in turn deciding +# whether they were treated as "appending" writes or not. + +TEST [[ $(num_fsyncs) -ge 0 ]] +#Stop the volume to erase the profile info of old operations +TEST $CLI volume profile $V0 stop +TEST $CLI volume stop $V0 +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 +#Disable ensure-durability now to disable fsyncs in afr. +TEST $CLI volume set $V0 ensure-durability off +TEST $CLI volume start $V0 +TEST kill_brick $V0 $H0 $B0/${V0}0 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 +TEST $CLI volume profile $V0 start +TEST dd of=$M0/a if=/dev/zero bs=1024k count=10 +#fsyncs take a while to complete. 
+sleep 5
+TEST [[ $(num_fsyncs) -eq 0 ]]
+
+cleanup
diff --git a/tests/bugs/replicate/bug-986905.t b/tests/bugs/replicate/bug-986905.t
new file mode 100755
index 00000000000..f4f7386ebc4
--- /dev/null
+++ b/tests/bugs/replicate/bug-986905.t
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This script checks if hardlinks that are created while a brick is down are
+#healed properly.
+
+cleanup;
+function get_inum {
+        ls -i $1 | awk '{print $1}'
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST touch $M0/a
+TEST ln $M0/a $M0/link_a
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST ls -l $M0
+inum=$(get_inum $B0/${V0}0/a)
+EXPECT "$inum" get_inum $B0/${V0}0/link_a
+cleanup
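Many of the tests above set and assert trusted.afr.* changelog values
directly. Each trusted.afr.<volume>-client-<N> (and trusted.afr.dirty)
value is 12 bytes, read as three network-byte-order 32-bit counters for
pending data, metadata and entry operations. A small helper along these
lines (a sketch, not part of the patch; it assumes getfattr from the
attr package) makes those hex dumps readable:

        #!/bin/bash
        # usage: decode-afr.sh <file-on-brick> <xattr-name>
        # prints the pending data/metadata/entry counters of an AFR
        # changelog xattr
        v=$(getfattr -n "$2" -e hex --only-values "$1" 2>/dev/null) || exit 1
        v=${v#0x}
        echo "data=$((16#${v:0:8})) metadata=$((16#${v:8:8})) entry=$((16#${v:16:8}))"

For example, decode-afr.sh $B0/r2_0/a trusted.afr.dirty printing
"data=0 metadata=0 entry=0" corresponds to the all-zero
0x000000000000000000000000 state the EXPECTs above look for.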