From 50ac9c87020d0e9712491c04afbb208a01c6ecbd Mon Sep 17 00:00:00 2001
From: karthik-us
Date: Sat, 18 Mar 2017 13:44:56 +0530
Subject: cluster/afr: Undo pending xattrs only on the up bricks

Problem:
While doing a conservative merge, the pending xattr is reset even on a brick
that is down. When that brick comes back up, the heal considers it as the
source and removes the entries on the other bricks, which leads to data loss.

Fix:
Undo the pending xattrs only on the bricks which are up.

> Change-Id: I18436fa0bb1faa5f60531b357dea3f6b20446303
> BUG: 1433571
> Signed-off-by: karthik-us
> Reviewed-on: https://review.gluster.org/16913
> Reviewed-by: Pranith Kumar Karampuri
> Smoke: Gluster Build System
> NetBSD-regression: NetBSD Build System
> CentOS-regression: Gluster Build System
> Reviewed-by: Ravishankar N

(cherry picked from commit f91596e6566c605e70a31a60523d11f78a097c3c)

Change-Id: Id20c9ce53ee59f005d977494903247e2a8024ed1
BUG: 1436231
Signed-off-by: karthik-us
Reviewed-on: https://review.gluster.org/16956
Smoke: Gluster Build System
NetBSD-regression: NetBSD Build System
CentOS-regression: Gluster Build System
Reviewed-by: Ravishankar N
Reviewed-by: Pranith Kumar Karampuri
---
 .../bug-1433571-undo-pending-only-on-up-bricks.t | 89 ++++++++++++++++++++++
 1 file changed, 89 insertions(+)
 create mode 100644 tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t

(limited to 'tests')

diff --git a/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t b/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t
new file mode 100644
index 00000000000..271abb4fe9a
--- /dev/null
+++ b/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t
@@ -0,0 +1,89 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
$(dirname $0)/../../volume.rc +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2} +TEST $CLI volume start $V0 +TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0; + +# Disable self-heal-daemon, client-side-heal and set quorum-type to none +TEST $CLI volume set $V0 cluster.self-heal-daemon off +TEST $CLI volume set $V0 cluster.data-self-heal off +TEST $CLI volume set $V0 cluster.metadata-self-heal off +TEST $CLI volume set $V0 cluster.entry-self-heal off +TEST $CLI volume set $V0 cluster.quorum-type none + +#Kill bricks 0 & 1 and create a file to have pending entry for 0 & 1 on brick 2 +TEST kill_brick $V0 $H0 $B0/${V0}0 +TEST kill_brick $V0 $H0 $B0/${V0}1 +echo "file 1" >> $M0/f1 +EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2 +EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2 + +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 + +#Kill bricks 1 & 2 and create a file to have pending entry for 1 & 2 on brick 0 +TEST kill_brick $V0 $H0 $B0/${V0}1 +TEST kill_brick $V0 $H0 $B0/${V0}2 +echo "file 2" >> $M0/f2 +EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0 +EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0 + +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2 + +#Kill bricks 2 & 0 and create a file to have pending entry for 2 & 0 on brick 1 +TEST kill_brick $V0 $H0 $B0/${V0}2 +TEST kill_brick $V0 $H0 $B0/${V0}0 +echo "file 3" >> $M0/f3 +EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1 +EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1 + +TEST $CLI volume start $V0 force +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0 +EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2 + +#Kill brick 0 and turn on the client side heal and do ls to trigger the heal. +#The pending xattrs on bricks 1 & 2 should have pending entry on brick 0. +TEST kill_brick $V0 $H0 $B0/${V0}0 +TEST $CLI volume set $V0 cluster.data-self-heal on +TEST $CLI volume set $V0 cluster.metadata-self-heal on +TEST $CLI volume set $V0 cluster.entry-self-heal on + +TEST ls $M0 +EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1 +EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1 +EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2 +EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2 + +#Bring back all the bricks and trigger the heal again by doing ls. Now the +#pending xattrs on all the bricks should be 0. 
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST ls $M0
+
+TEST cat $M0/f1
+TEST cat $M0/f2
+TEST cat $M0/f3
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2
+
+#Check whether all the bricks contain all 3 files.
+EXPECT "3" echo $(ls $B0/${V0}0 | wc -l)
+EXPECT "3" echo $(ls $B0/${V0}1 | wc -l)
+EXPECT "3" echo $(ls $B0/${V0}2 | wc -l)
+
+cleanup;
-- cgit

From a6d313d12c98cf533c6bbb10f491dd2ec48ca89c Mon Sep 17 00:00:00 2001
From: Ravishankar N
Date: Wed, 19 Apr 2017 16:40:05 +0530
Subject: afr: don't do a post-op on a brick if op failed

Problem:
In afr-v2, self-blaming xattrs are not there by design. But if the FOP failed
on a brick due to an error other than ENOTCONN (or even due to ENOTCONN, but
we regained connection before the post-op was wound), we wind the post-op on
the failed brick as well, leading to setting self-blaming xattrs on that
brick. This can lead to undesired results such as healing of files in
split-brain, etc.

Fix:
If a fop failed on a brick on which the pre-op was successful, do not perform
the post-op on it. This also produces the desired effect of not resetting the
dirty xattr on the brick, which is how it should be, because if the fop failed
on a brick there is no reason to clear the dirty bit, which actually serves as
an indication of the failure.

> Reviewed-on: https://review.gluster.org/16976
> Smoke: Gluster Build System
> NetBSD-regression: NetBSD Build System
> CentOS-regression: Gluster Build System
> Reviewed-by: Pranith Kumar Karampuri

Change-Id: I5f1caf4d1b39f36cf8093ccef940118638caa9c4
BUG: 1443319
Signed-off-by: Ravishankar N
Reviewed-on: https://review.gluster.org/17082
Smoke: Gluster Build System
NetBSD-regression: NetBSD Build System
CentOS-regression: Gluster Build System
Reviewed-by: Pranith Kumar Karampuri
---
 .../bug-1438255-do-not-mark-self-accusing-xattrs.t | 46 ++++++++++++++++++++++
 1 file changed, 46 insertions(+)
 create mode 100644 tests/bugs/replicate/bug-1438255-do-not-mark-self-accusing-xattrs.t

(limited to 'tests')

diff --git a/tests/bugs/replicate/bug-1438255-do-not-mark-self-accusing-xattrs.t b/tests/bugs/replicate/bug-1438255-do-not-mark-self-accusing-xattrs.t
new file mode 100644
index 00000000000..edfd0d7820d
--- /dev/null
+++ b/tests/bugs/replicate/bug-1438255-do-not-mark-self-accusing-xattrs.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
$(dirname $0)/../../volume.rc +cleanup; + +NEW_USER=bug1438255 +NEW_UID=1438255 +NEW_GID=1438255 + +TEST groupadd -o -g ${NEW_GID} ${NEW_USER}-${NEW_GID} +TEST useradd -o -M -u ${NEW_UID} -g ${NEW_GID} -K MAIL_DIR=/dev/null ${NEW_USER} + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2} +TEST $CLI volume set $V0 cluster.self-heal-daemon off +TEST $CLI volume set $V0 cluster.data-self-heal off +TEST $CLI volume set $V0 cluster.metadata-self-heal off +TEST $CLI volume set $V0 cluster.entry-self-heal off + +TEST $CLI volume start $V0 +EXPECT 'Started' volinfo_field $V0 'Status' +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 + +TEST touch $M0/FILE +TEST kill_brick $V0 $H0 $B0/${V0}2 +chown $NEW_UID:$NEW_GID $M0/FILE +EXPECT "000000000000000100000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0/FILE +EXPECT "000000000000000100000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1/FILE +TEST $CLI volume start $V0 force +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2 + +# setfattr done as NEW_USER fails on 3rd brick with EPERM but suceeds on +# the first 2 and hence on the mount. +su -m bug1438255 -c "setfattr -n user.myattr -v myvalue $M0/FILE" +TEST [ $? -eq 0 ] +EXPECT "000000000000000200000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0/FILE +EXPECT "000000000000000200000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1/FILE +# Brick 3 does not have any self-blaming pending xattr. +TEST ! getfattr -n trusted.afr.$V0-client-2 $B0/${V0}2/FILE + +TEST userdel --force ${NEW_USER} +TEST groupdel ${NEW_USER}-${NEW_GID} +cleanup + + -- cgit From 6a5d9764e687b74b0686d492120584e88a6f8110 Mon Sep 17 00:00:00 2001 From: Saravanakumar Arumugam Date: Fri, 8 Jul 2016 19:10:45 +0530 Subject: geo-rep: filter out xtime attribute during getxattr georep gsyncd's xtime needs to filtered irrespective of any process access. This way, we can avoid (unnecessarily)syncing xtime attribute to slave, which may raise permission denied errors. test case modified to check for xtime xattr only in backend. 
Back port of>
>Change-Id: I2390b703048d5cc747d91fa2ae884dc55de58669
>BUG: 1353952
>Signed-off-by: Saravanakumar Arumugam
>Signed-off-by: Mohammed Rafi KC
>Reviewed-on: https://review.gluster.org/14880
>Smoke: Gluster Build System
>Reviewed-by: Kotresh HR
>Tested-by: Kotresh HR
>NetBSD-regression: NetBSD Build System
>CentOS-regression: Gluster Build System
>Reviewed-by: Pranith Kumar Karampuri

Change-Id: Ibdee6f3093648a7e0fb1e2b6be8172e604ab657f
BUG: 1441574
Signed-off-by: Mohammed Rafi KC
Reviewed-on: https://review.gluster.org/17045
Smoke: Gluster Build System
NetBSD-regression: NetBSD Build System
CentOS-regression: Gluster Build System
Reviewed-by: Kotresh HR
Reviewed-by: Niels de Vos
---
 tests/basic/geo-replication/marker-xattrs.t | 17 +++++++----------
 tests/bugs/geo-replication/bug-1296496.t | 3 ++-
 tests/bugs/geo-replication/bug-877293.t | 4 ++--
 3 files changed, 11 insertions(+), 13 deletions(-)

(limited to 'tests')

diff --git a/tests/basic/geo-replication/marker-xattrs.t b/tests/basic/geo-replication/marker-xattrs.t
index dd5483d7e95..e5b26a6bd5b 100755
--- a/tests/basic/geo-replication/marker-xattrs.t
+++ b/tests/basic/geo-replication/marker-xattrs.t
@@ -24,11 +24,11 @@ TEST touch $M0
 vol_uuid=$(get_volume_mark $M1)
 xtime=trusted.glusterfs.$vol_uuid.xtime
-TEST "getfattr -n $xtime $M1 | grep -q ${xtime}="
+TEST "getfattr -n $xtime $B0/${V0}-1 | grep -q ${xtime}="
 TEST kill_brick $V0 $H0 $B0/${V0}-0
-TEST "getfattr -n $xtime $M1 | grep -q ${xtime}="
+TEST "getfattr -n $xtime $B0/${V0}-1 | grep -q ${xtime}="
 TEST getfattr -d -m. -e hex $M1
 EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
@@ -61,13 +61,13 @@ vol_uuid=$(get_volume_mark $M1)
 xtime=trusted.glusterfs.$vol_uuid.xtime
 stime=trusted.glusterfs.$vol_uuid.stime
-stime_val=$(getfattr -e hex -n $xtime $M1 | grep ${xtime}= | cut -f2 -d'=')
+stime_val=$(getfattr -e hex -n $xtime $B0/${V0}-1 | grep ${xtime}= | cut -f2 -d'=')
 TEST "setfattr -n $stime -v $stime_val $B0/${V0}-1"
-TEST "getfattr -n $xtime $M1 | grep -q ${xtime}="
+TEST "getfattr -n $xtime $B0/${V0}-1 | grep -q ${xtime}="
 TEST kill_brick $V0 $H0 $B0/${V0}-0
-TEST "getfattr -n $xtime $M1 | grep -q ${xtime}="
+TEST "getfattr -n $xtime $B0/${V0}-1 | grep -q ${xtime}="
 TEST "getfattr -n $stime $M1 | grep -q ${stime}="
 TEST getfattr -d -m. -e hex $M1
@@ -98,12 +98,9 @@ TEST touch $M0
 vol_uuid=$(get_volume_mark $M1)
 xtime=trusted.glusterfs.$vol_uuid.xtime
-TEST "getfattr -n $xtime $M1 | grep -q ${xtime}="
+TEST "getfattr -n $xtime $B0/${V0}-0 | grep -q ${xtime}="
-TEST kill_brick $V0 $H0 $B0/${V0}-0
-
-#Stripe doesn't tolerate ENOTCONN
-TEST ! "getfattr -n $xtime $M1 | grep -q ${xtime}="
+TEST "getfattr -n $xtime $B0/${V0}-1 | grep -q ${xtime}="
 EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
 EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
diff --git a/tests/bugs/geo-replication/bug-1296496.t b/tests/bugs/geo-replication/bug-1296496.t
index 703fda65b84..a157be7849a 100644
--- a/tests/bugs/geo-replication/bug-1296496.t
+++ b/tests/bugs/geo-replication/bug-1296496.t
@@ -29,7 +29,8 @@ xtime="trusted.glusterfs.$vol_uuid.xtime"
 #TEST xtime
 TEST ! getfattr -n $xtime $M0
-TEST getfattr -n $xtime $M1
+TEST getfattr -n $xtime $B0/${V0}-0
+TEST getfattr -n $xtime $B0/${V0}-1
 #TEST stime
 slave_uuid=$(uuidgen)
diff --git a/tests/bugs/geo-replication/bug-877293.t b/tests/bugs/geo-replication/bug-877293.t
index 542774ab900..c5205e8109e 100755
--- a/tests/bugs/geo-replication/bug-877293.t
+++ b/tests/bugs/geo-replication/bug-877293.t
@@ -26,11 +26,11 @@ TEST touch $M0
 vol_uuid=`getfattr -n trusted.glusterfs.volume-mark -ehex $M1 | sed -n 's/^trusted.glusterfs.volume-mark=0x//p' | cut -b5-36 | sed 's/\([a-f0-9]\{8\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)/\1-\2-\3-\4-/'`
 xtime=trusted.glusterfs.$vol_uuid.xtime
-TEST "getfattr -n $xtime $M1 | grep -q ${xtime}="
+TEST "getfattr -n $xtime $B0/${V0}-0 | grep -q ${xtime}="
 TEST kill_brick $V0 $H0 $B0/${V0}-0
-TEST "getfattr -n $xtime $M1 | grep -q ${xtime}="
+TEST "getfattr -n $xtime $B0/${V0}-1 | grep -q ${xtime}="
 EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
 EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
-- cgit

From 2dcb19813e7dbb2afd2f482ed9a3401371325b1d Mon Sep 17 00:00:00 2001
From: Sanoj Unnikrishnan
Date: Wed, 22 Mar 2017 15:02:12 +0530
Subject: Fixes quota aux mount failure

The aux mount is created on the first limit/remove_limit/list command and it
remains until the volume is stopped / deleted / quota is disabled, at which
point we do a lazy unmount. If the process is uncleanly terminated, the mount
entry remains and we get a (Transport disconnected) error on subsequent
attempts to run quota list/limit-usage/remove commands.

Second issue: there is also a risk of an inadvertent rm -rf on
/var/run/gluster causing data loss for the user. Ideally, /var/run is a temp
path for application use and should not cause any data loss to persistent
storage.

Solution:
1) Unmount the aux mount after each use.
2) Clean up any stale mount before mounting.

One caveat with doing a mount/unmount on each command is that we cannot use
the same mount point for both list and limit commands. The reason for this is
that the list command needs the mount to be accessible in the cli after the
response from glusterd, so it could be unmounted by a limit command executed
in parallel (had we used the same mount point). Hence we use separate mount
points for the list and limit commands.
> Reviewed-on: https://review.gluster.org/16938
> NetBSD-regression: NetBSD Build System
> Smoke: Gluster Build System
> Reviewed-by: Manikandan Selvaganesh
> CentOS-regression: Gluster Build System
> Reviewed-by: Raghavendra G
> Reviewed-by: Atin Mukherjee
> (cherry picked from commit 2ae4b4058691b324535d802f4e6d24cce89a10e5)

Change-Id: I4f9e39da2ac2b65941399bffb6440db8a6ba59d0
BUG: 1449782
Signed-off-by: Sanoj Unnikrishnan
Reviewed-on: https://review.gluster.org/17242
Smoke: Gluster Build System
NetBSD-regression: NetBSD Build System
CentOS-regression: Gluster Build System
Reviewed-by: Raghavendra G
---
 tests/basic/ec/quota.t | 1 -
 tests/basic/quota-ancestry-building.t | 1 -
 tests/basic/quota-anon-fd-nfs.t | 1 -
 tests/basic/quota-nfs.t | 1 -
 tests/basic/quota.t | 6 ---
 tests/basic/quota_aux_mount.t | 53 ++++++++++++++++++++++
 tests/bugs/cli/bug-1022905.t | 1 -
 tests/bugs/distribute/bug-1099890.t | 1 -
 tests/bugs/distribute/bug-1161156.t | 1 -
 ...ve-quota-related-option-after-disabling-quota.t | 1 -
 tests/bugs/glusterfs/bug-848251.t | 1 -
 tests/bugs/posix/bug-990028.t | 1 -
 tests/bugs/quota/bug-1087198.t | 1 -
 ...436-calculate-quota-cksum-during-snap-restore.t | 1 -
 tests/volume.rc | 19 +++++++-
 15 files changed, 71 insertions(+), 19 deletions(-)
 create mode 100755 tests/basic/quota_aux_mount.t

(limited to 'tests')

diff --git a/tests/basic/ec/quota.t b/tests/basic/ec/quota.t
index b023240b87e..c9612c8b76a 100755
--- a/tests/basic/ec/quota.t
+++ b/tests/basic/ec/quota.t
@@ -40,7 +40,6 @@ EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "8.0MB" quotausage "/test"
 TEST rm $M0/test/file2.txt
 EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "0Bytes" quotausage "/test"
 TEST $CLI volume stop $V0
-EXPECT "1" get_aux
 rm -f $QDD
 cleanup;
diff --git a/tests/basic/quota-ancestry-building.t b/tests/basic/quota-ancestry-building.t
index 99c971859e8..5d2f4a7dd66 100755
--- a/tests/basic/quota-ancestry-building.t
+++ b/tests/basic/quota-ancestry-building.t
@@ -65,7 +65,6 @@ exec 5>&-
 exec 6>&-
 TEST $CLI volume stop $V0
-EXPECT "1" get_aux
 rm -f $QDD
 cleanup;
diff --git a/tests/basic/quota-anon-fd-nfs.t b/tests/basic/quota-anon-fd-nfs.t
index c6b01553b02..d911cc90b87 100755
--- a/tests/basic/quota-anon-fd-nfs.t
+++ b/tests/basic/quota-anon-fd-nfs.t
@@ -98,7 +98,6 @@ $CLI volume statedump $V0 all
 EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
 TEST $CLI volume stop $V0
-EXPECT "1" get_aux
 rm -f $QDD
diff --git a/tests/basic/quota-nfs.t b/tests/basic/quota-nfs.t
index 74fde400bd1..663a8da90ad 100755
--- a/tests/basic/quota-nfs.t
+++ b/tests/basic/quota-nfs.t
@@ -58,7 +58,6 @@ TEST rm -f $N0/$deep/newfile_2
 EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
 TEST $CLI volume stop $V0
-EXPECT "1" get_aux
 rm -f $QDD
 cleanup;
diff --git a/tests/basic/quota.t b/tests/basic/quota.t
index 17d571060e1..7f8b21de6f8 100755
--- a/tests/basic/quota.t
+++ b/tests/basic/quota.t
@@ -40,12 +40,8 @@ EXPECT 'on' volinfo_field $V0 'features.quota'
 EXPECT 'on' volinfo_field $V0 'features.inode-quota'
 EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs'
-#Wait for the auxiliarymount to come up
-sleep 3
 TEST $CLI volume quota $V0 limit-usage /test_dir 100MB
-# Checking for auxiliary mount
-EXPECT "0" get_aux
 TEST $CLI volume quota $V0 limit-usage /test_dir/in_test_dir 150MB
@@ -231,9 +227,7 @@ EXPECT 'off' volinfo_field $V0 'features.quota'
 EXPECT 'off' volinfo_field $V0 'features.inode-quota'
 EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
-# aux mount should be removed
 TEST $CLI volume stop $V0;
-EXPECT "1" get_aux
 rm -f $QDD
 cleanup;
diff --git a/tests/basic/quota_aux_mount.t b/tests/basic/quota_aux_mount.t
new file mode 100755
index 00000000000..78d7f47e373
--- /dev/null
+++ b/tests/basic/quota_aux_mount.t
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+##-------------------------------------------------------------
+## Tests to verify that aux mount is unmounted after each quota
+## command executes.
+##-------------------------------------------------------------
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
+
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT '4' brick_count $V0
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST $GFS -s $H0 --volfile-id $V0 $M0;
+
+TEST mkdir -p $M0/test_dir/
+
+TEST $CLI volume quota $V0 enable
+EXPECT 'on' volinfo_field $V0 'features.quota'
+EXPECT 'on' volinfo_field $V0 'features.inode-quota'
+
+TEST $CLI volume quota $V0 limit-usage /test_dir 150MB
+EXPECT "1" get_limit_aux
+TEST $CLI volume quota $V0 limit-objects /test_dir 10
+EXPECT "1" get_limit_aux
+EXPECT "150.0MB" quota_hard_limit "/test_dir";
+EXPECT "1" get_list_aux
+EXPECT "10" quota_object_hard_limit "/test_dir";
+EXPECT "1" get_list_aux
+
+TEST $CLI volume quota $V0 remove /test_dir/
+EXPECT "1" get_limit_aux
+TEST $CLI volume quota $V0 remove-objects /test_dir
+EXPECT "1" get_limit_aux
+
+TEST $CLI volume quota $V0 disable
+
+TEST $CLI volume stop $V0;
+
+cleanup;
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=1447344
diff --git a/tests/bugs/cli/bug-1022905.t b/tests/bugs/cli/bug-1022905.t
index 1d8981e0e9c..ee629e970d9 100644
--- a/tests/bugs/cli/bug-1022905.t
+++ b/tests/bugs/cli/bug-1022905.t
@@ -32,7 +32,6 @@ TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
 TEST $CLI volume reset $V0 force;
 TEST $CLI volume stop $V0
-EXPECT "1" get_aux
 TEST $CLI volume delete $V0
 cleanup;
diff --git a/tests/bugs/distribute/bug-1099890.t b/tests/bugs/distribute/bug-1099890.t
index 40f70d4938b..1a19ba880c0 100644
--- a/tests/bugs/distribute/bug-1099890.t
+++ b/tests/bugs/distribute/bug-1099890.t
@@ -123,7 +123,6 @@ EXPECT "1" is_dht_linkfile "$B0/${V0}1/zz"
 force_umount $M0
 TEST $CLI volume stop $V0
-EXPECT "1" get_aux
 UMOUNT_LOOP ${B0}/${V0}{1,2}
 rm -f ${B0}/brick{1,2}
diff --git a/tests/bugs/distribute/bug-1161156.t b/tests/bugs/distribute/bug-1161156.t
index 44a234c60dc..fed90e7f478 100755
--- a/tests/bugs/distribute/bug-1161156.t
+++ b/tests/bugs/distribute/bug-1161156.t
@@ -50,7 +50,6 @@ TEST ! mv $N0/dir/newfile_3 $N0/newdir/
 umount_nfs $N0
 TEST $CLI volume stop $V0
-EXPECT "1" get_aux
 rm -f $QDD
diff --git a/tests/bugs/glusterd/bug-765230-remove-quota-related-option-after-disabling-quota.t b/tests/bugs/glusterd/bug-765230-remove-quota-related-option-after-disabling-quota.t
index 9fe55a3d9df..de48c091c7e 100755
--- a/tests/bugs/glusterd/bug-765230-remove-quota-related-option-after-disabling-quota.t
+++ b/tests/bugs/glusterd/bug-765230-remove-quota-related-option-after-disabling-quota.t
@@ -54,7 +54,6 @@ EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
 ## Finish up
 TEST $CLI volume stop $V0
-EXPECT "1" get_aux
 EXPECT 'Stopped' volinfo_field $V0 'Status';
 TEST $CLI volume delete $V0;
diff --git a/tests/bugs/glusterfs/bug-848251.t b/tests/bugs/glusterfs/bug-848251.t
index ed3caa34b01..69ffe680f7f 100644
--- a/tests/bugs/glusterfs/bug-848251.t
+++ b/tests/bugs/glusterfs/bug-848251.t
@@ -48,6 +48,5 @@ EXPECT "80%" quota_list
 EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR
 TEST rm -rf $MOUNTDIR
 TEST $CLI volume stop $V0
-EXPECT "1" get_aux
 cleanup;
diff --git a/tests/bugs/posix/bug-990028.t b/tests/bugs/posix/bug-990028.t
index d04bb2b4af1..c86421492cd 100755
--- a/tests/bugs/posix/bug-990028.t
+++ b/tests/bugs/posix/bug-990028.t
@@ -153,6 +153,5 @@ __init;
 links_in_same_directory;
 links_across_directories;
 TEST $CLI volume stop $V0
-EXPECT "1" get_aux
 cleanup
diff --git a/tests/bugs/quota/bug-1087198.t b/tests/bugs/quota/bug-1087198.t
index 0694b251d9f..95133085f13 100644
--- a/tests/bugs/quota/bug-1087198.t
+++ b/tests/bugs/quota/bug-1087198.t
@@ -78,7 +78,6 @@ TEST grep -e "\"Usage is above soft limit:.*used by /\"" -- $BRICK_LOG_DIR/*
 EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
 TEST $CLI volume stop $V0
-EXPECT "1" get_aux
 rm -f $QDD
diff --git a/tests/bugs/snapshot/bug-1202436-calculate-quota-cksum-during-snap-restore.t b/tests/bugs/snapshot/bug-1202436-calculate-quota-cksum-during-snap-restore.t
index 366937245f2..addc05917d8 100644
--- a/tests/bugs/snapshot/bug-1202436-calculate-quota-cksum-during-snap-restore.t
+++ b/tests/bugs/snapshot/bug-1202436-calculate-quota-cksum-during-snap-restore.t
@@ -27,7 +27,6 @@ EXPECT '1' get_snap_count CLI_1 $V0
 TEST $CLI_1 volume stop $V0
 EXPECT 'Stopped' volinfo_field $V0 'Status'
-EXPECT "1" get_aux
 TEST $CLI_1 snapshot restore $($CLI_1 snapshot list)
 EXPECT '0' get_snap_count CLI_1 $V0
diff --git a/tests/volume.rc b/tests/volume.rc
index 5ea75a51d22..f95c0013b2e 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -561,8 +561,9 @@ function num_graphs
 function get_aux()
 {
 ##Check if a auxiliary mount is there
+local aux_suffix=$1
 local rundir=$(gluster --print-statedumpdir)
-local pidfile="${rundir}/${V0}.pid"
+local pidfile="${rundir}/${V0}$aux_suffix.pid"
 if [ -f $pidfile ];
 then
 local pid=$(cat ${rundir}/${V0}.pid)
@@ -579,6 +580,18 @@ else
 fi
 }
+function get_list_aux()
+{
+# check for quota list aux mount
+ get_aux "_quota_list"
+}
+
+function get_limit_aux()
+{
+# check for quota limit aux mount
+ get_aux "_quota_limit"
+}
+
 function get_bitd_count {
 ps auxww | grep glusterfs | grep bitd.pid | grep -v grep | wc -l
 }
@@ -657,6 +670,10 @@ function quota_hl_exceeded()
 }
+function quota_object_hard_limit()
+{
+ quota_object_list_field $1 2
+}
 function scrub_status()
 {
-- cgit

From ba5115bccdc08f395a88d682d3df4af0e4bbfb0a Mon Sep 17 00:00:00 2001
From: Gaurav Yadav
Date: Mon, 22 May 2017 23:25:47 +0530
Subject: libglusterfs : Fix crash in glusterd while peer probing

glusterd crashes when the port is being set explicitly to
a range that is outside (greater than) the range of the short data type.
Eg. sysctl net.ipv4.ip_local_reserved_ports="49152-49156"

In the above case glusterd crashes while parsing the port. With this fix
glusterd will be able to handle port ranges between INT_MIN and INT_MAX.

> Reviewed-on: https://review.gluster.org/17359
> Smoke: Gluster Build System
> NetBSD-regression: NetBSD Build System
> CentOS-regression: Gluster Build System
> Reviewed-by: Samikshan Bairagya
> Reviewed-by: Atin Mukherjee
> Reviewed-by: Niels de Vos
> Reviewed-by: Jeff Darcy

Change-Id: I7c75ee67937b0e3384502973d96b1c36c89e0fe1
BUG: 1447523
Signed-off-by: Gaurav Yadav
Reviewed-on: https://review.gluster.org/17505
Smoke: Gluster Build System
NetBSD-regression: NetBSD Build System
CentOS-regression: Gluster Build System
Reviewed-by: Atin Mukherjee
---
 tests/bugs/glusterd/bug-1454418-seg-fault.t | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)
 create mode 100644 tests/bugs/glusterd/bug-1454418-seg-fault.t

(limited to 'tests')

diff --git a/tests/bugs/glusterd/bug-1454418-seg-fault.t b/tests/bugs/glusterd/bug-1454418-seg-fault.t
new file mode 100644
index 00000000000..eafaa55ede8
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1454418-seg-fault.t
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+
+cleanup;
+
+## Setting Port number in specific range
+sysctl net.ipv4.ip_local_reserved_ports="24007-24008,32765-32768,49152-49156"
+
+## Start a 2 node virtual cluster
+TEST launch_cluster 2;
+
+
+## Peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+sysctl net.ipv4.ip_local_reserved_ports="
+"
+
+cleanup;
+
-- cgit

From 96e7452155c3fd1b160a97d88c03f1bf31b0be97 Mon Sep 17 00:00:00 2001
From: Atin Mukherjee
Date: Sat, 18 Mar 2017 16:29:10 +0530
Subject: rpc: bump up conn->cleanup_gen in rpc_clnt_reconnect_cleanup

Commit 086436a introduced a generation number (cleanup_gen) to ensure that
the rpc layer doesn't end up cleaning up the connection object if the
application layer has already destroyed it. Bumping up cleanup_gen was done
only in rpc_clnt_connection_cleanup (). However, the same is needed in
rpc_clnt_reconnect_cleanup () too, because without it, if the object gets
destroyed through the reconnect event in the application layer, the rpc
layer will still end up trying to delete the object, resulting in a double
free and a crash.

Peer probing an invalid host/IP was the basic test to catch this issue.
Cherry picked from commit 39e09ad1e0e93f08153688c31433c38529f93716:
> Change-Id: Id5332f3239cb324cead34eb51cf73d426733bd46
> BUG: 1433578
> Signed-off-by: Atin Mukherjee
> Reviewed-on: https://review.gluster.org/16914
> Smoke: Gluster Build System
> NetBSD-regression: NetBSD Build System
> Reviewed-by: Milind Changire
> CentOS-regression: Gluster Build System
> Reviewed-by: Jeff Darcy

Change-Id: Id5332f3239cb324cead34eb51cf73d426733bd46
BUG: 1462447
Signed-off-by: Niels de Vos
Reviewed-on: https://review.gluster.org/17743
Smoke: Gluster Build System
Reviewed-by: Milind Changire
CentOS-regression: Gluster Build System
---
 .../glusterd/bug-1433578-invalid-peer-glusterd-crash.t | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
 create mode 100644 tests/bugs/glusterd/bug-1433578-invalid-peer-glusterd-crash.t

(limited to 'tests')

diff --git a/tests/bugs/glusterd/bug-1433578-invalid-peer-glusterd-crash.t b/tests/bugs/glusterd/bug-1433578-invalid-peer-glusterd-crash.t
new file mode 100644
index 00000000000..1aea8bc134d
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1433578-invalid-peer-glusterd-crash.t
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST ! $CLI peer probe invalid-peer
+
+TEST pidof glusterd;
+cleanup;
-- cgit

From 117daf0c792f52b4c3fbc685b2f6b15841c81772 Mon Sep 17 00:00:00 2001
From: Ravishankar N <>
Date: Mon, 17 Jul 2017 11:23:43 +0530
Subject: afr: mark non sources as sinks in metadata heal

Backport of https://review.gluster.org/#/c/17717/

Problem:
In a 3-way replica, when the source brick does not have pending xattrs for
the sinks, but the 2 sinks blame each other, metadata heal was not happening
because we were not setting all non-sources as sinks.

Fix:
Mark all non-sources as sinks, like it is done in data and entry heal.

Change-Id: I534978940f5087302e307fcc810a48ffe898ce08
BUG: 1471613
Signed-off-by: Ravishankar N
Reviewed-on: https://review.gluster.org/17784
Smoke: Gluster Build System
Reviewed-by: Pranith Kumar Karampuri
CentOS-regression: Gluster Build System
---
 .../bug-1468279-source-not-blaming-sinks.t | 64 ++++++++++++++++++++++
 1 file changed, 64 insertions(+)
 create mode 100644 tests/bugs/replicate/bug-1468279-source-not-blaming-sinks.t

(limited to 'tests')

diff --git a/tests/bugs/replicate/bug-1468279-source-not-blaming-sinks.t b/tests/bugs/replicate/bug-1468279-source-not-blaming-sinks.t
new file mode 100644
index 00000000000..054a4adb90d
--- /dev/null
+++ b/tests/bugs/replicate/bug-1468279-source-not-blaming-sinks.t
@@ -0,0 +1,64 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0;
+TEST touch $M0/file
+
+# Kill B1, create a pending metadata heal.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST setfattr -n user.xattr -v value1 $M0/file
+EXPECT "0000000000000010000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1/file
+EXPECT "0000000000000010000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2/file
+
+# Kill B2, heal from B3 to B1.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+$CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" afr_get_specific_changelog_xattr $B0/${V0}2/file trusted.afr.$V0-client-0 "metadata"
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+
+# Create another pending metadata heal.
+TEST setfattr -n user.xattr -v value2 $M0/file
+EXPECT "0000000000000010000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0/file
+EXPECT "0000000000000010000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2/file
+
+# Kill B1, heal from B3 to B2
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+$CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" afr_get_specific_changelog_xattr $B0/${V0}2/file trusted.afr.$V0-client-1 "metadata"
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+
+# ALL bricks up again.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+# B1 and B2 blame each other, B3 doesn't blame anyone.
+EXPECT "0000000000000010000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0/file
+EXPECT "0000000000000010000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1/file
+EXPECT "0000000000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2/file
+EXPECT "0000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2/file
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+cleanup;
-- cgit
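For reference, the pending-xattr checks that these tests script through get_hex_xattr can also be reproduced by hand on a brick backend. This is only an illustrative sketch; the volume name "testvol" and brick path "/bricks/testvol0" are hypothetical placeholders, not values taken from the patches above.

# Dump all AFR changelog xattrs of a file directly on a brick backend.
getfattr -d -m . -e hex /bricks/testvol0/file

# Read one pending xattr; a non-zero value means heals are pending towards that client/brick.
getfattr -n trusted.afr.testvol-client-1 -e hex /bricks/testvol0/file

# Trigger a heal and watch the pending count drain.
gluster volume heal testvol
gluster volume heal testvol info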