author    Mohit Agrawal <moagrawa@redhat.com>    2018-05-31 12:29:35 +0530
committer Amar Tumballi <amarts@redhat.com>      2018-07-11 03:13:24 +0000
commit    e57cbae0bcc3d8649b869eda5ec20f3c6a6d34f0 (patch)
tree      52059d8e2e38143717b88e6f2683cfacf142463a /tests
parent    e31c7a7c0c1ea3f6e931935226fb976a92779ba7 (diff)
dht: Inconsistent permission for directories after brick stop/start
Problem: Access permissions on directories are inconsistent after the down
sub-volumes are brought back up. For directories, dht_setattr first winds a
call on the MDS and, only once that call completes, winds calls on the
non-MDS subvolumes. At revalidate time, DHT compares only the uid/gid against
the stbuf uid/gid and sets a heal flag if either differs, so a permission
mismatch goes unnoticed.

Solution: Add a condition in dht_revalidate_cbk that also compares the
permissions, so that the flag which triggers dht_dir_attr_heal gets set.

BUG: 1584517
Change-Id: I3e039607148005015b5d93364536158380d4c5aa
fixes: bz#1584517
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
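The following is a minimal, self-contained sketch of the kind of check the fix
describes, not the actual patch: the real code lives in dht_revalidate_cbk and
operates on GlusterFS iatt structures, while every name below (dir_attrs,
needs_attr_heal_old/new, the sample uid/gid/mode values) is invented purely
for illustration.

    /* Illustrative only: before the fix, only uid/gid mismatches raise the
     * heal flag; after the fix, a difference in permission bits does too. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/types.h>

    /* Hypothetical stand-in for the directory attributes compared
     * during revalidate. */
    struct dir_attrs {
        uid_t  uid;
        gid_t  gid;
        mode_t mode; /* permission bits, e.g. 0755 */
    };

    /* Old behaviour: only uid/gid differences trigger a heal. */
    static bool needs_attr_heal_old(const struct dir_attrs *cached,
                                    const struct dir_attrs *fresh)
    {
        return cached->uid != fresh->uid || cached->gid != fresh->gid;
    }

    /* Fixed behaviour: a permission mismatch also triggers a heal. */
    static bool needs_attr_heal_new(const struct dir_attrs *cached,
                                    const struct dir_attrs *fresh)
    {
        return cached->uid != fresh->uid ||
               cached->gid != fresh->gid ||
               (cached->mode & 07777) != (fresh->mode & 07777);
    }

    int main(void)
    {
        /* Same owner/group, but the mode changed (say chmod 777 happened
         * while a brick was down): the old check misses it. */
        struct dir_attrs stale = { .uid = 1000, .gid = 1000, .mode = 0755 };
        struct dir_attrs fresh = { .uid = 1000, .gid = 1000, .mode = 0777 };

        printf("old check heals: %s\n",
               needs_attr_heal_old(&stale, &fresh) ? "yes" : "no");
        printf("new check heals: %s\n",
               needs_attr_heal_new(&stale, &fresh) ? "yes" : "no");
        return 0;
    }

In the scenario exercised by the test below, once such a flag is raised the
lookup path can go on to heal the directory attributes on the bricks that were
down, which is what the script verifies on the backend.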
Diffstat (limited to 'tests')
-rw-r--r--  tests/bugs/bug-1584517.t  70
1 file changed, 70 insertions(+), 0 deletions(-)
diff --git a/tests/bugs/bug-1584517.t b/tests/bugs/bug-1584517.t
new file mode 100644
index 00000000000..7f48015a034
--- /dev/null
+++ b/tests/bugs/bug-1584517.t
@@ -0,0 +1,70 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../dht.rc
+cleanup;
+#This test case verifies that directory attributes (uid/gid/permissions)
+#are healed after a brick stop/start. It changes the attributes of a
+#directory while one DHT subvolume and one AFR child are down, then
+#starts the volume with force and runs a lookup; the attributes should
+#then be healed on the restarted bricks at the backend.
+
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume start $V0
+TEST useradd dev -M
+TEST groupadd QA
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+TEST mkdir $M0/dironedown
+
+TEST kill_brick $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "5" online_brick_count
+
+TEST kill_brick $V0 $H0 $B0/${V0}3
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "4" online_brick_count
+
+TEST kill_brick $V0 $H0 $B0/${V0}4
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "3" online_brick_count
+
+TEST kill_brick $V0 $H0 $B0/${V0}5
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "2" online_brick_count
+
+TEST chown dev $M0/dironedown
+TEST chgrp QA $M0/dironedown
+TEST chmod 777 $M0/dironedown
+
+#store the permissions for comparison
+permission_onedown=`ls -l $M0 | grep dironedown | awk '{print $1}'`
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "6" online_brick_count
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+#Run a lookup on the directory to hit the revalidate code path in dht
+#so that the user attributes are healed
+
+TEST ls $M0/dironedown
+
+#check the attributes that were changed after the bricks went down
+TEST brick_perm=`ls -l $B0/${V0}3 | grep dironedown | awk '{print $1}'`
+TEST echo $brick_perm
+TEST [ ${brick_perm} = ${permission_onedown} ]
+uid=`ls -l $B0/${V0}3 | grep dironedown | awk '{print $3}'`
+TEST echo $uid
+TEST [ $uid = dev ]
+gid=`ls -l $B0/${V0}3 | grep dironedown | awk '{print $4}'`
+TEST echo $gid
+TEST [ $gid = QA ]
+
+TEST umount $M0
+userdel --force dev
+groupdel QA
+
+cleanup
+exit
+