author    Ravishankar N <ravishankar@redhat.com>    2015-05-30 10:23:33 +0530
committer Raghavendra Bhat <raghavendra@redhat.com> 2015-06-15 06:18:44 -0700
commit    fecebde3fbae17ace970a4d9c440f6455161dc62 (patch)
tree      34ee1bea0384de838d21a46c61514817a4731521 /tests
parent    4a1a1c9945656b3197b2677dd6c2142f02ade4dc (diff)
afr: honour selfheal enable/disable volume set options
Backport of http://review.gluster.org/11012

Note: http://review.gluster.org/9459 is not backported to 3.6 but the
change it makes to afr_get_heal_info() (i.e. handling ret values) is
needed for heal info to work correctly and tests/basic/afr/client-side-heal.t
to pass.

--------------------------
afr-v1 had the following volume set options that are used to enable/disable
self-heals from happening in AFR xlator when loaded in the client graph:
cluster.metadata-self-heal
cluster.data-self-heal
cluster.entry-self-heal

In afr-v2, these 3 heals can happen from the client if there is an inode
refresh. This patch allows such heals to proceed only if the corresponding
volume set options are set to true.
--------------------------

Change-Id: Iebf863758d902fd2f95be320c6791d4e15f634e7
BUG: 1230259
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Reviewed-on: http://review.gluster.org/11170
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Anuradha Talur <atalur@redhat.com>
Reviewed-by: Raghavendra Bhat <raghavendra@redhat.com>
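For reference, the three switches named above are toggled through the standard
gluster volume set interface; a minimal sketch (the volume name "myvol" is a
placeholder, not part of this change):

    # Disable all client-side self-heals on a replica volume ("myvol" is a placeholder)
    gluster volume set myvol cluster.metadata-self-heal off
    gluster volume set myvol cluster.data-self-heal off
    gluster volume set myvol cluster.entry-self-heal off

    # Re-enable them; client lookups/inode refreshes may then trigger heals again
    gluster volume set myvol cluster.metadata-self-heal on
    gluster volume set myvol cluster.data-self-heal on
    gluster volume set myvol cluster.entry-self-heal on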
Diffstat (limited to 'tests')
-rw-r--r--  tests/basic/afr/client-side-heal.t | 86
1 file changed, 86 insertions(+), 0 deletions(-)
diff --git a/tests/basic/afr/client-side-heal.t b/tests/basic/afr/client-side-heal.t
new file mode 100644
index 00000000000..c9b3e355802
--- /dev/null
+++ b/tests/basic/afr/client-side-heal.t
@@ -0,0 +1,86 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
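+#Note: --entry-timeout=0 disables kernel entry caching, so every path lookup reaches glusterfs.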
+echo "some data" > $M0/datafile
+EXPECT 0 echo $?
+TEST touch $M0/mdatafile
+TEST mkdir $M0/dir
+
+#Kill a brick and perform I/O to have pending heals.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" afr_child_up_status $V0 0
+
+#pending data heal
+echo "some more data" >> $M0/datafile
+EXPECT 0 echo $?
+
+#pending metadata heal
+TEST chmod +x $M0/mdatafile
+
+#pending entry heal. Also causes pending metadata/data heals on file{1..5}
+TEST touch $M0/dir/file{1..5}
+
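+#1 data heal (datafile) + 1 metadata heal (mdatafile) + 1 entry heal (dir) + 5 files under dir = 8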
+EXPECT 8 afr_get_pending_heal_count $V0
+
+#After the brick comes back up, access from the client should not trigger heals
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+#Metadata heal via explicit lookup must not happen
+TEST ls $M0/mdatafile
+
+#Inode refresh must not trigger data and entry heals.
+#To guarantee an inode refresh, the volume is unmounted and remounted each time.
+#Check that data heal does not happen.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+TEST cat $M0/datafile
+#Check that entry heal does not happen.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+TEST ls $M0/dir
+
+#No heal must have happened
+EXPECT 8 afr_get_pending_heal_count $V0
+
+#Enable client-side heal options and trigger heals
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+
+#Metadata heal is triggered by lookup, without the need for an inode refresh.
+TEST ls $M0/mdatafile
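+#Only mdatafile's metadata heal happens here: 8 - 1 = 7 heals still pending.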
+EXPECT 7 afr_get_pending_heal_count $V0
+
+#Inode refresh must trigger data and entry heals.
+#To guarantee an inode refresh, the volume is unmounted and remounted each time.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+TEST cat $M0/datafile
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+TEST ls $M0/dir
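+#datafile and dir are now healed; only file{1..5} remain: 7 - 2 = 5.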
+EXPECT 5 afr_get_pending_heal_count $V0
+
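+#Accessing each file triggers its pending data/metadata heal.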
+TEST cat $M0/dir/file1
+TEST cat $M0/dir/file2
+TEST cat $M0/dir/file3
+TEST cat $M0/dir/file4
+TEST cat $M0/dir/file5
+
+EXPECT 0 afr_get_pending_heal_count $V0
+cleanup;