author    Ravishankar N <ravishankar@redhat.com>    2015-05-30 10:23:33 +0530
committer Pranith Kumar Karampuri <pkarampu@redhat.com>    2015-06-03 03:50:47 -0700
commit    da111ae21429d33179cd11409bc171fae9d55194
tree      75b00dccac53a17255c6e856fb39fa1c8dc7a5da
parent    9798a24febba9bbf28e97656b81b8a01a1325f68
afr: honour selfheal enable/disable volume set options
afr-v1 had the following volume set options that are used to enable/disable
self-heals from happening in the AFR xlator when loaded in the client graph:

    cluster.metadata-self-heal
    cluster.data-self-heal
    cluster.entry-self-heal

In afr-v2, these 3 heals can happen from the client if there is an inode
refresh. This patch allows such heals to proceed only if the corresponding
volume set options are set to true.

Change-Id: I8d97d6020611152e73a269f3fdb607652c66cc86
BUG: 1226507
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Reviewed-on: http://review.gluster.org/11012
Tested-by: NetBSD Build System <jenkins@build.gluster.org>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
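For reference, these knobs are ordinary volume-set options; below is a minimal
CLI sketch of toggling them, assuming an illustrative replica volume named
testvol (the test added by this patch issues the same commands through $CLI):

    # Gate the client-side self-heals off:
    gluster volume set testvol cluster.metadata-self-heal off
    gluster volume set testvol cluster.data-self-heal off
    gluster volume set testvol cluster.entry-self-heal off

    # Turn them back on so client access can trigger heals again:
    gluster volume set testvol cluster.metadata-self-heal on
    gluster volume set testvol cluster.data-self-heal on
    gluster volume set testvol cluster.entry-self-heal on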
-rw-r--r--  tests/basic/afr/client-side-heal.t              86
-rw-r--r--  xlators/cluster/afr/src/afr-common.c             3
-rw-r--r--  xlators/cluster/afr/src/afr-self-heal-common.c  11
3 files changed, 97 insertions(+), 3 deletions(-)
diff --git a/tests/basic/afr/client-side-heal.t b/tests/basic/afr/client-side-heal.t
new file mode 100644
index 00000000000..c9b3e355802
--- /dev/null
+++ b/tests/basic/afr/client-side-heal.t
@@ -0,0 +1,86 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+echo "some data" > $M0/datafile
+EXPECT 0 echo $?
+TEST touch $M0/mdatafile
+TEST mkdir $M0/dir
+
+#Kill a brick and perform I/O to have pending heals.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" afr_child_up_status $V0 0
+
+#pending data heal
+echo "some more data" >> $M0/datafile
+EXPECT 0 echo $?
+
+#pending metadata heal
+TEST chmod +x $M0/mdatafile
+
+#pending entry heal. Also causes pending metadata/data heals on file{1..5}
+TEST touch $M0/dir/file{1..5}
+
+EXPECT 8 afr_get_pending_heal_count $V0
+
+#After brick comes back up, access from client should not trigger heals
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+#Metadata heal via explicit lookup must not happen
+TEST ls $M0/mdatafile
+
+#Inode refresh must not trigger data and entry heals.
+#To trigger inode refresh for sure, the volume is unmounted and mounted each time.
+#Check that data heal does not happen.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+TEST cat $M0/datafile
+#Check that entry heal does not happen.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+TEST ls $M0/dir
+
+#No heal must have happened
+EXPECT 8 afr_get_pending_heal_count $V0
+
+#Enable the client-side heal options and trigger heals
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+
+#Metadata heal is triggered by lookup without need for inode refresh.
+TEST ls $M0/mdatafile
+EXPECT 7 afr_get_pending_heal_count $V0
+
+#Inode refresh must trigger data and entry heals.
+#To trigger inode refresh for sure, the volume is unmounted and mounted each time.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+TEST cat $M0/datafile
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+TEST ls $M0/dir
+EXPECT 5 afr_get_pending_heal_count $V0
+
+TEST cat $M0/dir/file1
+TEST cat $M0/dir/file2
+TEST cat $M0/dir/file3
+TEST cat $M0/dir/file4
+TEST cat $M0/dir/file5
+
+EXPECT 0 afr_get_pending_heal_count $V0
+cleanup;
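As a usage note, the new .t file can be run on its own from a built GlusterFS
source tree; a minimal sketch, assuming the prove(1) TAP harness that the
project's regression scripts rely on is available:

    # From the source-tree root; needs a working glusterd and client bits.
    prove -vf tests/basic/afr/client-side-heal.t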
diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c
index 4d0a5627381..2bbf0e3570e 100644
--- a/xlators/cluster/afr/src/afr-common.c
+++ b/xlators/cluster/afr/src/afr-common.c
@@ -1752,6 +1752,9 @@ afr_can_start_metadata_self_heal(call_frame_t *frame, xlator_t *this)
         replies = local->replies;
         priv = this->private;
 
+        if (!priv->metadata_self_heal)
+                return _gf_false;
+
         for (i = 0; i < priv->child_count; i++) {
                 if (!replies[i].valid || replies[i].op_ret == -1)
                         continue;
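A hypothetical shell-level check of this early return, assuming an
illustrative replica volume VOL mounted at /mnt/VOL with a file carrying a
pending metadata heal (mirroring the mdatafile steps in the test above):

    gluster volume set VOL cluster.metadata-self-heal off
    ls -l /mnt/VOL/mdatafile        # lookup; the heal must stay pending
    gluster volume set VOL cluster.metadata-self-heal on
    ls -l /mnt/VOL/mdatafile        # lookup alone now heals the metadata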
diff --git a/xlators/cluster/afr/src/afr-self-heal-common.c b/xlators/cluster/afr/src/afr-self-heal-common.c
index c1b116ed9c8..9a9a852b4d7 100644
--- a/xlators/cluster/afr/src/afr-self-heal-common.c
+++ b/xlators/cluster/afr/src/afr-self-heal-common.c
@@ -1303,6 +1303,11 @@ afr_selfheal_do (call_frame_t *frame, xlator_t *this, uuid_t gfid)
         gf_boolean_t data_selfheal = _gf_false;
         gf_boolean_t metadata_selfheal = _gf_false;
         gf_boolean_t entry_selfheal = _gf_false;
+        afr_private_t *priv = NULL;
+        gf_boolean_t dataheal_enabled = _gf_false;
+
+        priv = this->private;
+        gf_string2boolean (priv->data_self_heal, &dataheal_enabled);
 
         ret = afr_selfheal_unlocked_inspect (frame, this, gfid, &inode,
                                              &data_selfheal,
@@ -1316,13 +1321,13 @@ afr_selfheal_do (call_frame_t *frame, xlator_t *this, uuid_t gfid)
                 goto out;
         }
 
-        if (data_selfheal)
+        if (data_selfheal && dataheal_enabled)
                 data_ret = afr_selfheal_data (frame, this, inode);
 
-        if (metadata_selfheal)
+        if (metadata_selfheal && priv->metadata_self_heal)
                 metadata_ret = afr_selfheal_metadata (frame, this, inode);
 
-        if (entry_selfheal)
+        if (entry_selfheal && priv->entry_self_heal)
                 entry_ret = afr_selfheal_entry (frame, this, inode);
 
         or_ret = (data_ret | metadata_ret | entry_ret);
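To complement the gating above, a hypothetical spot-check from the shell,
assuming an illustrative replica volume VOL mounted at /mnt/VOL with heals
pending from an earlier brick outage (the test's afr_get_pending_heal_count
helper counts entries from the same heal-info output):

    # With the three options off, client access must not drain pending heals:
    gluster volume heal VOL info    # note the per-brick entry counts
    cat /mnt/VOL/datafile           # access through the client mount
    gluster volume heal VOL info    # counts should be unchanged

    # With data self-heal re-enabled, the next inode refresh heals the file:
    gluster volume set VOL cluster.data-self-heal on
    cat /mnt/VOL/datafile
    gluster volume heal VOL info    # datafile drops off the list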