author     Krutika Dhananjay <kdhananj@redhat.com>        2016-11-18 15:38:00 +0530
committer  Pranith Kumar Karampuri <pkarampu@redhat.com>  2016-11-26 04:19:08 -0800
commit     d557d097851d335effe0a2e810ca3f664c30e93f (patch)
tree       403edd6a6bf4d8da927548026f15fe4e7027709a
parent     c485ea191de824102adb5cd092bd75931f65de2c (diff)
features/index: Delete granular entry indices of already healed directories during crawl
Backport of: http://review.gluster.org/15880

If granular name indices already exist for a volume and granular entry heal
is disabled before they are healed, a crawl on indices/xattrop will clear the
changelogs on these directories. When their corresponding entry-changes
indices are crawled subsequently, if it is found that the directories no
longer need heal, the granular indices are not cleaned up.

This patch fixes that problem by ensuring that the zero-xattrop also deletes
the stale indices at the level of the index translator.

Change-Id: Iae0a560c1c9d37b083cad89f16d3dcf83c4f7dc7
BUG: 1398501
Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
Reviewed-on: http://review.gluster.org/15927
Smoke: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
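For context, the name indices the message refers to are entries kept on each
brick under .glusterfs/indices/entry-changes/<parent-GFID> (the test added
below checks them for the root GFID). The following is a minimal sketch of how
one might check a brick by hand for leftover indices; the brick path
/bricks/brick1 is hypothetical, and ROOT_GFID is the well-known all-zero GFID
GlusterFS uses for '/':

#!/bin/bash
# Hypothetical brick export path -- adjust for the brick being inspected.
BRICK=/bricks/brick1
# Well-known GFID of the volume root ('/') in GlusterFS.
ROOT_GFID=00000000-0000-0000-0000-000000000001

INDEX_DIR="$BRICK/.glusterfs/indices/entry-changes/$ROOT_GFID"

# If the directory exists and is non-empty, granular name indices are still
# recorded against '/'; otherwise nothing is pending.
if [ -d "$INDEX_DIR" ] && [ -n "$(ls -A "$INDEX_DIR")" ]; then
    echo "granular name indices still recorded under $INDEX_DIR:"
    ls "$INDEX_DIR"
else
    echo "no granular name indices pending for '/'"
fi

With this patch, once a full (non-granular) heal zeroes the changelog of such
a directory, index_del() either removes it outright or renames it aside for
the self-heal daemon's next crawl to delete, so a check like the one above
should eventually report nothing pending.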
-rw-r--r--  tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t  76
-rw-r--r--  xlators/features/index/src/index.c                                      23
2 files changed, 97 insertions(+), 2 deletions(-)
diff --git a/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t b/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t
new file mode 100644
index 00000000000..2da90a98c76
--- /dev/null
+++ b/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../afr.rc
+
+cleanup
+
+TESTS_EXPECTED_IN_LOOP=4
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 granular-entry-heal on
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Kill brick-0.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# Create files under root
+for i in {1..2}
+do
+ echo $i > $M0/f$i
+done
+
+# Test that the index associated with '/' is created on B1.
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+# Check for successful creation of granular entry indices
+for i in {1..2}
+do
+ TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f$i
+done
+
+# Now disable granular-entry-heal
+TEST $CLI volume set $V0 granular-entry-heal off
+
+# Start the brick that was down
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+# Enable shd
+TEST gluster volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+# The indices that were created are granular, but the heal that follows will
+# be of the normal (full) kind. Verify that heal still completes successfully
+# and that the stale granular indices are deleted.
+
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Test if data was healed
+for i in {1..2}
+do
+ TEST_IN_LOOP diff $B0/${V0}0/f$i $B0/${V0}1/f$i
+done
+
+# Now verify that there are no name indices left after self-heal
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f1
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f2
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+cleanup
diff --git a/xlators/features/index/src/index.c b/xlators/features/index/src/index.c
index 7b8713c89ef..f68dd55d766 100644
--- a/xlators/features/index/src/index.c
+++ b/xlators/features/index/src/index.c
@@ -648,6 +648,8 @@ index_del (xlator_t *this, uuid_t gfid, const char *subdir, int type)
index_priv_t *priv = NULL;
int ret = 0;
char gfid_path[PATH_MAX] = {0};
+ char rename_dst[PATH_MAX] = {0,};
+ uuid_t uuid;
priv = this->private;
GF_ASSERT_AND_GOTO_WITH_ERROR (this->name, !gf_uuid_is_null (gfid),
@@ -655,10 +657,27 @@ index_del (xlator_t *this, uuid_t gfid, const char *subdir, int type)
make_gfid_path (priv->index_basepath, subdir, gfid,
gfid_path, sizeof (gfid_path));
- if ((strcmp (subdir, ENTRY_CHANGES_SUBDIR)) == 0)
+ if ((strcmp (subdir, ENTRY_CHANGES_SUBDIR)) == 0) {
ret = sys_rmdir (gfid_path);
- else
+ /* rmdir above could fail with ENOTEMPTY if the indices under
+ * it were created when granular-entry-heal was enabled, whereas
+ * the actual heal that happened was non-granular (or full) in
+ * nature, resulting in the name indices being left behind. To
+ * clean up this directory without affecting IO path performance,
+ * the directory is renamed to a unique name under
+ * indices/entry-changes. Self-heal will pick up this entry
+ * during crawl, and on lookup into the file system will figure
+ * out that the index is stale and wipe it out using rmdir().
+ */
+ if ((ret) && (errno == ENOTEMPTY)) {
+ gf_uuid_generate (uuid);
+ make_gfid_path (priv->index_basepath, subdir, uuid,
+ rename_dst, sizeof (rename_dst));
+ ret = sys_rename (gfid_path, rename_dst);
+ }
+ } else {
ret = sys_unlink (gfid_path);
+ }
if (ret && (errno != ENOENT)) {
gf_log (this->name, GF_LOG_ERROR, "%s: failed to delete"