 -rw-r--r--  cli/src/cli-cmd-parser.c                                              |  16
 -rw-r--r--  cli/src/cli-cmd-volume.c                                              |  28
 -rw-r--r--  cli/src/cli-rpc-ops.c                                                 |   8
 -rw-r--r--  heal/src/glfs-heal.c                                                  | 180
 -rw-r--r--  rpc/rpc-lib/src/protocol-common.h                                     |   2
 -rw-r--r--  tests/basic/afr/granular-esh/add-brick.t                              |   2
 -rw-r--r--  tests/basic/afr/granular-esh/cli.t                                    | 142
 -rw-r--r--  tests/basic/afr/granular-esh/conservative-merge.t                     |   4
 -rw-r--r--  tests/basic/afr/granular-esh/granular-esh.t                           |   2
 -rw-r--r--  tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t |   4
 -rw-r--r--  tests/basic/afr/granular-esh/replace-brick.t                          |   2
 -rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c                            |  19
 -rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-ops.c                       |  61
 13 files changed, 407 insertions(+), 63 deletions(-)
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index 38054a94f50..94bda6766f2 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -3821,7 +3821,8 @@ cli_cmd_volume_heal_options_parse (const char **words, int wordcount,
if (wordcount == 5) {
if (strcmp (words[3], "info") &&
- strcmp (words[3], "statistics")) {
+ strcmp (words[3], "statistics") &&
+ strcmp (words[3], "granular-entry-heal")) {
ret = -1;
goto out;
}
@@ -3851,6 +3852,19 @@ cli_cmd_volume_heal_options_parse (const char **words, int wordcount,
goto done;
}
}
+
+ if (!strcmp (words[3], "granular-entry-heal")) {
+ if (!strcmp (words[4], "enable")) {
+ ret = dict_set_int32 (dict, "heal-op",
+ GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE);
+ goto done;
+ } else if (!strcmp (words[4], "disable")) {
+ ret = dict_set_int32 (dict, "heal-op",
+ GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE);
+ goto done;
+ }
+ }
+
ret = -1;
goto out;
}
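
With the parser change above, "granular-entry-heal" becomes a valid third keyword for the heal command, taking "enable" or "disable" as its argument. A minimal sketch of the resulting CLI syntax (VOLNAME is a placeholder, as in the help text further down):

    # Enable or disable granular entry self-heal through the heal CLI
    gluster volume heal <VOLNAME> granular-entry-heal enable
    gluster volume heal <VOLNAME> granular-entry-heal disable
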
diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c
index 018814058a8..dc5c6ea5a92 100644
--- a/cli/src/cli-cmd-volume.c
+++ b/cli/src/cli-cmd-volume.c
@@ -2758,7 +2758,8 @@ cli_print_brick_status (cli_volume_status_t *status)
(op == GF_SHD_OP_SBRAIN_HEAL_FROM_LATEST_MTIME) ||\
(op == GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK) || \
(op == GF_SHD_OP_INDEX_SUMMARY) || \
- (op == GF_SHD_OP_SPLIT_BRAIN_FILES))
+ (op == GF_SHD_OP_SPLIT_BRAIN_FILES) || \
+ (op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE))
int
cli_launch_glfs_heal (int heal_op, dict_t *options)
@@ -2807,6 +2808,10 @@ cli_launch_glfs_heal (int heal_op, dict_t *options)
runner_add_args (&runner, "xml", NULL);
}
break;
+ case GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE:
+ case GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE:
+ runner_add_args (&runner, "granular-entry-heal-op", NULL);
+ break;
default:
ret = -1;
}
@@ -2818,11 +2823,11 @@ cli_launch_glfs_heal (int heal_op, dict_t *options)
printf ("%s", out);
}
ret = runner_end (&runner);
- ret = WEXITSTATUS (ret);
out:
return ret;
}
+
int
cli_cmd_volume_heal_cbk (struct cli_state *state, struct cli_cmd_word *word,
const char **words, int wordcount)
@@ -2859,19 +2864,19 @@ cli_cmd_volume_heal_cbk (struct cli_state *state, struct cli_cmd_word *word,
goto out;
if (NEEDS_GLFS_HEAL (heal_op)) {
ret = cli_launch_glfs_heal (heal_op, options);
- if (ret == -1)
+ if (ret < 0)
+ goto out;
+ if (heal_op != GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE)
goto out;
}
- else {
- proc = &cli_rpc_prog->proctable[GLUSTER_CLI_HEAL_VOLUME];
- CLI_LOCAL_INIT (local, words, frame, options);
+ proc = &cli_rpc_prog->proctable[GLUSTER_CLI_HEAL_VOLUME];
- if (proc->fn) {
- ret = proc->fn (frame, THIS, options);
- }
- }
+ CLI_LOCAL_INIT (local, words, frame, options);
+ if (proc->fn) {
+ ret = proc->fn (frame, THIS, options);
+ }
out:
if (ret) {
cli_cmd_sent_status_get (&sent);
@@ -3280,7 +3285,8 @@ struct cli_cmd volume_cmds[] = {
"statistics [heal-count [replica <HOSTNAME:BRICKNAME>]] |"
"info [healed | heal-failed | split-brain] |"
"split-brain {bigger-file <FILE> | latest-mtime <FILE> |"
- "source-brick <HOSTNAME:BRICKNAME> [<FILE>]}]",
+ "source-brick <HOSTNAME:BRICKNAME> [<FILE>]} |"
+ "granular-entry-heal {enable | disable}]",
cli_cmd_volume_heal_cbk,
"self-heal commands on volume specified by <VOLNAME>"},
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index 1f641f4a2fd..c6801e6a746 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -8961,6 +8961,14 @@ gf_cli_heal_volume_cbk (struct rpc_req *req, struct iovec *iov,
operation = "";
heal_op_str = "Disable heal";
break;
+ case GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE:
+ operation = "";
+ heal_op_str = "Enable granular entry heal";
+ break;
+ case GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE:
+ operation = "";
+ heal_op_str = "Disable granular entry heal";
+ break;
}
if (rsp.op_ret) {
diff --git a/heal/src/glfs-heal.c b/heal/src/glfs-heal.c
index 2a85e755151..94cb6b04c02 100644
--- a/heal/src/glfs-heal.c
+++ b/heal/src/glfs-heal.c
@@ -39,7 +39,7 @@ xmlDocPtr glfsh_doc = NULL;
ret = 0; \
} while (0) \
-typedef void (*print_status) (dict_t *, char *, uuid_t, uint64_t *,
+typedef int (*print_status) (dict_t *, char *, uuid_t, uint64_t *,
gf_boolean_t flag);
int glfsh_heal_splitbrain_file (glfs_t *fs, xlator_t *top_subvol,
@@ -65,6 +65,11 @@ int32_t is_xml;
"source-brick <HOSTNAME:BRICKNAME> [<FILE>] | "\
"split-brain-info]\n"
+typedef enum {
+ GLFSH_MODE_CONTINUE_ON_ERROR = 1,
+ GLFSH_MODE_EXIT_ON_FIRST_FAILURE,
+} glfsh_fail_mode_t;
+
int
glfsh_init ()
{
@@ -72,6 +77,30 @@ glfsh_init ()
}
int
+glfsh_end_op_granular_entry_heal (int op_ret, char *op_errstr)
+{
+ /* If the error string is available, give it higher precedence. */
+
+ if (op_errstr) {
+ printf ("%s\n", op_errstr);
+ } else if (op_ret < 0) {
+ if (op_ret == -EAGAIN)
+ printf ("One or more entries need heal. Please execute "
+ "the command again after there are no entries "
+ "to be healed\n");
+ else if (op_ret == -ENOTCONN)
+ printf ("One or more bricks could be down. Please "
+ "execute the command again after bringing all "
+ "bricks online and finishing any pending "
+ "heals\n");
+ else
+ printf ("Command failed - %s. Please check the logs for"
+ " more details\n", strerror (-op_ret));
+ }
+ return 0;
+}
+
+int
glfsh_end (int op_ret, char *op_errstr)
{
if (op_errstr)
@@ -87,6 +116,12 @@ glfsh_print_hr_spb_status (char *path, uuid_t gfid, char *status)
}
void
+glfsh_no_print_hr_heal_status (char *path, uuid_t gfid, char *status)
+{
+ return;
+}
+
+void
glfsh_print_hr_heal_status (char *path, uuid_t gfid, char *status)
{
printf ("%s%s\n", path, status);
@@ -296,6 +331,12 @@ out:
}
int
+glfsh_no_print_hr_heal_op_status (int ret, uint64_t num_entries, char *fmt_str)
+{
+ return 0;
+}
+
+int
glfsh_print_hr_heal_op_status (int ret, uint64_t num_entries, char *fmt_str)
{
if (ret < 0 && num_entries == 0) {
@@ -422,7 +463,7 @@ glfsh_index_purge (xlator_t *subvol, inode_t *inode, char *name)
return ret;
}
-void
+int
glfsh_print_spb_status (dict_t *dict, char *path, uuid_t gfid,
uint64_t *num_entries, gf_boolean_t flag)
{
@@ -434,7 +475,7 @@ glfsh_print_spb_status (dict_t *dict, char *path, uuid_t gfid,
ret = dict_get_str (dict, "heal-info", &value);
if (ret)
- return;
+ return 0;
if (!strcmp (value, "split-brain")) {
split_b = _gf_true;
@@ -456,10 +497,10 @@ glfsh_print_spb_status (dict_t *dict, char *path, uuid_t gfid,
gfid, NULL);
}
}
- return;
+ return 0;
}
-void
+int
glfsh_print_heal_status (dict_t *dict, char *path, uuid_t gfid,
uint64_t *num_entries, gf_boolean_t ignore_dirty)
{
@@ -471,7 +512,7 @@ glfsh_print_heal_status (dict_t *dict, char *path, uuid_t gfid,
ret = dict_get_str (dict, "heal-info", &value);
if (ret || (!strcmp (value, "no-heal")))
- return;
+ return 0;
if (!strcmp (value, "heal")) {
ret = gf_asprintf (&status, " ");
@@ -514,7 +555,7 @@ out:
if (pending) {
GF_FREE (status);
status = NULL;
- return;
+ return 0;
}
}
if (ret == -1)
@@ -527,7 +568,21 @@ out:
status ? status : "");
GF_FREE (status);
- return;
+ return 0;
+}
+
+int
+glfsh_heal_status_boolean (dict_t *dict, char *path, uuid_t gfid,
+ uint64_t *num_entries, gf_boolean_t ignore_dirty)
+{
+ int ret = 0;
+ char *value = NULL;
+
+ ret = dict_get_str (dict, "heal-info", &value);
+ if ((!ret) && (!strcmp (value, "no-heal")))
+ return 0;
+ else
+ return -1;
}
static int
@@ -561,11 +616,12 @@ static int
glfsh_process_entries (xlator_t *xl, fd_t *fd, gf_dirent_t *entries,
uint64_t *offset, uint64_t *num_entries,
print_status glfsh_print_status,
- gf_boolean_t ignore_dirty)
+ gf_boolean_t ignore_dirty, glfsh_fail_mode_t mode)
{
gf_dirent_t *entry = NULL;
gf_dirent_t *tmp = NULL;
int ret = 0;
+ int print_status = 0;
char *path = NULL;
uuid_t gfid = {0};
xlator_t *this = NULL;
@@ -591,8 +647,13 @@ glfsh_process_entries (xlator_t *xl, fd_t *fd, gf_dirent_t *entries,
gf_uuid_copy (loc.gfid, gfid);
ret = syncop_getxattr (this, &loc, &dict, GF_HEAL_INFO, NULL,
NULL);
- if (ret)
- continue;
+ if (ret) {
+ if ((mode != GLFSH_MODE_CONTINUE_ON_ERROR) &&
+ (ret == -ENOTCONN))
+ goto out;
+ else
+ continue;
+ }
ret = syncop_gfid_to_path (this->itable, xl, gfid, &path);
@@ -601,11 +662,19 @@ glfsh_process_entries (xlator_t *xl, fd_t *fd, gf_dirent_t *entries,
ret = 0;
continue;
}
- if (dict)
- glfsh_print_status (dict, path, gfid,
- num_entries, ignore_dirty);
+ if (dict) {
+ print_status = glfsh_print_status (dict, path, gfid,
+ num_entries,
+ ignore_dirty);
+ if ((print_status) &&
+ (mode != GLFSH_MODE_CONTINUE_ON_ERROR)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ }
}
ret = 0;
+out:
GF_FREE (path);
if (dict) {
dict_unref (dict);
@@ -620,17 +689,21 @@ glfsh_crawl_directory (glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
dict_t *xattr_req, uint64_t *num_entries,
gf_boolean_t ignore)
{
- uint64_t offset = 0;
+ int ret = 0;
+ int heal_op = -1;
+ uint64_t offset = 0;
gf_dirent_t entries;
- int ret = 0;
gf_boolean_t free_entries = _gf_false;
- int heal_op = -1;
+ glfsh_fail_mode_t mode = GLFSH_MODE_CONTINUE_ON_ERROR;
INIT_LIST_HEAD (&entries.list);
ret = dict_get_int32 (xattr_req, "heal-op", &heal_op);
if (ret)
return ret;
+ if (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE)
+ mode = GLFSH_MODE_EXIT_ON_FIRST_FAILURE;
+
while (1) {
ret = syncop_readdir (readdir_xl, fd, 131072, offset, &entries,
NULL, NULL);
@@ -647,7 +720,7 @@ glfsh_crawl_directory (glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
&entries, &offset,
num_entries,
glfsh_print_heal_status,
- ignore);
+ ignore, mode);
if (ret < 0)
goto out;
} else if (heal_op == GF_SHD_OP_SPLIT_BRAIN_FILES) {
@@ -655,13 +728,20 @@ glfsh_crawl_directory (glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
&entries, &offset,
num_entries,
glfsh_print_spb_status,
- ignore);
+ ignore, mode);
if (ret < 0)
goto out;
} else if (heal_op == GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK) {
ret = glfsh_heal_entries (fs, top_subvol, rootloc,
&entries, &offset,
num_entries, xattr_req);
+ } else if (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) {
+ ret = glfsh_process_entries (readdir_xl, fd, &entries,
+ &offset, num_entries,
+ glfsh_heal_status_boolean,
+ ignore, mode);
+ if (ret < 0)
+ goto out;
}
gf_dirent_free (&entries);
free_entries = _gf_false;
@@ -674,6 +754,12 @@ out:
}
static int
+glfsh_no_print_brick_from_xl (xlator_t *xl, loc_t *rootloc)
+{
+ return 0;
+}
+
+static int
glfsh_print_brick_from_xl (xlator_t *xl, loc_t *rootloc)
{
char *remote_host = NULL;
@@ -751,6 +837,13 @@ glfsh_print_pending_heals (glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
if (ret)
goto out;
+ if ((!is_parent_replicate) &&
+ ((heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) ||
+ (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE))) {
+ ret = 0;
+ goto out;
+ }
+
ret = glfsh_output->print_brick_from_xl (xl, rootloc);
if (ret < 0)
goto out;
@@ -758,6 +851,10 @@ glfsh_print_pending_heals (glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
ret = glfsh_print_pending_heals_type (fs, top_subvol, rootloc, xl,
heal_op, xattr_req,
GF_XATTROP_INDEX_GFID, &count);
+
+ if (ret < 0 && heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE)
+ goto out;
+
total += count;
count = 0;
if (ret == -ENOTCONN)
@@ -838,14 +935,14 @@ out:
return NULL;
}
-
int
glfsh_gather_heal_info (glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
gf_xl_afr_op_t heal_op)
{
- xlator_t *xl = NULL;
+ int ret = 0;
+ xlator_t *xl = NULL;
xlator_t *heal_xl = NULL;
- xlator_t *old_THIS = NULL;
+ xlator_t *old_THIS = NULL;
xl = top_subvol;
while (xl->next)
@@ -856,20 +953,28 @@ glfsh_gather_heal_info (glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
if (heal_xl) {
old_THIS = THIS;
THIS = heal_xl;
- glfsh_print_pending_heals (fs, top_subvol,
- rootloc, xl,
- heal_op,
- !strcmp
- (heal_xl->type,
- "cluster/replicate"));
+ ret = glfsh_print_pending_heals (fs, top_subvol,
+ rootloc, xl,
+ heal_op,
+ !strcmp
+ (heal_xl->type,
+ "cluster/replicate"));
THIS = old_THIS;
+
+ if ((ret < 0) &&
+ (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE))
+ goto out;
}
}
xl = xl->prev;
}
- return 0;
+out:
+ if (heal_op != GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE)
+ ret = 0;
+
+ return ret;
}
int
@@ -1107,6 +1212,15 @@ glfsh_info_t glfsh_human_readable = {
.end = glfsh_end
};
+glfsh_info_t glfsh_no_print = {
+ .init = glfsh_init,
+ .print_brick_from_xl = glfsh_no_print_brick_from_xl,
+ .print_heal_op_status = glfsh_no_print_hr_heal_op_status,
+ .print_heal_status = glfsh_no_print_hr_heal_status,
+ .print_spb_status = glfsh_no_print_hr_heal_status,
+ .end = glfsh_end_op_granular_entry_heal
+};
+
#if (HAVE_LIB_XML)
glfsh_info_t glfsh_xml_output = {
.init = glfsh_xml_init,
@@ -1150,6 +1264,8 @@ main (int argc, char **argv)
} else if (!strcmp (argv[2], "xml")) {
heal_op = GF_SHD_OP_INDEX_SUMMARY;
is_xml = 1;
+ } else if (!strcmp (argv[2], "granular-entry-heal-op")) {
+ heal_op = GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE;
} else {
printf (USAGE_STR, argv[0]);
ret = -1;
@@ -1206,6 +1322,9 @@ main (int argc, char **argv)
}
+ if (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE)
+ glfsh_output = &glfsh_no_print;
+
ret = glfsh_output->init ();
if (ret)
exit (EXIT_FAILURE);
@@ -1282,6 +1401,7 @@ main (int argc, char **argv)
switch (heal_op) {
case GF_SHD_OP_INDEX_SUMMARY:
case GF_SHD_OP_SPLIT_BRAIN_FILES:
+ case GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE:
ret = glfsh_gather_heal_info (fs, top_subvol, &rootloc,
heal_op);
break;
@@ -1300,6 +1420,8 @@ main (int argc, char **argv)
}
glfsh_output->end (ret, NULL);
+ if (ret < 0)
+ ret = -ret;
loc_wipe (&rootloc);
glfs_subvol_done (fs, top_subvol);
cleanup (fs);
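
For the enable op, glfsh_gather_heal_info now propagates negative errno values (-EAGAIN when entries still need heal, -ENOTCONN when a brick is unreachable), main negates the value into a positive exit status, and glfsh_end_op_granular_entry_heal prints the explanation. A hedged sketch of a failed pre-flight, using the messages from the printfs above (the surrounding CLI output may differ):

    $ gluster volume heal <VOLNAME> granular-entry-heal enable
    One or more bricks could be down. Please execute the command again after
    bringing all bricks online and finishing any pending heals
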
diff --git a/rpc/rpc-lib/src/protocol-common.h b/rpc/rpc-lib/src/protocol-common.h
index 49c3d963578..c5d14e8667f 100644
--- a/rpc/rpc-lib/src/protocol-common.h
+++ b/rpc/rpc-lib/src/protocol-common.h
@@ -259,6 +259,8 @@ typedef enum {
GF_SHD_OP_HEAL_ENABLE,
GF_SHD_OP_HEAL_DISABLE,
GF_SHD_OP_SBRAIN_HEAL_FROM_LATEST_MTIME,
+ GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE,
+ GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE,
} gf_xl_afr_op_t ;
struct gf_gsync_detailed_status_ {
diff --git a/tests/basic/afr/granular-esh/add-brick.t b/tests/basic/afr/granular-esh/add-brick.t
index f3125d7fe7d..270cf1d32a6 100644
--- a/tests/basic/afr/granular-esh/add-brick.t
+++ b/tests/basic/afr/granular-esh/add-brick.t
@@ -14,7 +14,7 @@ TEST $CLI volume set $V0 cluster.data-self-heal off
TEST $CLI volume set $V0 cluster.metadata-self-heal off
TEST $CLI volume set $V0 cluster.entry-self-heal off
TEST $CLI volume set $V0 self-heal-daemon off
-TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume heal $V0 granular-entry-heal enable
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
diff --git a/tests/basic/afr/granular-esh/cli.t b/tests/basic/afr/granular-esh/cli.t
new file mode 100644
index 00000000000..a655180a095
--- /dev/null
+++ b/tests/basic/afr/granular-esh/cli.t
@@ -0,0 +1,142 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../afr.rc
+
+cleanup
+
+TESTS_EXPECTED_IN_LOOP=4
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+# Test that enabling the option should work on a newly created volume
+TEST $CLI volume set $V0 cluster.granular-entry-heal on
+TEST $CLI volume set $V0 cluster.granular-entry-heal off
+
+#########################
+##### DISPERSE TEST #####
+#########################
+# Execute the same command on a disperse volume and make sure it fails.
+TEST $CLI volume create $V1 disperse 3 redundancy 1 $H0:$B0/${V1}{0,1,2}
+TEST $CLI volume start $V1
+TEST ! $CLI volume heal $V1 granular-entry-heal enable
+TEST ! $CLI volume heal $V1 granular-entry-heal disable
+
+#######################
+###### TIER TEST ######
+#######################
+# Execute the same command on a disperse + replicate tiered volume and make
+# sure the option is set on the replicate leg of the volume
+TEST $CLI volume attach-tier $V1 replica 2 $H0:$B0/${V1}{3,4}
+TEST $CLI volume heal $V1 granular-entry-heal enable
+EXPECT "enable" volume_get_field $V1 cluster.granular-entry-heal
+TEST $CLI volume heal $V1 granular-entry-heal disable
+EXPECT "disable" volume_get_field $V1 cluster.granular-entry-heal
+
+# Kill a disperse brick and make heal be pending on the volume.
+TEST kill_brick $V1 $H0 $B0/${V1}0
+
+# Now make sure that one offline brick in disperse does not affect enabling the
+# option on the volume.
+TEST $CLI volume heal $V1 granular-entry-heal enable
+EXPECT "enable" volume_get_field $V1 cluster.granular-entry-heal
+TEST $CLI volume heal $V1 granular-entry-heal disable
+EXPECT "disable" volume_get_field $V1 cluster.granular-entry-heal
+
+# Now kill a replicate brick.
+TEST kill_brick $V1 $H0 $B0/${V1}3
+# Now make sure that one offline brick in replicate causes the command to
+# fail.
+TEST ! $CLI volume heal $V1 granular-entry-heal enable
+EXPECT "disable" volume_get_field $V1 cluster.granular-entry-heal
+
+######################
+### REPLICATE TEST ###
+######################
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 self-heal-daemon off
+# Test that the volume-set way of enabling the option is disallowed
+TEST ! $CLI volume set $V0 granular-entry-heal on
+# Test that the volume-heal way of enabling the option is allowed
+TEST $CLI volume heal $V0 granular-entry-heal enable
+# Volume-reset of the option should be allowed
+TEST $CLI volume reset $V0 granular-entry-heal
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+EXPECT "enable" volume_option $V0 cluster.granular-entry-heal
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Kill brick-0.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# Disabling the option should work even when one or more bricks are down
+TEST $CLI volume heal $V0 granular-entry-heal disable
+# When a brick is down, the 'enable' attempt should fail
+TEST ! $CLI volume heal $V0 granular-entry-heal enable
+
+# Restart the killed brick
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+# When all bricks are up, it should be possible to enable the option
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+# Kill brick-0 again
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# Create files under root
+for i in {1..2}
+do
+ echo $i > $M0/f$i
+done
+
+# Test that the index associated with '/' is created on B1.
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+# Check for successful creation of granular entry indices
+for i in {1..2}
+do
+ TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f$i
+done
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST gluster volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Test if data was healed
+for i in {1..2}
+do
+ TEST_IN_LOOP diff $B0/${V0}0/f$i $B0/${V0}1/f$i
+done
+
+# Now verify that there are no name indices left after self-heal
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f1
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f2
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
+
+# Perform a volume-reset-all-options operation
+TEST $CLI volume reset $V0
+# Ensure that granular entry heal is also disabled
+EXPECT "no" volume_get_field $V0 cluster.granular-entry-heal
+EXPECT "on" volume_get_field $V0 cluster.entry-self-heal
+
+cleanup
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=1399038
diff --git a/tests/basic/afr/granular-esh/conservative-merge.t b/tests/basic/afr/granular-esh/conservative-merge.t
index b566a0ea4d3..b170e47e0cb 100644
--- a/tests/basic/afr/granular-esh/conservative-merge.t
+++ b/tests/basic/afr/granular-esh/conservative-merge.t
@@ -11,13 +11,13 @@ TESTS_EXPECTED_IN_LOOP=4
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
TEST $CLI volume set $V0 self-heal-daemon off
TEST $CLI volume set $V0 data-self-heal off
TEST $CLI volume set $V0 metadata-self-heal off
TEST $CLI volume set $V0 entry-self-heal off
-TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume heal $V0 granular-entry-heal enable
-TEST $CLI volume start $V0
TEST $GFS --volfile-id=$V0 -s $H0 $M0
TEST mkdir $M0/dir
diff --git a/tests/basic/afr/granular-esh/granular-esh.t b/tests/basic/afr/granular-esh/granular-esh.t
index ee53878e004..de0e8f4290b 100644
--- a/tests/basic/afr/granular-esh/granular-esh.t
+++ b/tests/basic/afr/granular-esh/granular-esh.t
@@ -16,7 +16,7 @@ TEST $CLI volume set $V0 cluster.data-self-heal off
TEST $CLI volume set $V0 cluster.metadata-self-heal off
TEST $CLI volume set $V0 cluster.entry-self-heal off
TEST $CLI volume set $V0 self-heal-daemon off
-TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume heal $V0 granular-entry-heal enable
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
diff --git a/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t b/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t
index 2da90a98c76..1b5421bf4b6 100644
--- a/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t
+++ b/tests/basic/afr/granular-esh/granular-indices-but-non-granular-heal.t
@@ -17,7 +17,7 @@ TEST $CLI volume set $V0 cluster.data-self-heal off
TEST $CLI volume set $V0 cluster.metadata-self-heal off
TEST $CLI volume set $V0 cluster.entry-self-heal off
TEST $CLI volume set $V0 self-heal-daemon off
-TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume heal $V0 granular-entry-heal enable
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
@@ -40,7 +40,7 @@ do
done
# Now disable granular-entry-heal
-TEST $CLI volume set $V0 granular-entry-heal off
+TEST $CLI volume heal $V0 granular-entry-heal disable
# Start the brick that was down
TEST $CLI volume start $V0 force
diff --git a/tests/basic/afr/granular-esh/replace-brick.t b/tests/basic/afr/granular-esh/replace-brick.t
index aaa54da2a2c..639ed81b95c 100644
--- a/tests/basic/afr/granular-esh/replace-brick.t
+++ b/tests/basic/afr/granular-esh/replace-brick.t
@@ -12,7 +12,7 @@ TEST $CLI volume set $V0 cluster.data-self-heal off
TEST $CLI volume set $V0 cluster.metadata-self-heal off
TEST $CLI volume set $V0 cluster.entry-self-heal off
TEST $CLI volume set $V0 self-heal-daemon off
-TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume heal $V0 granular-entry-heal enable
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 56622538ad8..b2dfcb27f5b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -1119,6 +1119,25 @@ glusterd_op_stage_set_volume (dict_t *dict, char **op_errstr)
if (key_fixed)
key = key_fixed;
+ if (strcmp (key, "cluster.granular-entry-heal") == 0) {
+ /* For granular entry-heal, if the set command was
+ * invoked through volume-set CLI, then allow the
+ * command only if the volume is still in 'Created'
+ * state
+ */
+ if ((dict_get (dict, "is-special-key") == NULL) &&
+ (volinfo->status != GLUSTERD_STATUS_NONE)) {
+ snprintf (errstr, sizeof (errstr), " 'gluster "
+ "volume set <VOLNAME> %s {enable, "
+ "disable}' is not supported. Use "
+ "'gluster volume heal <VOLNAME> "
+ "granular-entry-heal {enable, "
+ "disable}' instead.", key);
+ ret = -1;
+ goto out;
+ }
+ }
+
/* Check if the key is cluster.op-version and set
* local_new_op_version to the value given if possible.
*/
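
The stage check above rejects a plain volume-set of cluster.granular-entry-heal once the volume has left the 'Created' state; only the heal-CLI path, which sets the "is-special-key" marker (see glusterd-volume-ops.c below), is allowed afterwards. A short illustration, consistent with cli.t above:

    # Allowed only while the volume is still in 'Created' state:
    gluster volume set <VOLNAME> cluster.granular-entry-heal on
    # Once the volume has been started, the set form fails with the error
    # message above; the supported path is:
    gluster volume heal <VOLNAME> granular-entry-heal {enable | disable}
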
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index c14b3584efe..90a165d498e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -769,8 +769,9 @@ out:
return ret;
}
static int
-glusterd_handle_heal_enable_disable (rpcsvc_request_t *req, dict_t *dict,
- glusterd_volinfo_t *volinfo)
+glusterd_handle_heal_options_enable_disable (rpcsvc_request_t *req,
+ dict_t *dict,
+ glusterd_volinfo_t *volinfo)
{
gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID;
int ret = 0;
@@ -784,30 +785,58 @@ glusterd_handle_heal_enable_disable (rpcsvc_request_t *req, dict_t *dict,
}
if ((heal_op != GF_SHD_OP_HEAL_ENABLE) &&
- (heal_op != GF_SHD_OP_HEAL_DISABLE)) {
+ (heal_op != GF_SHD_OP_HEAL_DISABLE) &&
+ (heal_op != GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) &&
+ (heal_op != GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) {
ret = -EINVAL;
goto out;
}
- if (heal_op == GF_SHD_OP_HEAL_ENABLE) {
+ if (((heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) ||
+ (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) &&
+ (volinfo->type == GF_CLUSTER_TYPE_DISPERSE)) {
+ ret = -1;
+ goto out;
+ }
+
+ if ((heal_op == GF_SHD_OP_HEAL_ENABLE) ||
+ (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE)) {
value = "enable";
- } else if (heal_op == GF_SHD_OP_HEAL_DISABLE) {
+ } else if ((heal_op == GF_SHD_OP_HEAL_DISABLE) ||
+ (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) {
value = "disable";
}
/* Convert this command to volume-set command based on volume type */
if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- ret = glusterd_handle_shd_option_for_tier (volinfo, value,
- dict);
- if (!ret)
- goto set_volume;
- goto out;
+ switch (heal_op) {
+ case GF_SHD_OP_HEAL_ENABLE:
+ case GF_SHD_OP_HEAL_DISABLE:
+ ret = glusterd_handle_shd_option_for_tier (volinfo,
+ value, dict);
+ if (!ret)
+ goto set_volume;
+ goto out;
+ /* For any other heal_op, including granular-entry heal,
+ * just break out of the block but don't goto out yet.
+ */
+ default:
+ break;
+ }
}
- key = volgen_get_shd_key (volinfo->type);
- if (!key) {
- ret = -1;
- goto out;
+ if ((heal_op == GF_SHD_OP_HEAL_ENABLE) ||
+ (heal_op == GF_SHD_OP_HEAL_DISABLE)) {
+ key = volgen_get_shd_key (volinfo->type);
+ if (!key) {
+ ret = -1;
+ goto out;
+ }
+ } else {
+ key = "cluster.granular-entry-heal";
+ ret = dict_set_int8 (dict, "is-special-key", 1);
+ if (ret)
+ goto out;
}
ret = dict_set_str (dict, "key1", key);
@@ -893,7 +922,7 @@ __glusterd_handle_cli_heal_volume (rpcsvc_request_t *req)
goto out;
}
- ret = glusterd_handle_heal_enable_disable (req, dict, volinfo);
+ ret = glusterd_handle_heal_options_enable_disable (req, dict, volinfo);
if (ret == -EINVAL) {
ret = 0;
} else {
@@ -1832,6 +1861,8 @@ glusterd_handle_heal_cmd (xlator_t *this, glusterd_volinfo_t *volinfo,
case GF_SHD_OP_INVALID:
case GF_SHD_OP_HEAL_ENABLE: /* This op should be handled in volume-set*/
case GF_SHD_OP_HEAL_DISABLE:/* This op should be handled in volume-set*/
+ case GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE: /* This op should be handled in volume-set */
+ case GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE: /* This op should be handled in volume-set */
case GF_SHD_OP_SBRAIN_HEAL_FROM_BIGGER_FILE:/*glfsheal cmd*/
case GF_SHD_OP_SBRAIN_HEAL_FROM_LATEST_MTIME:/*glfsheal cmd*/
case GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK:/*glfsheal cmd*/