path: root/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
author    Krutika Dhananjay <kdhananj@redhat.com>    2016-09-22 16:48:54 +0530
committer Pranith Kumar Karampuri <pkarampu@redhat.com>    2016-11-29 02:52:25 -0800
commit    e06f4590616dfe7b93e8ac147ed812756df4f22d (patch)
tree      82117a44d2c057eeb998ac30c11beb776438a5c0 /xlators/mgmt/glusterd/src/glusterd-volume-ops.c
parent    31927cd4db62b338f4763ce00ab512d20e4d8c32 (diff)
cluster/afr: CLI for granular entry heal enablement/disablement
Backport of: http://review.gluster.org/15747

When there are already existing non-granular indices created that are yet
to be healed, if the granular-entry-heal option is toggled from 'off' to
'on', AFR self-heal, whenever it kicks in, will try to look for granular
indices in 'entry-changes'. Because of the absence of name indices, the
granular entry healing logic will fail to heal these directories and,
worse yet, will unset pending extended attributes on the assumption that
there are no entries that need heal.

To get around this, a new CLI is introduced which will invoke the glfsheal
program to figure out whether, at the time an attempt is made to enable
granular entry heal, there are pending heals on the volume OR one or more
bricks are down. If either of these is true, the command will fail with
the appropriate error.

New CLI: gluster volume heal <VOL> granular-entry-heal {enable,disable}

Change-Id: I342e0390f847fcb015a50ef58aedfcbcb58f4ed3
BUG: 1398501
Signed-off-by: Krutika Dhananjay <kdhananj@redhat.com>
Reviewed-on: http://review.gluster.org/15942
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
Smoke: Gluster Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
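To make the new decision flow concrete, here is a minimal, self-contained C
sketch of the op-to-volume-set mapping that the patched
glusterd_handle_heal_options_enable_disable() performs. The enum and
volume-type stand-ins are trimmed stubs, and the replicate shd key shown is
an assumption about what volgen_get_shd_key() returns; this is an
illustration of the logic in the diff below, not the patched code itself.

/* Simplified sketch of the heal-op -> volume-set key/value mapping. */
#include <stdio.h>
#include <errno.h>

typedef enum {
        GF_SHD_OP_INVALID,
        GF_SHD_OP_HEAL_ENABLE,
        GF_SHD_OP_HEAL_DISABLE,
        GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE,
        GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE,
} heal_op_t;

typedef enum { TYPE_REPLICATE, TYPE_DISPERSE } vol_type_t;

/* Returns 0 and fills *key/*value on success; -1 or -EINVAL on failure,
 * mirroring the error conventions of the patched function. */
static int
map_heal_op (heal_op_t op, vol_type_t type,
             const char **key, const char **value)
{
        switch (op) {
        case GF_SHD_OP_HEAL_ENABLE:
        case GF_SHD_OP_HEAL_DISABLE:
                /* Assumption: volgen_get_shd_key() would return this
                 * key for a replicate volume. */
                *key = "cluster.self-heal-daemon";
                break;
        case GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE:
        case GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE:
                if (type == TYPE_DISPERSE)
                        return -1; /* granular entry heal is AFR-only */
                *key = "cluster.granular-entry-heal";
                break;
        default:
                return -EINVAL; /* not an enable/disable toggle at all */
        }

        *value = (op == GF_SHD_OP_HEAL_ENABLE ||
                  op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE)
                 ? "enable" : "disable";
        return 0;
}

int
main (void)
{
        const char *key = NULL, *value = NULL;

        if (map_heal_op (GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE,
                         TYPE_REPLICATE, &key, &value) == 0)
                printf ("volume set: %s=%s\n", key, value);
        return 0;
}

Note how the granular ops reuse the enable/disable plumbing but swap in a
fixed key plus the "is-special-key" dict flag (visible in the diff), rather
than going through volgen_get_shd_key().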
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-volume-ops.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 61
1 file changed, 46 insertions(+), 15 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 72e14b0429d..b0a9372069e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -760,8 +760,9 @@ out:
return ret;
}
static int
-glusterd_handle_heal_enable_disable (rpcsvc_request_t *req, dict_t *dict,
- glusterd_volinfo_t *volinfo)
+glusterd_handle_heal_options_enable_disable (rpcsvc_request_t *req,
+ dict_t *dict,
+ glusterd_volinfo_t *volinfo)
{
gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID;
int ret = 0;
@@ -775,30 +776,58 @@ glusterd_handle_heal_enable_disable (rpcsvc_request_t *req, dict_t *dict,
}
if ((heal_op != GF_SHD_OP_HEAL_ENABLE) &&
- (heal_op != GF_SHD_OP_HEAL_DISABLE)) {
+ (heal_op != GF_SHD_OP_HEAL_DISABLE) &&
+ (heal_op != GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) &&
+ (heal_op != GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) {
ret = -EINVAL;
goto out;
}
- if (heal_op == GF_SHD_OP_HEAL_ENABLE) {
+ if (((heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) ||
+ (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) &&
+ (volinfo->type == GF_CLUSTER_TYPE_DISPERSE)) {
+ ret = -1;
+ goto out;
+ }
+
+ if ((heal_op == GF_SHD_OP_HEAL_ENABLE) ||
+ (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE)) {
value = "enable";
- } else if (heal_op == GF_SHD_OP_HEAL_DISABLE) {
+ } else if ((heal_op == GF_SHD_OP_HEAL_DISABLE) ||
+ (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) {
value = "disable";
}
/* Convert this command to volume-set command based on volume type */
if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- ret = glusterd_handle_shd_option_for_tier (volinfo, value,
- dict);
- if (!ret)
- goto set_volume;
- goto out;
+ switch (heal_op) {
+ case GF_SHD_OP_HEAL_ENABLE:
+ case GF_SHD_OP_HEAL_DISABLE:
+ ret = glusterd_handle_shd_option_for_tier (volinfo,
+ value, dict);
+ if (!ret)
+ goto set_volume;
+ goto out;
+ /* For any other heal_op, including granular-entry heal,
+ * just break out of the block but don't goto out yet.
+ */
+ default:
+ break;
+ }
}
- key = volgen_get_shd_key (volinfo->type);
- if (!key) {
- ret = -1;
- goto out;
+ if ((heal_op == GF_SHD_OP_HEAL_ENABLE) ||
+ (heal_op == GF_SHD_OP_HEAL_DISABLE)) {
+ key = volgen_get_shd_key (volinfo->type);
+ if (!key) {
+ ret = -1;
+ goto out;
+ }
+ } else {
+ key = "cluster.granular-entry-heal";
+ ret = dict_set_int8 (dict, "is-special-key", 1);
+ if (ret)
+ goto out;
}
ret = dict_set_str (dict, "key1", key);
@@ -884,7 +913,7 @@ __glusterd_handle_cli_heal_volume (rpcsvc_request_t *req)
goto out;
}
- ret = glusterd_handle_heal_enable_disable (req, dict, volinfo);
+ ret = glusterd_handle_heal_options_enable_disable (req, dict, volinfo);
if (ret == -EINVAL) {
ret = 0;
} else {
@@ -1823,6 +1852,8 @@ glusterd_handle_heal_cmd (xlator_t *this, glusterd_volinfo_t *volinfo,
case GF_SHD_OP_INVALID:
case GF_SHD_OP_HEAL_ENABLE: /* This op should be handled in volume-set*/
case GF_SHD_OP_HEAL_DISABLE:/* This op should be handled in volume-set*/
+ case GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE: /* This op should be handled in volume-set */
+ case GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE: /* This op should be handled in volume-set */
case GF_SHD_OP_SBRAIN_HEAL_FROM_BIGGER_FILE:/*glfsheal cmd*/
case GF_SHD_OP_SBRAIN_HEAL_FROM_LATEST_MTIME:/*glfsheal cmd*/
case GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK:/*glfsheal cmd*/
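The __glusterd_handle_cli_heal_volume() hunk above relies on an -EINVAL
sentinel: the renamed helper returns -EINVAL for any op that is not one of
the four enable/disable toggles, which the caller interprets as "not mine,
continue down the ordinary heal path". A minimal sketch of that convention
(the function names here are illustrative stubs, not the real glusterd
entry points):

#include <errno.h>
#include <stdio.h>

/* Stand-in for glusterd_handle_heal_options_enable_disable(): returns
 * -EINVAL for any op it does not own, i.e. anything that is not an
 * enable/disable toggle. */
static int
handle_enable_disable (int op)
{
        (void)op;
        return -EINVAL;
}

static int
handle_heal_cmd (int op)
{
        int ret = handle_enable_disable (op);

        if (ret == -EINVAL) {
                ret = 0;  /* not a toggle: fall through to the regular
                             heal path (glfsheal / self-heal daemon) */
                printf ("op %d: handled as a plain heal command\n", op);
        }
        return ret;       /* toggle already handled (or failed) above */
}

int
main (void)
{
        return handle_heal_cmd (42);
}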