Diffstat (limited to 'xlators/mgmt/glusterd')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c      | 243
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c      |  18
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volgen.c     |  91
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volgen.h     |   5
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-ops.c |  90
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-set.c |  10
6 files changed, 331 insertions, 126 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 01e0658e52a..0ff2d478ef1 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -1538,7 +1538,6 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
char msg[2048] = {0,};
char *volname = NULL;
char *brick = NULL;
- char *shd_key = NULL;
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
glusterd_brickinfo_t *brickinfo = NULL;
@@ -1619,10 +1618,7 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
}
} else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
if (glusterd_is_shd_compatible_volume (volinfo)) {
- shd_key = volgen_get_shd_key(volinfo);
- shd_enabled = dict_get_str_boolean (vol_opts,
- shd_key,
- _gf_true);
+ shd_enabled = is_self_heal_enabled (volinfo, vol_opts);
} else {
ret = -1;
snprintf (msg, sizeof (msg),
@@ -3077,7 +3073,6 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
uint32_t cmd = 0;
char *volname = NULL;
char *brick = NULL;
- char *shd_key = NULL;
xlator_t *this = NULL;
glusterd_volinfo_t *volinfo = NULL;
glusterd_brickinfo_t *brickinfo = NULL;
@@ -3250,12 +3245,9 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
other_count++;
node_count++;
}
- if (glusterd_is_shd_compatible_volume (volinfo)) {
- shd_key = volgen_get_shd_key (volinfo);
- shd_enabled = dict_get_str_boolean (vol_opts,
- shd_key,
- _gf_true);
- }
+
+ if (glusterd_is_shd_compatible_volume (volinfo))
+ shd_enabled = is_self_heal_enabled (volinfo, vol_opts);
if (shd_enabled) {
ret = glusterd_add_node_to_dict
(priv->shd_svc.name, rsp_dict,
@@ -6055,13 +6047,13 @@ out:
int
_select_hxlator_with_matching_brick (xlator_t *this,
- glusterd_volinfo_t *volinfo, dict_t *dict)
+ glusterd_volinfo_t *volinfo, dict_t *dict,
+ int *index)
{
char *hostname = NULL;
char *path = NULL;
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_conf_t *priv = NULL;
- int index = 1;
int hxl_children = 0;
priv = this->private;
@@ -6071,6 +6063,8 @@ _select_hxlator_with_matching_brick (xlator_t *this,
return -1;
hxl_children = _get_hxl_children_count (volinfo);
+ if ((*index) == 0)
+ (*index)++;
cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
if (gf_uuid_is_null (brickinfo->uuid))
@@ -6078,22 +6072,21 @@ _select_hxlator_with_matching_brick (xlator_t *this,
if (!gf_uuid_compare (MY_UUID, brickinfo->uuid)) {
_add_hxlator_to_dict (dict, volinfo,
- (index - 1)/hxl_children, 0);
+ ((*index) - 1)/hxl_children, 0);
return 1;
}
- index++;
+ (*index)++;
}
return 0;
}
-int
+void
_select_hxlators_with_local_bricks (xlator_t *this, glusterd_volinfo_t *volinfo,
- dict_t *dict)
+ dict_t *dict, int *index,
+ int *hxlator_count)
{
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_conf_t *priv = NULL;
- int index = 1;
- int hxlator_count = 0;
int hxl_children = 0;
gf_boolean_t add = _gf_false;
int cmd_replica_index = -1;
@@ -6101,6 +6094,9 @@ _select_hxlators_with_local_bricks (xlator_t *this, glusterd_volinfo_t *volinfo,
priv = this->private;
hxl_children = _get_hxl_children_count (volinfo);
+ if ((*index) == 0)
+ (*index)++;
+
cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
if (gf_uuid_is_null (brickinfo->uuid))
(void)glusterd_resolve_brick (brickinfo);
@@ -6108,35 +6104,35 @@ _select_hxlators_with_local_bricks (xlator_t *this, glusterd_volinfo_t *volinfo,
if (!gf_uuid_compare (MY_UUID, brickinfo->uuid))
add = _gf_true;
- if (index % hxl_children == 0) {
+ if ((*index) % hxl_children == 0) {
if (add) {
- _add_hxlator_to_dict (dict, volinfo,
- (index - 1)/hxl_children,
- hxlator_count);
- hxlator_count++;
+ _add_hxlator_to_dict (dict, volinfo,
+ ((*index) - 1)/hxl_children,
+ (*hxlator_count));
+ (*hxlator_count)++;
}
add = _gf_false;
}
- index++;
+ (*index)++;
}
- return hxlator_count;
}
int
_select_hxlators_for_full_self_heal (xlator_t *this,
glusterd_volinfo_t *volinfo,
- dict_t *dict)
+ dict_t *dict, int *index,
+ int *hxlator_count)
{
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_conf_t *priv = NULL;
- int index = 1;
- int hxlator_count = 0;
int hxl_children = 0;
uuid_t candidate = {0};
priv = this->private;
+ if ((*index) == 0)
+ (*index)++;
if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
hxl_children = volinfo->disperse_count;
} else {
@@ -6150,19 +6146,19 @@ _select_hxlators_for_full_self_heal (xlator_t *this,
if (gf_uuid_compare (brickinfo->uuid, candidate) > 0)
gf_uuid_copy (candidate, brickinfo->uuid);
- if (index % hxl_children == 0) {
+ if ((*index) % hxl_children == 0) {
if (!gf_uuid_compare (MY_UUID, candidate)) {
_add_hxlator_to_dict (dict, volinfo,
- (index-1)/hxl_children,
- hxlator_count);
- hxlator_count++;
+ ((*index)-1)/hxl_children,
+ (*hxlator_count));
+ (*hxlator_count)++;
}
gf_uuid_clear (candidate);
}
- index++;
+ (*index)++;
}
- return hxlator_count;
+ return *hxlator_count;
}
@@ -6223,13 +6219,13 @@ out:
static int
fill_shd_status_for_local_bricks (dict_t *dict, glusterd_volinfo_t *volinfo,
- cli_cmd_type type, dict_t *req_dict)
+ cli_cmd_type type, int *index,
+ dict_t *req_dict)
{
glusterd_brickinfo_t *brickinfo = NULL;
char msg[1024] = {0,};
char key[1024] = {0,};
char value[1024] = {0,};
- int index = 0;
int ret = 0;
xlator_t *this = NULL;
int cmd_replica_index = -1;
@@ -6255,18 +6251,18 @@ fill_shd_status_for_local_bricks (dict_t *dict, glusterd_volinfo_t *volinfo,
(void)glusterd_resolve_brick (brickinfo);
if (gf_uuid_compare (MY_UUID, brickinfo->uuid)) {
- index++;
+ (*index)++;
continue;
}
if (type == PER_HEAL_XL) {
- if (cmd_replica_index != (index/volinfo->replica_count)) {
- index++;
+ if (cmd_replica_index != ((*index)/volinfo->replica_count)) {
+ (*index)++;
continue;
- }
+ }
}
- snprintf (key, sizeof (key), "%d-status",index);
+ snprintf (key, sizeof (key), "%d-status", (*index));
snprintf (value, sizeof (value), "%s %s",msg,
uuid_utoa(MY_UUID));
ret = dict_set_dynstr (dict, key, gf_strdup(value));
@@ -6276,7 +6272,7 @@ fill_shd_status_for_local_bricks (dict_t *dict, glusterd_volinfo_t *volinfo,
"set the dictionary for shd status msg");
goto out;
}
- snprintf (key, sizeof (key), "%d-shd-status",index);
+ snprintf (key, sizeof (key), "%d-shd-status", (*index));
ret = dict_set_str (dict, key, "off");
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
@@ -6285,58 +6281,29 @@ fill_shd_status_for_local_bricks (dict_t *dict, glusterd_volinfo_t *volinfo,
goto out;
}
- index++;
+ (*index)++;
}
out:
return ret;
}
-
-static int
-glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
- struct cds_list_head *selected,
- dict_t *rsp_dict)
+int
+glusterd_shd_select_brick_xlator (dict_t *dict, gf_xl_afr_op_t heal_op,
+ glusterd_volinfo_t *volinfo, int *index,
+ int *hxlator_count, dict_t *rsp_dict)
{
int ret = -1;
- char *volname = NULL;
glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *volinfo = NULL;
xlator_t *this = NULL;
char msg[2048] = {0,};
glusterd_pending_node_t *pending_node = NULL;
- gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID;
- int hxlator_count = 0;
this = THIS;
GF_ASSERT (this);
priv = this->private;
GF_ASSERT (priv);
- ret = dict_get_str (dict, "volname", &volname);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "volume name get failed");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- snprintf (msg, sizeof (msg), "Volume %s does not exist",
- volname);
-
- *op_errstr = gf_strdup (msg);
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, "%s", msg);
- goto out;
- }
-
- ret = dict_get_int32 (dict, "heal-op", (int32_t*)&heal_op);
- if (ret || (heal_op == GF_SHD_OP_INVALID)) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "heal op invalid");
- goto out;
- }
switch (heal_op) {
case GF_SHD_OP_INDEX_SUMMARY:
@@ -6352,6 +6319,7 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
ret = fill_shd_status_for_local_bricks (rsp_dict,
volinfo,
ALL_HEAL_XL,
+ index,
dict);
if (ret)
gf_msg (this->name, GF_LOG_ERROR, 0,
@@ -6373,6 +6341,7 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
ret = fill_shd_status_for_local_bricks (rsp_dict,
volinfo,
PER_HEAL_XL,
+ index,
dict);
if (ret)
gf_msg (this->name, GF_LOG_ERROR, 0,
@@ -6391,35 +6360,104 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
switch (heal_op) {
case GF_SHD_OP_HEAL_FULL:
- hxlator_count = _select_hxlators_for_full_self_heal (this,
- volinfo,
- dict);
+ _select_hxlators_for_full_self_heal (this, volinfo, dict,
+ index, hxlator_count);
break;
case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
- hxlator_count = _select_hxlator_with_matching_brick (this,
- volinfo,
- dict);
+ (*hxlator_count) += _select_hxlator_with_matching_brick (this,
+ volinfo,
+ dict,
+ index);
break;
default:
- hxlator_count = _select_hxlators_with_local_bricks (this,
- volinfo,
- dict);
+ _select_hxlators_with_local_bricks (this, volinfo, dict,
+ index, hxlator_count);
break;
}
+ ret = (*hxlator_count);
+out:
+ return ret;
+}
- if (!hxlator_count)
+
+static int
+glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
+ struct cds_list_head *selected,
+ dict_t *rsp_dict)
+{
+ int ret = -1;
+ char *volname = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_volinfo_t *dup_volinfo = NULL;
+ xlator_t *this = NULL;
+ char msg[2048] = {0,};
+ glusterd_pending_node_t *pending_node = NULL;
+ gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID;
+ int hxlator_count = 0;
+ int index = 0;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_DICT_GET_FAILED, "volume name get failed");
goto out;
- if (hxlator_count == -1) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_XLATOR_COUNT_GET_FAIL, "Could not determine the"
- "translator count");
- ret = -1;
+ }
+
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ snprintf (msg, sizeof (msg), "Volume %s does not exist",
+ volname);
+
+ *op_errstr = gf_strdup (msg);
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_VOL_NOT_FOUND, "%s", msg);
goto out;
}
- ret = dict_set_int32 (dict, "count", hxlator_count);
- if (ret)
+ ret = dict_get_int32 (dict, "heal-op", (int32_t *)&heal_op);
+ if (ret || (heal_op == GF_SHD_OP_INVALID)) {
+ gf_msg ("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_DICT_GET_FAILED, "heal op invalid");
goto out;
+ }
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ ret = glusterd_create_sub_tier_volinfo (volinfo, &dup_volinfo,
+ _gf_false, volname);
+ if (ret < 0)
+ goto out;
+
+ ret = glusterd_shd_select_brick_xlator (dict, heal_op,
+ dup_volinfo,
+ &index, &hxlator_count,
+ rsp_dict);
+ glusterd_volinfo_delete (dup_volinfo);
+ if (ret < 0)
+ goto out;
+ ret = glusterd_create_sub_tier_volinfo (volinfo, &dup_volinfo,
+ _gf_true, volname);
+ if (ret < 0)
+ goto out;
+ ret = glusterd_shd_select_brick_xlator (dict, heal_op,
+ dup_volinfo,
+ &index, &hxlator_count,
+ rsp_dict);
+ glusterd_volinfo_delete (dup_volinfo);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = glusterd_shd_select_brick_xlator (dict, heal_op,
+ volinfo,
+ &index, &hxlator_count,
+ rsp_dict);
+ if (ret < 0)
+ goto out;
+ }
pending_node = GF_CALLOC (1, sizeof (*pending_node),
gf_gld_mt_pending_node_t);
@@ -6433,6 +6471,21 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
pending_node = NULL;
}
+ if (!hxlator_count)
+ goto out;
+ if (hxlator_count == -1) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_XLATOR_COUNT_GET_FAIL, "Could not determine the "
+ "translator count");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_int32 (dict, "count", hxlator_count);
+ if (ret)
+ goto out;
+
+
out:
gf_msg_debug (THIS->name, 0, "Returning ret %d", ret);
return ret;
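
The glusterd-op-sm.c hunks above split brick selection out of glusterd_bricks_select_heal_volume() into glusterd_shd_select_brick_xlator() and pass index/hxlator_count by pointer, so a tiered volume can run the selection once for the cold sub-volume and once for the hot one while the counters keep accumulating. A minimal standalone sketch of that accumulation, not glusterd code, with made-up brick and replica counts:

/* Standalone sketch: mirrors how the refactored selection keeps a single
 * healer-xlator index running across two brick lists, the way
 * glusterd_bricks_select_heal_volume now calls
 * glusterd_shd_select_brick_xlator once per tier with the same
 * &index / &hxlator_count. */
#include <stdio.h>

/* Count one healer xlator per group of 'hxl_children' bricks. */
static void
select_hxlators (int brick_count, int hxl_children,
                 int *index, int *hxlator_count)
{
        int i = 0;

        if ((*index) == 0)          /* 1-based index, as in the patch */
                (*index)++;

        for (i = 0; i < brick_count; i++) {
                if ((*index) % hxl_children == 0) {
                        printf ("hxlator %d -> afr/ec subvol %d\n",
                                *hxlator_count,
                                ((*index) - 1) / hxl_children);
                        (*hxlator_count)++;
                }
                (*index)++;
        }
}

int
main (void)
{
        int index = 0;
        int hxlator_count = 0;

        /* cold tier: 4 bricks, replica 2 -> healer xlators 0 and 1 */
        select_hxlators (4, 2, &index, &hxlator_count);
        /* hot tier: 2 bricks, replica 2 -> healer xlator 2 */
        select_hxlators (2, 2, &index, &hxlator_count);

        printf ("total hxlator_count = %d\n", hxlator_count);
        return 0;
}

Because the second pass reuses the same counters, the hot tier's healer xlator is numbered after the cold tier's, which is why the refactor must not reset index between the two calls.
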
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 39518d1b534..e368147584f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -6368,14 +6368,24 @@ out:
return ret;
}
+static inline int
+is_replica_volume (int type)
+{
+ if (type == GF_CLUSTER_TYPE_REPLICATE ||
+ type == GF_CLUSTER_TYPE_STRIPE_REPLICATE)
+ return 1;
+ return 0;
+}
gf_boolean_t
glusterd_is_volume_replicate (glusterd_volinfo_t *volinfo)
{
gf_boolean_t replicates = _gf_false;
- if (volinfo && ((volinfo->type == GF_CLUSTER_TYPE_REPLICATE) ||
- (volinfo->type == GF_CLUSTER_TYPE_STRIPE_REPLICATE)))
- replicates = _gf_true;
- return replicates;
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ replicates = is_replica_volume (volinfo->tier_info.cold_type) |
+ is_replica_volume (volinfo->tier_info.hot_type);
+ return replicates;
+ }
+ return is_replica_volume ((volinfo->type));
}
gf_boolean_t
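
The glusterd-utils.c hunk makes glusterd_is_volume_replicate() consider a tiered volume replicated when either the cold or the hot tier is (stripe-)replicate. A hedged standalone mirror of that check, using stand-in enum values and a stand-in struct rather than the real glusterd types:

/* Illustrative mirror only, not the glusterd source. */
#include <stdbool.h>
#include <stdio.h>

enum { TYPE_NONE, TYPE_REPLICATE, TYPE_STRIPE_REPLICATE,
       TYPE_DISPERSE, TYPE_TIER };

struct vol { int type; int cold_type; int hot_type; };

static bool is_replica (int type)
{
        return type == TYPE_REPLICATE || type == TYPE_STRIPE_REPLICATE;
}

static bool volume_is_replicate (const struct vol *v)
{
        /* a tiered volume counts as replicate if either tier is */
        if (v->type == TYPE_TIER)
                return is_replica (v->cold_type) || is_replica (v->hot_type);
        return is_replica (v->type);
}

int main (void)
{
        struct vol tiered = { TYPE_TIER, TYPE_DISPERSE, TYPE_REPLICATE };
        printf ("%d\n", volume_is_replicate (&tiered));   /* prints 1 */
        return 0;
}
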
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
index 09bc94621f4..a13e70eea35 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
@@ -19,6 +19,7 @@
#include "logging.h"
#include "dict.h"
#include "graph-utils.h"
+#include "common-utils.h"
#include "glusterd-store.h"
#include "glusterd-hooks.h"
#include "trie.h"
@@ -3290,6 +3291,7 @@ volgen_graph_build_afr_clusters (volgen_graph_t *graph,
if (volinfo->tier_info.cold_type == GF_CLUSTER_TYPE_REPLICATE)
start_count = volinfo->tier_info.cold_brick_count /
volinfo->tier_info.cold_replica_count;
+
if (volinfo->tier_info.cur_tier_hot)
clusters = volgen_link_bricks_from_list_head_start (graph,
volinfo,
@@ -3567,6 +3569,7 @@ volume_volgen_graph_build_clusters_tier (volgen_graph_t *graph,
start_count = volinfo->tier_info.cold_brick_count /
volinfo->tier_info.cold_replica_count;
}
+
if (volinfo->dist_leaf_count != 1) {
ret = volgen_link_bricks_from_list_head_start
(graph, volinfo,
@@ -4222,11 +4225,11 @@ nfs_option_handler (volgen_graph_t *graph,
}
char*
-volgen_get_shd_key (glusterd_volinfo_t *volinfo)
+volgen_get_shd_key (int type)
{
char *key = NULL;
- switch (volinfo->type) {
+ switch (type) {
case GF_CLUSTER_TYPE_REPLICATE:
case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
key = "cluster.self-heal-daemon";
@@ -4234,9 +4237,6 @@ volgen_get_shd_key (glusterd_volinfo_t *volinfo)
case GF_CLUSTER_TYPE_DISPERSE:
key = "cluster.disperse-self-heal-daemon";
break;
- case GF_CLUSTER_TYPE_TIER:
- key = "cluster.tier-self-heal-daemon";
- break;
default:
key = NULL;
break;
@@ -4274,22 +4274,51 @@ volgen_graph_set_iam_shd (volgen_graph_t *graph)
}
static int
+prepare_shd_volume_options_for_tier (glusterd_volinfo_t *volinfo,
+ dict_t *set_dict)
+{
+ int ret = -1;
+ char *key = NULL;
+
+ key = volgen_get_shd_key (volinfo->tier_info.cold_type);
+ if (key) {
+ ret = dict_set_str (set_dict, key, "enable");
+ if (ret)
+ goto out;
+ }
+
+ key = volgen_get_shd_key (volinfo->tier_info.hot_type);
+ if (key) {
+ ret = dict_set_str (set_dict, key, "enable");
+ if (ret)
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static int
prepare_shd_volume_options (glusterd_volinfo_t *volinfo,
dict_t *mod_dict, dict_t *set_dict)
{
char *key = NULL;
int ret = 0;
- key = volgen_get_shd_key (volinfo);
- if (!key) {
- ret = -1;
- goto out;
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ ret = prepare_shd_volume_options_for_tier (volinfo, set_dict);
+ if (ret)
+ goto out;
+ } else {
+ key = volgen_get_shd_key (volinfo->type);
+ if (!key) {
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_str (set_dict, key, "enable");
+ if (ret)
+ goto out;
}
- ret = dict_set_str (set_dict, key, "enable");
- if (ret)
- goto out;
-
ret = dict_set_uint32 (set_dict, "trusted-client", GF_CLIENT_TRUSTED);
if (ret)
goto out;
@@ -4400,6 +4429,42 @@ out:
return clusters;
}
+gf_boolean_t
+is_self_heal_enabled (glusterd_volinfo_t *volinfo, dict_t *dict)
+{
+
+ char *shd_key = NULL;
+ gf_boolean_t shd_enabled = _gf_false;
+
+ GF_VALIDATE_OR_GOTO ("glusterd", volinfo, out);
+
+ switch (volinfo->type) {
+ case GF_CLUSTER_TYPE_REPLICATE:
+ case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
+ case GF_CLUSTER_TYPE_DISPERSE:
+ shd_key = volgen_get_shd_key (volinfo->type);
+ shd_enabled = dict_get_str_boolean (dict, shd_key,
+ _gf_true);
+ break;
+ case GF_CLUSTER_TYPE_TIER:
+ shd_key = volgen_get_shd_key (volinfo->tier_info.cold_type);
+ if (shd_key)
+ shd_enabled = dict_get_str_boolean (dict, shd_key,
+ _gf_true);
+
+ shd_key = volgen_get_shd_key (volinfo->tier_info.hot_type);
+ if (shd_key)
+ shd_enabled |= dict_get_str_boolean (dict, shd_key,
+ _gf_true);
+
+ break;
+ default:
+ break;
+ }
+out:
+ return shd_enabled;
+}
+
static int
build_rebalance_volfile (glusterd_volinfo_t *volinfo, char *filepath,
dict_t *mod_dict)
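
In glusterd-volgen.c, volgen_get_shd_key() now takes a cluster type instead of a volinfo (the tier-specific key is dropped), and the new is_self_heal_enabled() reports the self-heal daemon as enabled for a tiered volume if the option for either tier resolves to true. A simplified, self-contained sketch of that lookup-and-OR behaviour; the type values and option store are stand-ins, not glusterd APIs:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum { T_REPLICATE, T_DISPERSE, T_TIER };

/* one key per cluster type, NULL when the type has no self-heal daemon */
static const char *shd_key (int type)
{
        switch (type) {
        case T_REPLICATE: return "cluster.self-heal-daemon";
        case T_DISPERSE:  return "cluster.disperse-self-heal-daemon";
        default:          return NULL;
        }
}

/* Pretend option store: only the disperse key was explicitly disabled. */
static bool opt_enabled (const char *key)
{
        if (key && strcmp (key, "cluster.disperse-self-heal-daemon") == 0)
                return false;
        return true;              /* default, like _gf_true in the patch */
}

int main (void)
{
        int cold = T_DISPERSE, hot = T_REPLICATE;
        bool enabled = false;
        const char *k;

        /* OR the per-tier answers, as is_self_heal_enabled() does */
        if ((k = shd_key (cold)))
                enabled |= opt_enabled (k);
        if ((k = shd_key (hot)))
                enabled |= opt_enabled (k);

        printf ("self-heal enabled for tier: %d\n", enabled);  /* prints 1 */
        return 0;
}
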
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.h b/xlators/mgmt/glusterd/src/glusterd-volgen.h
index 2dccfca8407..f5f922475f0 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.h
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.h
@@ -280,11 +280,12 @@ gd_is_boolean_option (char *key);
char*
-volgen_get_shd_key (glusterd_volinfo_t *volinfo);
+volgen_get_shd_key (int type);
int
glusterd_volopt_validate (glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
char *value, char **op_errstr);
-
+gf_boolean_t
+is_self_heal_enabled (glusterd_volinfo_t *volinfo, dict_t *dict);
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index a07d7612576..b00539d4efa 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -669,14 +669,73 @@ out:
return ret;
}
-
int
glusterd_handle_cli_delete_volume (rpcsvc_request_t *req)
{
return glusterd_big_locked_handler (req,
__glusterd_handle_cli_delete_volume);
}
+static char*
+_get_shd_key (int type)
+{
+ char *key = NULL;
+
+ switch (type) {
+ case GF_CLUSTER_TYPE_REPLICATE:
+ case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
+ key = "cluster.self-heal-daemon";
+ break;
+ case GF_CLUSTER_TYPE_DISPERSE:
+ key = "cluster.disperse-self-heal-daemon";
+ break;
+ default:
+ key = NULL;
+ break;
+ }
+ return key;
+}
+static int
+glusterd_handle_shd_option_for_tier (glusterd_volinfo_t *volinfo,
+ char *value, dict_t *dict)
+{
+ int count = 0;
+ char dict_key[1024] = {0, };
+ char *key = NULL;
+ int ret = 0;
+
+ key = _get_shd_key (volinfo->tier_info.cold_type);
+ if (key) {
+ count++;
+ snprintf (dict_key, sizeof (dict_key), "key%d", count);
+ ret = dict_set_str (dict, dict_key, key);
+ if (ret)
+ goto out;
+ snprintf (dict_key, sizeof (dict_key), "value%d", count);
+ ret = dict_set_str (dict, dict_key, value);
+ if (ret)
+ goto out;
+ }
+
+ key = _get_shd_key (volinfo->tier_info.hot_type);
+ if (key) {
+ count++;
+ snprintf (dict_key, sizeof (dict_key), "key%d", count);
+ ret = dict_set_str (dict, dict_key, key);
+ if (ret)
+ goto out;
+ snprintf (dict_key, sizeof (dict_key), "value%d", count);
+ ret = dict_set_str (dict, dict_key, value);
+ if (ret)
+ goto out;
+ }
+
+ ret = dict_set_int32 (dict, "count", count);
+ if (ret)
+ goto out;
+out:
+ return ret;
+}
static int
glusterd_handle_heal_enable_disable (rpcsvc_request_t *req, dict_t *dict,
glusterd_volinfo_t *volinfo)
@@ -699,23 +758,31 @@ glusterd_handle_heal_enable_disable (rpcsvc_request_t *req, dict_t *dict,
goto out;
}
- key = volgen_get_shd_key (volinfo);
+ if (heal_op == GF_SHD_OP_HEAL_ENABLE) {
+ value = "enable";
+ } else if (heal_op == GF_SHD_OP_HEAL_DISABLE) {
+ value = "disable";
+ }
+
+ /* Convert this command to volume-set command based on volume type */
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ ret = glusterd_handle_shd_option_for_tier (volinfo, value,
+ dict);
+ if (!ret)
+ goto set_volume;
+ goto out;
+ }
+
+ key = volgen_get_shd_key (volinfo->type);
if (!key) {
ret = -1;
goto out;
}
- /* Convert this command to volume-set command based on volume type */
ret = dict_set_str (dict, "key1", key);
if (ret)
goto out;
- if (heal_op == GF_SHD_OP_HEAL_ENABLE) {
- value = "enable";
- } else if (heal_op == GF_SHD_OP_HEAL_DISABLE) {
- value = "disable";
- }
-
ret = dict_set_str (dict, "value1", value);
if (ret)
goto out;
@@ -724,6 +791,7 @@ glusterd_handle_heal_enable_disable (rpcsvc_request_t *req, dict_t *dict,
if (ret)
goto out;
+set_volume:
ret = glusterd_op_begin_synctask (req, GD_OP_SET_VOLUME, dict);
out:
@@ -1822,9 +1890,7 @@ glusterd_op_stage_heal_volume (dict_t *dict, char **op_errstr)
ret = 0;
goto out;
}
-
- enabled = dict_get_str_boolean (opt_dict, volgen_get_shd_key (volinfo),
- 1);
+ enabled = is_self_heal_enabled (volinfo, opt_dict);
if (!enabled) {
ret = -1;
snprintf (msg, sizeof (msg), "Self-heal-daemon is "
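
The glusterd-volume-ops.c hunks convert "gluster volume heal <volname> enable|disable" on a tiered volume into a regular volume-set operation: one keyN/valueN pair per tier plus a count. A hedged sketch of the key layout that glusterd_handle_shd_option_for_tier() produces, with printf standing in for the real dict_set_str() calls and example tier types:

#include <stdio.h>

static const char *shd_key (int replicate)
{
        return replicate ? "cluster.self-heal-daemon"
                         : "cluster.disperse-self-heal-daemon";
}

int main (void)
{
        const char *value = "enable";      /* GF_SHD_OP_HEAL_ENABLE */
        int count = 0;
        int i = 0;

        /* example tiers: cold = disperse, hot = replicate */
        const char *keys[] = { shd_key (0), shd_key (1) };

        for (i = 0; i < 2; i++) {
                if (!keys[i])              /* mirrors the NULL-key check */
                        continue;
                count++;
                printf ("key%d   = %s\n", count, keys[i]);
                printf ("value%d = %s\n", count, value);
        }
        printf ("count  = %d\n", count);
        return 0;
}
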
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 8fdee165c68..58405b67363 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -412,6 +412,16 @@ validate_disperse_heal_enable_disable (glusterd_volinfo_t *volinfo,
char **op_errstr)
{
int ret = 0;
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ if (volinfo->tier_info.cold_type != GF_CLUSTER_TYPE_DISPERSE &&
+ volinfo->tier_info.hot_type != GF_CLUSTER_TYPE_DISPERSE) {
+ gf_asprintf (op_errstr, "Volume %s does not contain a "
+ "disperse type sub-volume", volinfo->volname);
+
+ return -1;
+ } else
+ return 0;
+ }
if (volinfo->type != GF_CLUSTER_TYPE_DISPERSE) {
gf_asprintf (op_errstr, "Volume %s is not of disperse type",