From 03f731a8b32db7bef7c5e9ffc11c16f670ffe960 Mon Sep 17 00:00:00 2001
From: Mohammed Rafi KC
Date: Mon, 23 Nov 2015 12:05:54 +0530
Subject: glusterd: Change volume start into v3 framework

As part of volume start, if the volume is of tier type then we need to
start the tiering daemon as well. But all of the bricks must be started
before the tier daemon. By moving volume start into the mgmt v3
framework, we can start the tier daemon in the post-validate phase.

Change-Id: If921067f4739e6b9a3239fc5717696eaf382c22a
BUG: 1284372
Signed-off-by: Mohammed Rafi KC
Reviewed-on: http://review.gluster.org/12718
Tested-by: NetBSD Build System
Tested-by: Gluster Build System
Reviewed-by: Avra Sengupta
Reviewed-by: Atin Mukherjee
---
 xlators/mgmt/glusterd/src/glusterd-mgmt.c       | 62 ++++++++++++++++++++++++-
 xlators/mgmt/glusterd/src/glusterd-op-sm.c      |  2 +-
 xlators/mgmt/glusterd/src/glusterd-op-sm.h      |  5 ++
 xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 43 +++++++++++++----
 4 files changed, 100 insertions(+), 12 deletions(-)

(limited to 'xlators/mgmt/glusterd/src')

diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index 2814bb05430..ab706ce6cd5 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -24,6 +24,7 @@
 #include "glusterd-snapshot-utils.h"
 #include "glusterd-messages.h"
 #include "glusterd-errno.h"
+#include "glusterd-hooks.h"
 
 extern struct rpc_clnt_program gd_mgmt_v3_prog;
 
@@ -184,7 +185,17 @@ gd_mgmt_v3_pre_validate_fn (glusterd_op_t op, dict_t *dict,
                 if (ret) {
                         gf_msg (this->name, GF_LOG_WARNING, 0,
                                 GD_MSG_PRE_VALIDATION_FAIL,
-                                "Replace-brick prevalidation failed.");
+                                "ADD-brick prevalidation failed.");
+                        goto out;
+                }
+                break;
+        case GD_OP_START_VOLUME:
+                ret = glusterd_op_stage_start_volume (dict, op_errstr,
+                                                      rsp_dict);
+                if (ret) {
+                        gf_msg (this->name, GF_LOG_WARNING, 0,
+                                GD_MSG_PRE_VALIDATION_FAIL,
+                                "Volume start prevalidation failed.");
                         goto out;
                 }
                 break;
@@ -249,6 +260,7 @@ gd_mgmt_v3_commit_fn (glusterd_op_t op, dict_t *dict,
         GF_VALIDATE_OR_GOTO (this->name, op_errno, out);
         GF_ASSERT (rsp_dict);
 
+        glusterd_op_commit_hook (op, dict, GD_COMMIT_HOOK_PRE);
         switch (op) {
         case GD_OP_SNAP:
         {
@@ -285,6 +297,19 @@ gd_mgmt_v3_commit_fn (glusterd_op_t op, dict_t *dict,
                 break;
         }
+        case GD_OP_START_VOLUME:
+        {
+                ret = glusterd_op_start_volume (dict, op_errstr);
+                if (ret) {
+                        gf_msg (this->name, GF_LOG_ERROR, 0,
+                                GD_MSG_COMMIT_OP_FAIL,
+                                "Volume start commit failed.");
+                        goto out;
+                }
+                break;
+        }
+
         default:
                 break;
         }
@@ -311,6 +336,9 @@ gd_mgmt_v3_post_validate_fn (glusterd_op_t op, int32_t op_ret, dict_t *dict,
         GF_ASSERT (op_errstr);
         GF_ASSERT (rsp_dict);
 
+        if (op_ret == 0)
+                glusterd_op_commit_hook (op, dict, GD_COMMIT_HOOK_POST);
+
         switch (op) {
         case GD_OP_SNAP:
         {
@@ -329,7 +357,7 @@ gd_mgmt_v3_post_validate_fn (glusterd_op_t op, int32_t op_ret, dict_t *dict,
         {
                 ret = dict_get_str (dict, "volname", &volname);
                 if (ret) {
-                        gf_msg ("glusterd", GF_LOG_ERROR, errno,
+                        gf_msg ("glusterd", GF_LOG_ERROR, 0,
                                 GD_MSG_DICT_GET_FAILED, "Unable to get"
                                 " volume name");
                         goto out;
@@ -353,6 +381,34 @@ gd_mgmt_v3_post_validate_fn (glusterd_op_t op, int32_t op_ret, dict_t *dict,
                 break;
         }
+        case GD_OP_START_VOLUME:
+        {
+                ret = dict_get_str (dict, "volname", &volname);
+                if (ret) {
+                        gf_msg ("glusterd", GF_LOG_ERROR, 0,
+                                GD_MSG_DICT_GET_FAILED, "Unable to get"
+                                " volume name");
+                        goto out;
+                }
+
+                ret = glusterd_volinfo_find (volname, &volinfo);
+                if (ret) {
+                        gf_msg ("glusterd", GF_LOG_ERROR, EINVAL,
+                                GD_MSG_VOL_NOT_FOUND, "Unable to "
+                                "find volinfo");
+                        goto out;
+                }
+
+                if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+                        glusterd_defrag_info_set (volinfo, dict,
+                                                  GF_DEFRAG_CMD_START_TIER,
+                                                  GF_DEFRAG_CMD_START,
+                                                  GD_OP_REBALANCE);
+                        glusterd_restart_rebalance_for_volume (volinfo);
+                }
+                break;
+        }
+
         default:
                 break;
         }
@@ -611,6 +667,7 @@ glusterd_pre_validate_aggr_rsp_dict (glusterd_op_t op,
                         goto out;
                 }
                 break;
+        case GD_OP_START_VOLUME:
         case GD_OP_ADD_BRICK:
                 ret = glusterd_aggr_brick_mount_dirs (aggr, rsp);
                 if (ret) {
@@ -918,6 +975,7 @@ glusterd_mgmt_v3_build_payload (dict_t **req, char **op_errstr, dict_t *dict,
         case GD_OP_SNAP:
                 dict_copy (dict, req_dict);
                 break;
+        case GD_OP_START_VOLUME:
         case GD_OP_ADD_BRICK:
         case GD_OP_REPLACE_BRICK:
         {
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 8bc47fc8c49..ec635e22577 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -4711,7 +4711,7 @@ out:
         return;
 }
 
-static int
+int
 glusterd_op_commit_hook (glusterd_op_t op, dict_t *op_ctx,
                          glusterd_commit_hook_type_t type)
 {
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
index 67b2742520e..454181d188f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
@@ -22,6 +22,7 @@
 #include "byte-order.h"
 #include "glusterd.h"
 #include "protocol-common.h"
+#include "glusterd-hooks.h"
 
 #define GD_OP_PROTECTED   (0x02)
 #define GD_OP_UNPROTECTED (0x04)
@@ -167,6 +168,10 @@ typedef struct glusterd_all_volume_options {
         char    *option;
 } glusterd_all_vol_opts;
 
+int
+glusterd_op_commit_hook (glusterd_op_t op, dict_t *op_ctx,
+                         glusterd_commit_hook_type_t type);
+
 int
 glusterd_op_sm_new_event (glusterd_op_sm_event_type_t event_type,
                           glusterd_op_sm_event_t **new_event);
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index d997d60cd18..72c3894ec96 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -28,6 +28,7 @@
 #include "glusterd-svc-helper.h"
 #include "glusterd-shd-svc.h"
 #include "glusterd-snapd-svc.h"
+#include "glusterd-mgmt.h"
 #include
 #include
 
@@ -453,11 +454,14 @@ __glusterd_handle_cli_start_volume (rpcsvc_request_t *req)
         glusterd_op_t                   cli_op = GD_OP_START_VOLUME;
         char                            errstr[2048] = {0,};
         xlator_t                        *this = NULL;
+        glusterd_conf_t                 *conf = NULL;
 
         this = THIS;
         GF_ASSERT (this);
         GF_ASSERT (req);
+        conf = this->private;
+        GF_ASSERT (conf);
 
         ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
         if (ret < 0) {
                 snprintf (errstr, sizeof (errstr), "Failed to decode message "
@@ -497,8 +501,18 @@ __glusterd_handle_cli_start_volume (rpcsvc_request_t *req)
         gf_msg_debug (this->name, 0, "Received start vol req"
                       " for volume %s", volname);
 
-        ret = glusterd_op_begin_synctask (req, GD_OP_START_VOLUME, dict);
-
+        if (conf->op_version <= GD_OP_VERSION_3_7_6) {
+                gf_msg_debug (this->name, 0, "The cluster is operating at "
+                              "version less than or equal to %d. Volume start "
+                              "falling back to syncop framework.",
+                              GD_OP_VERSION_3_7_6);
+                ret = glusterd_op_begin_synctask (req, GD_OP_START_VOLUME,
+                                                  dict);
+        } else {
+                ret = glusterd_mgmt_v3_initiate_all_phases (req,
+                                                            GD_OP_START_VOLUME,
+                                                            dict);
+        }
 out:
         free (cli_req.dict.dict_val); //its malloced by xdr
@@ -2544,15 +2558,26 @@ glusterd_op_start_volume (dict_t *dict, char **op_errstr)
                 if (ret)
                         goto out;
         }
-
-        if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
-                glusterd_defrag_info_set (volinfo, dict,
-                                          GF_DEFRAG_CMD_START_TIER,
-                                          GF_DEFRAG_CMD_START,
-                                          GD_OP_REBALANCE);
-                glusterd_restart_rebalance_for_volume (volinfo);
+        if (conf->op_version <= GD_OP_VERSION_3_7_6) {
+                /*
+                 * Starting the tier daemon on the originator node will fail
+                 * if at least one peer hosts a brick for the volume, because
+                 * the bricks on the peers have not yet started when the
+                 * commit runs on the originator node. Please upgrade to a
+                 * version greater than GD_OP_VERSION_3_7_6.
+                 */
+                if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+                        glusterd_defrag_info_set (volinfo, dict,
+                                                  GF_DEFRAG_CMD_START_TIER,
+                                                  GF_DEFRAG_CMD_START,
+                                                  GD_OP_REBALANCE);
+                        glusterd_restart_rebalance_for_volume (volinfo);
+                }
+        } else {
+                /* Starting the tier daemon is moved into the post-validate phase */
         }
+
         ret = glusterd_svcs_manager (volinfo);
 
 out:
-- 
cgit
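
Why moving the tier-daemon start to post-validate helps: in the mgmt v3 framework each phase (lock, pre-validate, commit, post-validate, unlock) is driven to completion on every peer before the next phase begins, so by the time post-validate runs, the commit phase has already started the bricks on all nodes. Under the older syncop path the originator commits before the peers do, which is exactly the failure mode described in the comment above. The short standalone C sketch below models only that ordering; the peer count, phase names, and run_phase_on_peer() helper are illustrative and not part of glusterd.

/* Toy model of mgmt v3 phase ordering (illustrative only, not glusterd
 * code): every peer finishes a phase before any peer enters the next,
 * so post-validate can assume commit has already started all bricks. */
#include <stdio.h>

enum phase { LOCK, PRE_VALIDATE, COMMIT, POST_VALIDATE, UNLOCK, PHASE_MAX };

static const char *phase_name[PHASE_MAX] = {
        "lock", "pre-validate", "commit", "post-validate", "unlock"
};

/* Hypothetical per-peer step: in the real flow, bricks start during
 * "commit" and the tier daemon starts during "post-validate". */
static void
run_phase_on_peer (int peer, enum phase p)
{
        printf ("peer%d: %s\n", peer, phase_name[p]);
}

int
main (void)
{
        int        npeers = 3;
        int        peer   = 0;
        enum phase p      = LOCK;

        /* Outer loop over phases, inner loop over peers: a phase completes
         * cluster-wide before the next phase begins. */
        for (p = LOCK; p < PHASE_MAX; p++)
                for (peer = 0; peer < npeers; peer++)
                        run_phase_on_peer (peer, p);

        return 0;
}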