From 16e71bf8b76eb421e30f5fe239601ba85710c983 Mon Sep 17 00:00:00 2001
From: Kaushal M
Date: Thu, 6 Feb 2014 13:04:32 +0530
Subject: glusterd: Add a barrier brick-op

This patch introduces a new 'barrier' brick-op which will be used to
activate/deactivate barriering on the bricks. This includes barriering
in the barrier xlator and in the changelog xlator. All the required
code has been added, including a bricks select function, a payload
builder and a brick-op handler.

Change-Id: I91d9d77f691c2e89823f7dc4e84900ec40dc4dd2
BUG: 1060002
Signed-off-by: Kaushal M
Reviewed-on: http://review.gluster.org/6943
Reviewed-by: Krishnan Parthasarathi
Tested-by: Gluster Build System
Reviewed-by: Vijay Bellur
---
 glusterfsd/src/glusterfsd-mgmt.c             | 111 +++++++++++++++++++++++++++
 rpc/rpc-lib/src/protocol-common.h            |   1 +
 xlators/mgmt/glusterd/src/glusterd-op-sm.c   |  66 ++++++++++++++++
 xlators/mgmt/glusterd/src/glusterd-rpc-ops.c |   1 +
 xlators/mgmt/glusterd/src/glusterd-syncop.c  |   3 +-
 xlators/mgmt/glusterd/src/glusterd.h         |   1 +
 6 files changed, 182 insertions(+), 1 deletion(-)

diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index c42228a047a..b1f00691c8c 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -1205,6 +1205,116 @@ out:
         return ret;
 }
 
+
+int
+glusterfs_handle_barrier (rpcsvc_request_t *req)
+{
+        int                   ret = -1;
+        gd1_mgmt_brick_op_req brick_req = {0,};
+        gd1_mgmt_brick_op_rsp brick_rsp = {0,};
+        glusterfs_ctx_t       *ctx = NULL;
+        glusterfs_graph_t     *active = NULL;
+        xlator_t              *any = NULL;
+        xlator_t              *xlator = NULL;
+        xlator_t              *old_THIS = NULL;
+        dict_t                *dict = NULL;
+        char                  name[1024] = {0,};
+
+        GF_ASSERT (req);
+
+        ret = xdr_to_generic(req->msg[0], &brick_req,
+                             (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+        if (ret < 0) {
+                req->rpc_err = GARBAGE_ARGS;
+                goto out;
+        }
+
+        ctx = glusterfsd_ctx;
+        GF_ASSERT (ctx);
+        active = ctx->active;
+        any = active->first;
+
+        dict = dict_new();
+        if (!dict) {
+                ret = -1;
+                goto out;
+        }
+
+        ret = dict_unserialize(brick_req.input.input_val,
+                               brick_req.input.input_len, &dict);
+        if (ret < 0) {
+                gf_log (THIS->name, GF_LOG_ERROR, "Failed to unserialize "
+                        "request dictionary");
+                goto out;
+        }
+
+        brick_rsp.op_ret = 0;
+        brick_rsp.op_errstr = ""; // initializing to prevent serialization failures
+        old_THIS = THIS;
+
+        /* Send barrier request to the barrier xlator */
+        snprintf (name, sizeof (name), "%s-barrier", brick_req.name);
+        xlator = xlator_search_by_name(any, name);
+        if (!xlator) {
+                ret = -1;
+                gf_log (THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
+                        name);
+                goto out;
+        }
+
+        THIS = xlator;
+        // TODO: Extend this to accept return of errnos
+        ret = xlator->notify (xlator, GF_EVENT_TRANSLATOR_OP, dict);
+        if (ret) {
+                brick_rsp.op_ret = ret;
+                brick_rsp.op_errstr = gf_strdup ("Failed to reconfigure "
+                                                 "barrier.");
+                goto submit_reply;
+        }
+
+        /* Reset THIS so that we have it correct in case of an error below
+         */
+        THIS = old_THIS;
+
+        /* Send barrier request to changelog as well */
+        /* Commenting out the below code till the changelog changes are merged
+
+        memset (name, 0, sizeof (name));
+        snprintf (name, sizeof (name), "%s-changelog", brick_req.name);
+        xlator = xlator_search_by_name(any, name);
+        if (!xlator) {
+                ret = -1;
+                gf_log (THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
+                        name);
+                goto out;
+        }
+
+        THIS = xlator;
+        ret = xlator->reconfigure (xlator, dict);
+
+        if (ret) {
+                brick_rsp.op_ret = ret;
+                brick_rsp.op_errstr = gf_strdup ("Failed to reconfigure "
+                                                 "changelog.");
+                goto submit_reply;
+        }
+        */
+submit_reply:
+        THIS = old_THIS;
+
+        ret = glusterfs_submit_reply (req, &brick_rsp, NULL, 0, NULL,
+                                      (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
+
+out:
+        if (dict)
+                dict_unref (dict);
+        free (brick_req.input.input_val);
+
+        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
+
 int
 glusterfs_handle_rpc_msg (rpcsvc_request_t *req)
 {
@@ -1270,6 +1380,7 @@ rpcsvc_actor_t glusterfs_actors[] = {
         [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE", GLUSTERD_NODE_PROFILE, glusterfs_handle_nfs_profile, NULL, 0, DRC_NA},
         [GLUSTERD_NODE_STATUS] = {"NFS STATUS", GLUSTERD_NODE_STATUS, glusterfs_handle_node_status, NULL, 0, DRC_NA},
         [GLUSTERD_VOLUME_BARRIER_OP] = {"VOLUME BARRIER OP", GLUSTERD_VOLUME_BARRIER_OP, glusterfs_handle_volume_barrier_op, NULL, 0, DRC_NA},
+        [GLUSTERD_BRICK_BARRIER] = {"BARRIER", GLUSTERD_BRICK_BARRIER, glusterfs_handle_barrier, NULL, 0, DRC_NA},
 };
 
 struct rpcsvc_program glusterfs_mop_prog = {
diff --git a/rpc/rpc-lib/src/protocol-common.h b/rpc/rpc-lib/src/protocol-common.h
index e4f7fbf3ad8..25b0085b37f 100644
--- a/rpc/rpc-lib/src/protocol-common.h
+++ b/rpc/rpc-lib/src/protocol-common.h
@@ -202,6 +202,7 @@ enum glusterd_brick_procnum {
         GLUSTERD_NODE_PROFILE,
         GLUSTERD_NODE_STATUS,
         GLUSTERD_VOLUME_BARRIER_OP,
+        GLUSTERD_BRICK_BARRIER,
         GLUSTERD_BRICK_MAXVALUE,
 };
 
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 9b130b4c6b3..baf54def971 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -531,6 +531,18 @@ glusterd_brick_op_build_payload (glusterd_op_t op, glusterd_brickinfo_t *brickin
                 brick_req->name = gf_strdup (name);
                 break;
 
+        case GD_OP_BARRIER:
+                brick_req = GF_CALLOC (1, sizeof(*brick_req),
+                                       gf_gld_mt_mop_brick_req_t);
+                if (!brick_req)
+                        goto out;
+                brick_req->op = GLUSTERD_BRICK_BARRIER;
+                ret = dict_get_str(dict, "volname", &volname);
+                if (ret)
+                        goto out;
+                brick_req->name = gf_strdup (volname);
+                break;
+
         default:
                 goto out;
         break;
@@ -5726,6 +5738,56 @@ out:
         return ret;
 }
 
+/* Select the bricks to send the barrier request to.
+ * This selects the bricks of the given volume which are present on this peer
+ * and are running.
+ */
+static int
+glusterd_bricks_select_barrier (dict_t *dict, struct list_head *selected)
+{
+        int                      ret = -1;
+        char                     *volname = NULL;
+        glusterd_volinfo_t       *volinfo = NULL;
+        glusterd_brickinfo_t     *brickinfo = NULL;
+        glusterd_pending_node_t  *pending_node = NULL;
+
+        GF_ASSERT (dict);
+
+        ret = dict_get_str (dict, "volname", &volname);
+        if (ret) {
+                gf_log (THIS->name, GF_LOG_ERROR, "Failed to get volname");
+                goto out;
+        }
+
+        ret = glusterd_volinfo_find (volname, &volinfo);
+        if (ret) {
+                gf_log (THIS->name, GF_LOG_ERROR, "Failed to find volume %s",
+                        volname);
+                goto out;
+        }
+
+        list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+                if (uuid_compare (brickinfo->uuid, MY_UUID) ||
+                    !glusterd_is_brick_started (brickinfo)) {
+                        continue;
+                }
+                pending_node = GF_CALLOC (1, sizeof (*pending_node),
+                                          gf_gld_mt_pending_node_t);
+                if (!pending_node) {
+                        ret = -1;
+                        goto out;
+                }
+                pending_node->node = brickinfo;
+                pending_node->type = GD_NODE_BRICK;
+                list_add_tail (&pending_node->list, selected);
+                pending_node = NULL;
+        }
+
+out:
+        gf_log(THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+        return ret;
+}
+
 static int
 glusterd_op_ac_send_brick_op (glusterd_op_sm_event_t *event, void *ctx)
 {
@@ -5878,6 +5940,10 @@ glusterd_op_bricks_select (glusterd_op_t op, dict_t *dict, char **op_errstr,
                 ret = glusterd_bricks_select_rebalance_volume (dict, op_errstr,
                                                                selected);
                 break;
+
+        case GD_OP_BARRIER:
+                ret = glusterd_bricks_select_barrier (dict, selected);
+                break;
         case GD_OP_SNAP:
                 ret = glusterd_bricks_select_snap (dict, op_errstr, selected);
                 break;
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index babd5a3be5d..216806f4457 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -138,6 +138,7 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
         case GD_OP_HEAL_VOLUME:
         case GD_OP_QUOTA:
         case GD_OP_SNAP:
+        case GD_OP_BARRIER:
         {
                 /*nothing specific to be done*/
                 break;
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index b36d6f61680..d385cf9eede 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -1451,7 +1451,8 @@ gd_brick_op_phase (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
         }
 
         INIT_LIST_HEAD (&selected);
-        ret = glusterd_op_bricks_select (op, req_dict, op_errstr, &selected, rsp_dict);
+        ret = glusterd_op_bricks_select (op, req_dict, op_errstr, &selected,
+                                         rsp_dict);
         if (ret) {
                 gf_log (this->name, GF_LOG_ERROR, "%s",
                         (*op_errstr)? *op_errstr: "Brick op failed. Check "
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index b53d8e41232..e4e29ad9c62 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -108,6 +108,7 @@ typedef enum glusterd_op_ {
         GD_OP_SYS_EXEC,
         GD_OP_GSYNC_CREATE,
         GD_OP_SNAP,
+        GD_OP_BARRIER,
         GD_OP_MAX,
 } glusterd_op_t;
 
-- 
cgit
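For context, the brick-side handler added above only forwards the unserialized request dict to the volume's "<volname>-barrier" xlator by raising GF_EVENT_TRANSLATOR_OP through its notify() hook and relaying the return value. The sketch below illustrates the kind of notify() handler a barrier xlator could implement to consume that event; it is a minimal sketch, assuming a "barrier" key in the request dict and barrier_enable()/barrier_disable() helpers, none of which are defined by this patch.

/* Minimal sketch, not part of this change: a possible receiver for the
 * GF_EVENT_TRANSLATOR_OP event sent by glusterfs_handle_barrier().
 * The "barrier" dict key and the barrier_enable()/barrier_disable()
 * helpers are assumed names used only for illustration. */
#include "xlator.h"
#include "defaults.h"

int
notify (xlator_t *this, int32_t event, void *data, ...)
{
        dict_t *dict = NULL;
        int     on   = -1;
        int     ret  = 0;

        switch (event) {
        case GF_EVENT_TRANSLATOR_OP:
                dict = data;
                /* Assumption: glusterd places the desired barrier state in
                 * the request dict before sending the brick-op. */
                on = dict_get_str_boolean (dict, "barrier", -1);
                if (on == -1) {
                        ret = -1;
                        break;
                }
                ret = on ? barrier_enable (this) : barrier_disable (this);
                break;
        default:
                /* Pass all other events down the graph as usual. */
                ret = default_notify (this, event, data);
                break;
        }

        return ret;
}

The actual enable/disable semantics live in the barrier (and, once merged, changelog) xlators themselves; this patch only wires the brick-op from glusterd to those xlators.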