From c3bdb1d4c6c4491afdf1ba26bb0d6204270cc058 Mon Sep 17 00:00:00 2001
From: Amar Tumballi
Date: Tue, 20 Dec 2011 14:54:35 +0530
Subject: glusterd: bring in feature to use syncop for mgmt ops

* new syncop routines added to the mgmt program

* instead of 'glusterd_op_begin()', handlers can now use the synctask
  framework through 'glusterd_op_begin_synctask()'

* currently used for the following operations: 'volume start',
  'volume rebalance', 'volume quota', 'volume replace-brick' and
  'volume add-brick'

Change-Id: I0bee76d06790d5c5bb5db15d443b44af0e21f1c0
BUG: 762935
Signed-off-by: Amar Tumballi
Reviewed-on: http://review.gluster.com/479
Tested-by: Gluster Build System
Reviewed-by: Anand Avati
---
 xlators/mgmt/glusterd/src/Makefile.am              |   5 +-
 xlators/mgmt/glusterd/src/glusterd-brick-ops.c     |   4 +-
 xlators/mgmt/glusterd/src/glusterd-handler.c       | 167 +++++++++
 xlators/mgmt/glusterd/src/glusterd-quota.c         |   2 +-
 xlators/mgmt/glusterd/src/glusterd-rebalance.c     |   9 +-
 xlators/mgmt/glusterd/src/glusterd-replace-brick.c |   2 +-
 xlators/mgmt/glusterd/src/glusterd-syncop.c        | 392 +++++++++++++++++++++
 xlators/mgmt/glusterd/src/glusterd-syncop.h        |  54 +++
 xlators/mgmt/glusterd/src/glusterd-utils.c         |  50 ++-
 xlators/mgmt/glusterd/src/glusterd-volume-ops.c    |  11 +-
 xlators/mgmt/glusterd/src/glusterd.c               |   3 +
 xlators/mgmt/glusterd/src/glusterd.h               |  11 +
 12 files changed, 680 insertions(+), 30 deletions(-)
 create mode 100644 xlators/mgmt/glusterd/src/glusterd-syncop.c
 create mode 100644 xlators/mgmt/glusterd/src/glusterd-syncop.h

diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am
index 817f0273561..8a9248daa25 100644
--- a/xlators/mgmt/glusterd/src/Makefile.am
+++ b/xlators/mgmt/glusterd/src/Makefile.am
@@ -6,7 +6,8 @@ glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c glusterd-op-sm
 	glusterd-utils.c glusterd-rpc-ops.c glusterd-store.c glusterd-handshake.c \
 	glusterd-pmap.c glusterd-volgen.c glusterd-rebalance.c glusterd-quota.c \
 	glusterd-geo-rep.c glusterd-replace-brick.c glusterd-log-ops.c \
-	glusterd-volume-ops.c glusterd-brick-ops.c glusterd-mountbroker.c
+	glusterd-volume-ops.c glusterd-brick-ops.c glusterd-mountbroker.c \
+	glusterd-syncop.c

 glusterd_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
 	$(top_builddir)/rpc/xdr/src/libgfxdr.la \
@@ -14,7 +15,7 @@ glusterd_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \

 noinst_HEADERS = glusterd.h glusterd-utils.h glusterd-op-sm.h glusterd-sm.h \
 	glusterd-store.h glusterd-mem-types.h glusterd-pmap.h glusterd-volgen.h \
-	glusterd-mountbroker.h
+	glusterd-mountbroker.h glusterd-syncop.h

 AM_CFLAGS = -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -D$(GF_HOST_OS)\
 	-I$(top_srcdir)/libglusterfs/src -shared -nostartfiles $(GF_CFLAGS)\
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
index fc9c9cf0a93..e74dedc3a84 100644
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
@@ -307,8 +307,6 @@ out:
         return ret;
 }

-
-
 /* Handler functions */
 int
 glusterd_handle_add_brick (rpcsvc_request_t *req)
@@ -496,7 +494,7 @@ brick_val:
                         "failed to set the new type in dict");
         }

-        ret = glusterd_op_begin (req, GD_OP_ADD_BRICK, dict);
+        ret = glusterd_op_begin_synctask (req, GD_OP_ADD_BRICK, dict);

 out:
         gf_cmd_log ("Volume add-brick","on volname: %s %s", volname,
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 86bc0d7b1c6..0ee3bfd2440 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -57,6 +57,9 @@
 #include "defaults.c"
 #include "common-utils.h"

+#include "globals.h"
+#include "glusterd-syncop.h"
+
 static int
 glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid,
                             char *hostname, int port,
@@ -944,6 +947,140 @@ out:
         glusterd_friend_sm ();
         glusterd_op_sm ();

+        return ret;
+}
+
+int
+gd_sync_task_begin (void *data)
+{
+        int                  ret          = -1;
+        dict_t              *dict         = NULL;
+        dict_t              *rsp_dict     = NULL;
+        glusterd_peerinfo_t *peerinfo     = NULL;
+        glusterd_conf_t     *conf         = NULL;
+        uuid_t               tmp_uuid     = {0,};
+        char                *errstr       = NULL;
+        glusterd_op_t        op           = 0;
+        int32_t              tmp_op       = 0;
+        gf_boolean_t         local_locked = _gf_false;
+
+        conf = THIS->private;
+
+        dict = data;
+
+        ret = dict_get_int32 (dict, "operation", &tmp_op);
+        if (ret)
+                goto out;
+
+        op = tmp_op;
+
+        ret = -1;
+        rsp_dict = dict_new ();
+        if (!rsp_dict)
+                goto out;
+
+        /* Lock everything */
+        ret = glusterd_lock (conf->uuid);
+        if (ret)
+                goto out;
+        /* successful lock in local node */
+        local_locked = _gf_true;
+
+        list_for_each_entry (peerinfo, &conf->peers, uuid_list) {
+                ret = gd_syncop_mgmt_lock (peerinfo->rpc,
+                                           conf->uuid, tmp_uuid);
+                if (ret)
+                        goto out;
+                /* TODO: Only on lock successful nodes it should unlock */
+        }
+
+        /* stage op */
+        ret = glusterd_op_stage_validate (op, dict, &errstr, rsp_dict);
+        if (ret)
+                goto out;
+
+        list_for_each_entry (peerinfo, &conf->peers, uuid_list) {
+                ret = gd_syncop_mgmt_stage_op (peerinfo->rpc,
+                                               conf->uuid, tmp_uuid,
+                                               op, dict, &rsp_dict, &errstr);
+                if (ret) {
+                        if (errstr)
+                                ret = dict_set_dynstr (dict, "error", errstr);
+
+                        ret = -1;
+                        goto out;
+                }
+        }
+
+        /* commit op */
+        ret = glusterd_op_commit_perform (op, dict, &errstr, rsp_dict);
+        if (ret)
+                goto out;
+
+        list_for_each_entry (peerinfo, &conf->peers, uuid_list) {
+                ret = gd_syncop_mgmt_commit_op (peerinfo->rpc,
+                                                conf->uuid, tmp_uuid,
+                                                op, dict, &rsp_dict, &errstr);
+                if (ret) {
+                        if (errstr)
+                                ret = dict_set_dynstr (dict, "error", errstr);
+
+                        ret = -1;
+                        goto out;
+                }
+        }
+
+        ret = 0;
+out:
+        if (local_locked) {
+                /* unlock everything as we held a successful local lock */
+                list_for_each_entry (peerinfo, &conf->peers, uuid_list) {
+                        /* No need to check the error code, as it is possible
+                           that if 'lock' on few nodes failed, it would come
+                           here, and unlock would fail on nodes where lock
+                           never was sent */
+                        gd_syncop_mgmt_unlock (peerinfo->rpc,
+                                               conf->uuid, tmp_uuid);
+                }
+
+                /* Local node should be the one to be locked first,
+                   unlocked last to prevent races */
+                glusterd_unlock (conf->uuid);
+        }
+
+        if (rsp_dict)
+                dict_unref (rsp_dict);
+
+        return ret;
+}
+
+int
+gd_sync_task_completion (int op_ret, call_frame_t *sync_frame, void *data)
+{
+        int               ret    = 0;
+        dict_t           *dict   = NULL;
+        rpcsvc_request_t *req    = NULL;
+        int32_t           tmp_op = 0;
+        glusterd_op_t     op     = 0;
+
+        dict = data;
+
+        req = sync_frame->local;
+        sync_frame->local = NULL;
+
+        ret = dict_get_int32 (dict, "operation", &tmp_op);
+        if (ret)
+                goto out;
+        op = tmp_op;
+
+        ret = glusterd_op_send_cli_response (op, op_ret, 0, req, NULL,
+                                             "operation failed");
+
+out:
+        if (dict)
+                dict_unref (dict);
+
         return ret;
 }

@@ -958,6 +1095,36 @@ glusterd_op_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx)
 }

+int32_t
+glusterd_op_begin_synctask (rpcsvc_request_t *req, glusterd_op_t op,
+                            void *dict)
+{
+        int              ret         = 0;
+        call_frame_t    *dummy_frame = NULL;
+        glusterfs_ctx_t *ctx         = NULL;
+
+        dummy_frame = create_frame (THIS, THIS->ctx->pool);
+        if (!dummy_frame)
+                goto out;
+
+        dummy_frame->local = req;
+
+        ret = dict_set_int32 (dict, "operation", op);
+        if (ret) {
+                gf_log (THIS->name, GF_LOG_ERROR,
+                        "dict set failed for setting operations");
+                goto out;
+        }
+
+        ctx = glusterfs_ctx_get ();
+
+        ret = synctask_new (ctx->env, gd_sync_task_begin,
+                            gd_sync_task_completion,
+                            dummy_frame, dict);
+out:
+        return ret;
+}
+
 int
 glusterd_handle_reset_volume (rpcsvc_request_t *req)
diff --git a/xlators/mgmt/glusterd/src/glusterd-quota.c b/xlators/mgmt/glusterd/src/glusterd-quota.c
index b063421ec1a..c0c2e23dbd8 100644
--- a/xlators/mgmt/glusterd/src/glusterd-quota.c
+++ b/xlators/mgmt/glusterd/src/glusterd-quota.c
@@ -102,7 +102,7 @@ glusterd_handle_quota (rpcsvc_request_t *req)
                 break;
         }
         gf_cmd_log ("volume quota", " %s command on %s", operation, volname);
-        ret = glusterd_op_begin (req, GD_OP_QUOTA, dict);
+        ret = glusterd_op_begin_synctask (req, GD_OP_QUOTA, dict);
         gf_cmd_log ("volume quota", " %s command on %s %s", operation,volname,
                     (ret != 0)? "FAILED" : "SUCCEEDED");

diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
index 36435868602..f562e1b8007 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
@@ -454,11 +454,10 @@ glusterd_handle_defrag_volume (rpcsvc_request_t *req)

         if ((cmd == GF_DEFRAG_CMD_STATUS) ||
             (cmd == GF_DEFRAG_CMD_STOP)) {
-                ret = glusterd_op_begin (req, GD_OP_DEFRAG_BRICK_VOLUME,
-                                         dict);
-        }
-        else
-                ret = glusterd_op_begin (req, GD_OP_REBALANCE, dict);
+                ret = glusterd_op_begin_synctask (req, GD_OP_DEFRAG_BRICK_VOLUME,
+                                                  dict);
+        } else
+                ret = glusterd_op_begin_synctask (req, GD_OP_REBALANCE, dict);

 out:
diff --git a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
index ca127f7a8ea..0aba0109e16 100644
--- a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
+++ b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
@@ -149,7 +149,7 @@ glusterd_handle_replace_brick (rpcsvc_request_t *req)
                 " dst_brick:%s op:%s", volname, src_brick, dst_brick,
                 operation);

-        ret = glusterd_op_begin (req, GD_OP_REPLACE_BRICK, dict);
+        ret = glusterd_op_begin_synctask (req, GD_OP_REPLACE_BRICK, dict);
         gf_cmd_log ("Volume replace-brick","on volname: %s %s", volname,
                     (ret) ? "FAILED" : "SUCCESS");

diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
new file mode 100644
index 00000000000..c1fdde19aae
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -0,0 +1,392 @@
+/*
+  Copyright (c) 2012 Red Hat, Inc.
+  This file is part of GlusterFS.
+
+  GlusterFS is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published
+  by the Free Software Foundation; either version 3 of the License,
+  or (at your option) any later version.
+
+  GlusterFS is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see
+  <http://www.gnu.org/licenses/>.
+*/
+
+/* rpc related syncops */
+#include "syncop.h"
+#include "rpc-clnt.h"
+#include "protocol-common.h"
+#include "xdr-generic.h"
+#include "glusterd1-xdr.h"
+#include "glusterd-syncop.h"
+
+int
+gd_syncop_submit_request (struct rpc_clnt *rpc, void *req,
+                          void *cookie, rpc_clnt_prog_t *prog,
+                          int procnum, fop_cbk_fn_t cbkfn, xdrproc_t xdrproc)
+{
+        int            ret      = -1;
+        struct iobuf  *iobuf    = NULL;
+        struct iobref *iobref   = NULL;
+        int            count    = 0;
+        struct iovec   iov      = {0, };
+        ssize_t        req_size = 0;
+
+        GF_ASSERT (rpc);
+        if (!req)
+                goto out;
+
+        req_size = xdr_sizeof (xdrproc, req);
+        iobuf = iobuf_get2 (rpc->ctx->iobuf_pool, req_size);
+        if (!iobuf)
+                goto out;
+
+        iobref = iobref_new ();
+        if (!iobref)
+                goto out;
+
+        iobref_add (iobref, iobuf);
+
+        iov.iov_base = iobuf->ptr;
+        iov.iov_len  = iobuf_pagesize (iobuf);
+
+        /* Create the xdr payload */
+        ret = xdr_serialize_generic (iov, req, xdrproc);
+        if (ret == -1)
+                goto out;
+
+        iov.iov_len = ret;
+        count = 1;
+
+        /* Send the msg */
+        ret = rpc_clnt_submit (rpc, prog, procnum, cbkfn,
+                               &iov, count, NULL, 0, iobref,
+                               (call_frame_t *)cookie, NULL, 0, NULL, 0, NULL);
+
+        /* TODO: do we need to start ping also? */
+
+out:
+        iobref_unref (iobref);
+        iobuf_unref (iobuf);
+
+        return ret;
+}
+
+/* Defined in glusterd-rpc-ops.c */
+extern struct rpc_clnt_program gd_mgmt_prog;
+
+int32_t
+gd_syncop_mgmt_lock_cbk (struct rpc_req *req, struct iovec *iov,
+                         int count, void *cookie)
+{
+        struct syncargs            *args = NULL;
+        gd1_mgmt_cluster_lock_rsp   rsp  = {{0},};
+        int                         ret  = -1;
+
+        args = cookie;
+
+        /* initialize */
+        args->op_ret   = -1;
+        args->op_errno = EINVAL;
+
+        if (-1 == req->rpc_status) {
+                args->op_errno = ENOTCONN;
+                goto out;
+        }
+
+        ret = xdr_to_generic (*iov, &rsp,
+                              (xdrproc_t)xdr_gd1_mgmt_cluster_lock_rsp);
+        if (ret < 0) {
+                goto out;
+        }
+
+        args->op_ret   = rsp.op_ret;
+        args->op_errno = rsp.op_errno;
+
+        uuid_copy (args->uuid, rsp.uuid);
+
+out:
+        __wake (args);
+
+        return 0;
+}
+
+
+int
+gd_syncop_mgmt_lock (struct rpc_clnt *rpc, uuid_t my_uuid, uuid_t recv_uuid)
+{
+        struct syncargs             args = {0, };
+        gd1_mgmt_cluster_lock_req   req  = {{0},};
+
+        uuid_copy (req.uuid, my_uuid);
+
+        args.op_ret   = -1;
+        args.op_errno = ENOTCONN;
+
+        GD_SYNCOP (rpc, (&args), gd_syncop_mgmt_lock_cbk,
+                   &req, &gd_mgmt_prog, GLUSTERD_MGMT_CLUSTER_LOCK,
+                   xdr_gd1_mgmt_cluster_lock_req);
+
+        if (!args.op_ret)
+                uuid_copy (recv_uuid, args.uuid);
+
+        errno = args.op_errno;
+        return args.op_ret;
+
+}
+
+int32_t
+gd_syncop_mgmt_unlock_cbk (struct rpc_req *req, struct iovec *iov,
+                           int count, void *cookie)
+{
+        struct syncargs              *args = NULL;
+        gd1_mgmt_cluster_unlock_rsp   rsp  = {{0},};
+        int                           ret  = -1;
+
+        args = cookie;
+
+        /* initialize */
+        args->op_ret   = -1;
+        args->op_errno = EINVAL;
+
+        if (-1 == req->rpc_status) {
+                args->op_errno = ENOTCONN;
+                goto out;
+        }
+
+        ret = xdr_to_generic (*iov, &rsp,
+                              (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_rsp);
+        if (ret < 0) {
+                goto out;
+        }
+
+        args->op_ret   = rsp.op_ret;
+        args->op_errno = rsp.op_errno;
+
+        uuid_copy (args->uuid, rsp.uuid);
+
+out:
+        __wake (args);
+
+        return 0;
+}
+
+
+int
+gd_syncop_mgmt_unlock (struct rpc_clnt *rpc, uuid_t my_uuid, uuid_t recv_uuid)
+{
+        struct syncargs               args = {0, };
+        gd1_mgmt_cluster_unlock_req   req  = {{0},};
+
+        uuid_copy (req.uuid, my_uuid);
+
+        args.op_ret   = -1;
+        args.op_errno = ENOTCONN;
+
+        GD_SYNCOP (rpc, (&args), gd_syncop_mgmt_unlock_cbk,
+                   &req, &gd_mgmt_prog, GLUSTERD_MGMT_CLUSTER_UNLOCK,
+                   xdr_gd1_mgmt_cluster_unlock_req);
+
+        if (!args.op_ret)
+                uuid_copy (recv_uuid, args.uuid);
+
+        errno = args.op_errno;
+        return args.op_ret;
+
+}
+
+int32_t
+gd_syncop_stage_op_cbk (struct rpc_req *req, struct iovec *iov,
+                        int count, void *cookie)
+{
+        struct syncargs        *args = NULL;
+        gd1_mgmt_stage_op_rsp   rsp  = {{0},};
+        int                     ret  = -1;
+
+        args = cookie;
+
+        /* initialize */
+        args->op_ret   = -1;
+        args->op_errno = EINVAL;
+
+        if (-1 == req->rpc_status) {
+                args->op_errno = ENOTCONN;
+                goto out;
+        }
+
+        ret = xdr_to_generic (*iov, &rsp,
+                              (xdrproc_t)xdr_gd1_mgmt_stage_op_rsp);
+        if (ret < 0) {
+                goto out;
+        }
+
+        if (rsp.dict.dict_len) {
+                /* Unserialize the dictionary */
+                args->dict = dict_new ();
+
+                ret = dict_unserialize (rsp.dict.dict_val,
+                                        rsp.dict.dict_len,
+                                        &args->dict);
+                if (ret < 0) {
+                        GF_FREE (rsp.dict.dict_val);
+                        goto out;
+                } else {
+                        args->dict->extra_stdfree = rsp.dict.dict_val;
+                }
+        }
+
+        args->op_ret   = rsp.op_ret;
+        args->op_errno = rsp.op_errno;
+
+        uuid_copy (args->uuid, rsp.uuid);
+
+        args->errstr = gf_strdup (rsp.op_errstr);
+
+out:
+        __wake (args);
+
+        return 0;
+}
+
+
+int
+gd_syncop_mgmt_stage_op (struct rpc_clnt *rpc, uuid_t my_uuid, uuid_t recv_uuid,
+                         int op, dict_t *dict_out, dict_t **dict_in,
+                         char **errstr)
+{
+        struct syncargs        args = {0, };
+        gd1_mgmt_stage_op_req  req  = {{0},};
+        int                    ret  = 0;
+
+        uuid_copy (req.uuid, my_uuid);
+        req.op = op;
+
+        args.op_ret   = -1;
+        args.op_errno = ENOTCONN;
+
+        ret = dict_allocate_and_serialize (dict_out, &req.buf.buf_val,
+                                           (size_t *)&req.buf.buf_len);
+        if (ret)
+                goto out;
+
+        GD_SYNCOP (rpc, (&args), gd_syncop_stage_op_cbk,
+                   &req, &gd_mgmt_prog, GLUSTERD_MGMT_STAGE_OP,
+                   xdr_gd1_mgmt_stage_op_req);
+
+        if (args.errstr && errstr)
+                *errstr = args.errstr;
+        else if (args.errstr)
+                GF_FREE (args.errstr);
+
+        if (args.dict && dict_in)
+                *dict_in = args.dict;
+        else if (args.dict)
+                dict_unref (args.dict);
+
+        uuid_copy (recv_uuid, args.uuid);
+out:
+        errno = args.op_errno;
+        return args.op_ret;
+
+}
+
+int32_t
+gd_syncop_commit_op_cbk (struct rpc_req *req, struct iovec *iov,
+                         int count, void *cookie)
+{
+        struct syncargs         *args = NULL;
+        gd1_mgmt_commit_op_rsp   rsp  = {{0},};
+        int                      ret  = -1;
+
+        args = cookie;
+
+        /* initialize */
+        args->op_ret   = -1;
+        args->op_errno = EINVAL;
+
+        if (-1 == req->rpc_status) {
+                args->op_errno = ENOTCONN;
+                goto out;
+        }
+
+        ret = xdr_to_generic (*iov, &rsp,
+                              (xdrproc_t)xdr_gd1_mgmt_commit_op_rsp);
+        if (ret < 0) {
+                goto out;
+        }
+
+        if (rsp.dict.dict_len) {
+                /* Unserialize the dictionary */
+                args->dict = dict_new ();
+
+                ret = dict_unserialize (rsp.dict.dict_val,
+                                        rsp.dict.dict_len,
+                                        &args->dict);
+                if (ret < 0) {
+                        GF_FREE (rsp.dict.dict_val);
+                        goto out;
+                } else {
+                        args->dict->extra_stdfree = rsp.dict.dict_val;
+                }
+        }
+
+        args->op_ret   = rsp.op_ret;
+        args->op_errno = rsp.op_errno;
+
+        uuid_copy (args->uuid, rsp.uuid);
+
+        args->errstr = gf_strdup (rsp.op_errstr);
+
+out:
+        __wake (args);
+
+        return 0;
+}
+
+
+int
+gd_syncop_mgmt_commit_op (struct rpc_clnt *rpc, uuid_t my_uuid, uuid_t recv_uuid,
+                          int op, dict_t *dict_out, dict_t **dict_in,
+                          char **errstr)
+{
+        struct syncargs         args = {0, };
+        gd1_mgmt_commit_op_req  req  = {{0},};
+        int                     ret  = 0;
+
+        uuid_copy (req.uuid, my_uuid);
+        req.op = op;
+
+        args.op_ret   = -1;
+        args.op_errno = ENOTCONN;
+
+        ret = dict_allocate_and_serialize (dict_out, &req.buf.buf_val,
+                                           (size_t *)&req.buf.buf_len);
+        if (ret)
+                goto out;
+
+        GD_SYNCOP (rpc, (&args), gd_syncop_commit_op_cbk,
+                   &req, &gd_mgmt_prog, GLUSTERD_MGMT_COMMIT_OP,
+                   xdr_gd1_mgmt_commit_op_req);
+
+        if (args.errstr && errstr)
+                *errstr = args.errstr;
+        else if (args.errstr)
+                GF_FREE (args.errstr);
+
+        if (args.dict && dict_in)
+                *dict_in = args.dict;
+        else if (args.dict)
+                dict_unref (args.dict);
+
+        uuid_copy (recv_uuid, args.uuid);
+
+out:
+        errno = args.op_errno;
+        return args.op_ret;
+
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.h b/xlators/mgmt/glusterd/src/glusterd-syncop.h
new file mode 100644
index 00000000000..ccaf794ea18
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.h
@@ -0,0 +1,54 @@
+/*
+  Copyright (c) 2012 Red Hat, Inc.
+  This file is part of GlusterFS.
+
+  GlusterFS is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published
+  by the Free Software Foundation; either version 3 of the License,
+  or (at your option) any later version.
+
+  GlusterFS is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see
+  <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __RPC_SYNCOP_H
+#define __RPC_SYNCOP_H
+
+#include "syncop.h"
+
+
+/* gd_syncop_* */
+#define GD_SYNCOP(rpc, stb, cbk, req, prog, procnum, xdrproc) do {      \
+                int ret = 0;                                            \
+                ret = gd_syncop_submit_request (rpc, req, stb,          \
+                                                prog, procnum, cbk,     \
+                                                (xdrproc_t)xdrproc);    \
+                if (!ret)                                               \
+                        __yield (stb);                                  \
+        } while (0)
+
+
+int gd_syncop_submit_request (struct rpc_clnt *rpc, void *req,
+                              void *cookie, rpc_clnt_prog_t *prog,
+                              int procnum, fop_cbk_fn_t cbkfn,
+                              xdrproc_t xdrproc);
+
+
+int gd_syncop_mgmt_lock (struct rpc_clnt *rpc, uuid_t my_uuid,
+                         uuid_t recv_uuid);
+int gd_syncop_mgmt_unlock (struct rpc_clnt *rpc, uuid_t my_uuid,
+                           uuid_t recv_uuid);
+int gd_syncop_mgmt_stage_op (struct rpc_clnt *rpc, uuid_t my_uuid,
+                             uuid_t recv_uuid, int op, dict_t *dict_out,
+                             dict_t **dict_in, char **errstr);
+int gd_syncop_mgmt_commit_op (struct rpc_clnt *rpc, uuid_t my_uuid,
+                              uuid_t recv_uuid, int op, dict_t *dict_out,
+                              dict_t **dict_in, char **errstr);
+
+#endif /* __RPC_SYNCOP_H */
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 6bfcd6c0025..68d2473c73d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -1244,12 +1244,12 @@ glusterd_volume_start_glusterfs (glusterd_volinfo_t *volinfo,
         runner_log (&runner, "", GF_LOG_DEBUG, "Starting GlusterFS");

         ret = runner_run (&runner);
+        if (ret)
+                goto out;

-        if (ret == 0) {
-                //pmap_registry_bind (THIS, port, brickinfo->path);
-                brickinfo->port = port;
-                brickinfo->rdma_port = rdma_port;
-        }
+        //pmap_registry_bind (THIS, port, brickinfo->path);
+        brickinfo->port = port;
+        brickinfo->rdma_port = rdma_port;

 connect:
         ret = glusterd_brick_connect (volinfo, brickinfo);
@@ -3213,18 +3213,23 @@ out:
         return ret;
 }

-int
-glusterd_restart_bricks (glusterd_conf_t *conf)
+void *
+glusterd_brick_restart_proc (void *data)
 {
-        glusterd_volinfo_t   *volinfo        = NULL;
-        glusterd_brickinfo_t *brickinfo      = NULL;
-        int                   ret            = 0;
-        gf_boolean_t          start_nodesvcs = _gf_false;
+        glusterd_conf_t      *conf           = NULL;
+        glusterd_volinfo_t   *volinfo        = NULL;
+        glusterd_brickinfo_t *brickinfo      = NULL;
+        gf_boolean_t          start_nodesvcs = _gf_false;
+
+        conf = data;

         GF_ASSERT (conf);

+        /* set the proper 'THIS' value as it is new thread */
+        THIS = conf->xl;
+
         list_for_each_entry (volinfo, &conf->volumes, vol_list) {
-                //If volume status is not started, do not proceed
+                /* If volume status is not started, do not proceed */
                 if (volinfo->status == GLUSTERD_STATUS_STARTED) {
                         list_for_each_entry (brickinfo, &volinfo->bricks,
                                              brick_list) {
@@ -3233,8 +3238,28 @@ glusterd_restart_bricks (glusterd_conf_t *conf)
                                 start_nodesvcs = _gf_true;
                         }
                 }
+
         if (start_nodesvcs)
                 glusterd_nodesvcs_handle_graph_change (NULL);
+
+        return NULL;
+}
+
+int
+glusterd_restart_bricks (glusterd_conf_t *conf)
+{
+        int ret = 0;
+
+        conf->xl = THIS;
+        ret = pthread_create (&conf->brick_thread, NULL,
+                              glusterd_brick_restart_proc,
+                              conf);
+        if (ret != 0) {
+                gf_log (THIS->name, GF_LOG_DEBUG,
+                        "pthread_create() failed (%s)",
+                        strerror (errno));
+        }
+
         return ret;
 }

@@ -3255,7 +3280,6 @@ _local_gsyncd_start (dict_t *this, char *key, data_t *value, void *data)
                 return;

         uuid_len = (slave - value->data - 1);
-
         strncpy (uuid_str, (char*)value->data, uuid_len);

         glusterd_start_gsync (volinfo, slave, uuid_str, NULL);
 }
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index f0d8c70d8eb..b902d30635f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -252,7 +252,7 @@ glusterd_handle_cli_start_volume (rpcsvc_request_t *req)
 {
         int32_t        ret     = -1;
         gf_cli_req     cli_req = {{0,}};
-        char          *dup_volname = NULL;
+        char          *volname = NULL;
         dict_t        *dict    = NULL;
         glusterd_op_t  cli_op  = GD_OP_START_VOLUME;

@@ -280,17 +280,18 @@ glusterd_handle_cli_start_volume (rpcsvc_request_t *req)
                 }
         }

-        ret = dict_get_str (dict, "volname", &dup_volname);
+        ret = dict_get_str (dict, "volname", &volname);
         if (ret) {
                 gf_log (THIS->name, GF_LOG_ERROR, "dict get failed");
                 goto out;
         }

         gf_log ("glusterd", GF_LOG_INFO, "Received start vol req"
-                "for volume %s", dup_volname);
-        ret = glusterd_op_begin (req, GD_OP_START_VOLUME, dict);
+                "for volume %s", volname);
+
+        ret = glusterd_op_begin_synctask (req, GD_OP_START_VOLUME, dict);

-        gf_cmd_log ("volume start","on volname: %s %s", dup_volname,
+        gf_cmd_log ("volume start","on volname: %s %s", volname,
"SUCCESS": "FAILED")); out: diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c index b92a08a1962..e4f04002f09 100644 --- a/xlators/mgmt/glusterd/src/glusterd.c +++ b/xlators/mgmt/glusterd/src/glusterd.c @@ -48,6 +48,8 @@ #include "common-utils.h" #include "run.h" +#include "syncop.h" + #include "glusterd-mountbroker.h" static uuid_t glusterd_uuid; @@ -966,6 +968,7 @@ init (xlator_t *this) } } #endif + this->private = conf; (void) glusterd_nodesvc_set_running ("glustershd", _gf_false); /* this->ctx->top = this;*/ diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h index 44934b2fc80..ad8e0bbfa1f 100644 --- a/xlators/mgmt/glusterd/src/glusterd.h +++ b/xlators/mgmt/glusterd/src/glusterd.h @@ -46,6 +46,7 @@ #include "protocol-common.h" #include "glusterd-pmap.h" #include "cli1-xdr.h" +#include "syncop.h" #define GLUSTERD_MAX_VOLUME_NAME 1000 #define DEFAULT_LOG_FILE_DIRECTORY DATADIR "/log/glusterfs" @@ -123,6 +124,8 @@ typedef struct { #ifdef DEBUG gf_boolean_t valgrind; #endif + pthread_t brick_thread; + xlator_t *xl; /* Should be set to 'THIS' before creating thread */ } glusterd_conf_t; typedef enum gf_brick_status { @@ -628,4 +631,12 @@ int glusterd_op_stop_volume_args_get (dict_t *dict, char** volname, int *flags); int glusterd_op_statedump_volume_args_get (dict_t *dict, char **volname, char **options, int *option_cnt); +/* Synctask part */ +int gd_sync_task_begin (void *data); +int gd_sync_task_completion (int op_ret, call_frame_t *sync_frame, void *data); + +int32_t glusterd_op_begin_synctask (rpcsvc_request_t *req, glusterd_op_t op, + void *dict); + + #endif -- cgit