/* Copyright (c) 2013-2014 Red Hat, Inc. This file is part of GlusterFS. This file is licensed to you under your choice of the GNU Lesser General Public License, version 3 or any later version (LGPLv3 or later), or the GNU General Public License, version 2 (GPLv2), in all cases as published by the Free Software Foundation. */ /* rpc related syncops */ #include "rpc-clnt.h" #include "protocol-common.h" #include "xdr-generic.h" #include "glusterd1-xdr.h" #include "glusterd-syncop.h" #include "glusterd.h" #include "glusterd-utils.h" #include "glusterd-locks.h" #include "glusterd-mgmt.h" #include "glusterd-op-sm.h" extern struct rpc_clnt_program gd_mgmt_v3_prog; static void gd_mgmt_v3_collate_errors (struct syncargs *args, int op_ret, int op_errno, char *op_errstr, int op_code, glusterd_peerinfo_t *peerinfo, u_char *uuid) { char err_str[PATH_MAX] = "Please check log file for details."; char op_err[PATH_MAX] = ""; int len = -1; char *peer_str = NULL; if (op_ret) { args->op_ret = op_ret; args->op_errno = op_errno; if (peerinfo) peer_str = peerinfo->hostname; else peer_str = uuid_utoa (uuid); if (op_errstr && strcmp (op_errstr, "")) { len = snprintf (err_str, sizeof(err_str) - 1, "Error: %s", op_errstr); err_str[len] = '\0'; } switch (op_code){ case GLUSTERD_MGMT_V3_VOLUME_LOCK: { len = snprintf (op_err, sizeof(op_err) - 1, "Locking volume failed " "on %s. %s", peer_str, err_str); break; } case GLUSTERD_MGMT_V3_PRE_VALIDATE: { len = snprintf (op_err, sizeof(op_err) - 1, "Pre Validation failed " "on %s. %s", peer_str, err_str); break; } case GLUSTERD_MGMT_V3_BRICK_OP: { len = snprintf (op_err, sizeof(op_err) - 1, "Brick ops failed " "on %s. %s", peer_str, err_str); break; } case GLUSTERD_MGMT_V3_COMMIT: { len = snprintf (op_err, sizeof(op_err) - 1, "Commit failed on %s. %s", peer_str, err_str); break; } case GLUSTERD_MGMT_V3_POST_VALIDATE: { len = snprintf (op_err, sizeof(op_err) - 1, "Post Validation failed " "on %s. %s", peer_str, err_str); break; } case GLUSTERD_MGMT_V3_VOLUME_UNLOCK: { len = snprintf (op_err, sizeof(op_err) - 1, "Unlocking volume failed " "on %s. %s", peer_str, err_str); break; } } op_err[len] = '\0'; if (args->errstr) { len = snprintf (err_str, sizeof(err_str) - 1, "%s\n%s", args->errstr, op_err); GF_FREE (args->errstr); args->errstr = NULL; } else len = snprintf (err_str, sizeof(err_str) - 1, "%s", op_err); err_str[len] = '\0'; gf_log ("", GF_LOG_ERROR, "%s", op_err); args->errstr = gf_strdup (err_str); } return; } int32_t gd_mgmt_v3_pre_validate_fn (glusterd_op_t op, dict_t *dict, char **op_errstr, dict_t *rsp_dict) { int ret = -1; xlator_t *this = THIS; switch (op) { case GD_OP_SNAP: { ret = glusterd_snapshot_prevalidate (dict, op_errstr, rsp_dict); break; } default: break; } gf_log (this->name, GF_LOG_DEBUG, "OP = %d. Returning %d", op, ret); return ret; } int32_t gd_mgmt_v3_brick_op_fn (glusterd_op_t op, dict_t *dict, char **op_errstr, dict_t *rsp_dict) { int ret = -1; xlator_t *this = THIS; ret = 0; gf_log (this->name, GF_LOG_DEBUG, "OP = %d. Returning %d", op, ret); return ret; } int32_t gd_mgmt_v3_commit_fn (glusterd_op_t op, dict_t *dict, char **op_errstr, dict_t *rsp_dict) { int ret = -1; xlator_t *this = THIS; switch (op) { case GD_OP_SNAP: { ret = glusterd_snapshot (dict, op_errstr, rsp_dict); if (ret) goto out; break; } default: break; } ret = 0; out: gf_log (this->name, GF_LOG_DEBUG, "OP = %d. Returning %d", op, ret); return ret; } int32_t gd_mgmt_v3_post_validate_fn (glusterd_op_t op, dict_t *dict, char **op_errstr, dict_t *rsp_dict) { int ret = -1; xlator_t *this = THIS; ret = 0; gf_log (this->name, GF_LOG_DEBUG, "OP = %d. Returning %d", op, ret); return ret; } int32_t gd_mgmt_v3_vol_lock_cbk_fn (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { int ret = -1; struct syncargs *args = NULL; glusterd_peerinfo_t *peerinfo = NULL; gd1_mgmt_v3_vol_lock_rsp rsp = {{0},}; call_frame_t *frame = NULL; int op_ret = -1; int op_errno = -1; GF_ASSERT(req); GF_ASSERT(iov); GF_ASSERT(myframe); frame = myframe; args = frame->local; peerinfo = frame->cookie; frame->local = NULL; frame->cookie = NULL; if (-1 == req->rpc_status) { op_errno = ENOTCONN; goto out; } ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_vol_lock_rsp); if (ret < 0) goto out; uuid_copy (args->uuid, rsp.uuid); op_ret = rsp.op_ret; op_errno = rsp.op_errno; out: gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL, GLUSTERD_MGMT_V3_VOLUME_LOCK, peerinfo, rsp.uuid); STACK_DESTROY (frame->root); synctask_barrier_wake(args); return 0; } int32_t gd_mgmt_v3_vol_lock_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { return glusterd_big_locked_cbk (req, iov, count, myframe, gd_mgmt_v3_vol_lock_cbk_fn); } int gd_mgmt_v3_vol_lock (glusterd_op_t op, dict_t *op_ctx, glusterd_peerinfo_t *peerinfo, struct syncargs *args, uuid_t my_uuid, uuid_t recv_uuid) { int ret = -1; gd1_mgmt_v3_vol_lock_req req = {{0},}; glusterd_conf_t *conf = THIS->private; GF_ASSERT(op_ctx); GF_ASSERT(peerinfo); GF_ASSERT(args); ret = dict_allocate_and_serialize (op_ctx, &req.dict.dict_val, &req.dict.dict_len); if (ret) goto out; uuid_copy (req.uuid, my_uuid); req.op = op; synclock_unlock (&conf->big_lock); ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo, &gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_VOLUME_LOCK, gd_mgmt_v3_vol_lock_cbk, (xdrproc_t) xdr_gd1_mgmt_v3_vol_lock_req); synclock_lock (&conf->big_lock); out: gf_log ("", GF_LOG_DEBUG, "Returning %d", ret); return ret; } int glusterd_mgmt_v3_initiate_lockdown (glusterd_conf_t *conf, glusterd_op_t op, dict_t *dict, char **op_errstr, int npeers, gf_boolean_t *is_acquired) { int ret = -1; int peer_cnt = 0; char *volname = NULL; uuid_t peer_uuid = {0}; xlator_t *this = NULL; glusterd_peerinfo_t *peerinfo = NULL; struct syncargs args = {0}; struct list_head *peers = NULL; this = THIS; peers = &conf->xaction_peers; /* Volume(s) lock on local node */ ret = dict_get_str (dict, "volname", &volname); if (ret) { /* Trying to acquire volume locks on multiple volumes */ ret = glusterd_multiple_volumes_lock (dict, MY_UUID); if (ret) { gf_log ("", GF_LOG_ERROR, "Failed to acquire volume locks on localhost"); goto out; } } else { ret = glusterd_volume_lock (volname, MY_UUID); if (ret) { gf_log (this->name, GF_LOG_ERROR, "Unable to acquire local lock for %s", volname); goto out; } } *is_acquired = _gf_true; if (!npeers) { ret = 0; goto out; } /* Sending Volume lock req to other nodes in the cluster */ synctask_barrier_init((&args)); peer_cnt = 0; list_for_each_entry (peerinfo, peers, op_peers_list) { gd_mgmt_v3_vol_lock (op, dict, peerinfo, &args, MY_UUID, peer_uuid); peer_cnt++; } gd_synctask_barrier_wait((&args), peer_cnt); if (args.errstr) *op_errstr = gf_strdup (args.errstr); ret = args.op_ret; gf_log (this->name, GF_LOG_DEBUG, "Sent lock op req for %s " "to %d peers. Returning %d", gd_op_list[op], peer_cnt, ret); out: if (ret) { if (*op_errstr) gf_log (this->name, GF_LOG_ERROR, "%s", *op_errstr); if (volname) ret = gf_asprintf (op_errstr, "Another transaction is in progress " "for %s. Please try again after sometime.", volname); else ret = gf_asprintf (op_errstr, "Another transaction is in progress. " "Please try again after sometime."); if (ret == -1) *op_errstr = NULL; ret = -1; } return ret; } int32_t gd_mgmt_v3_pre_validate_cbk_fn (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { int ret = -1; struct syncargs *args = NULL; glusterd_peerinfo_t *peerinfo = NULL; gd1_mgmt_v3_pre_val_rsp rsp = {{0},}; call_frame_t *frame = NULL; int op_ret = -1; int op_errno = -1; GF_ASSERT(req); GF_ASSERT(iov); GF_ASSERT(myframe); frame = myframe; args = frame->local; peerinfo = frame->cookie; frame->local = NULL; frame->cookie = NULL; if (-1 == req->rpc_status) { op_errno = ENOTCONN; goto out; } ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_pre_val_rsp); if (ret < 0) goto out; uuid_copy (args->uuid, rsp.uuid); op_ret = rsp.op_ret; op_errno = rsp.op_errno; out: gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL, GLUSTERD_MGMT_V3_PRE_VALIDATE, peerinfo, rsp.uuid); STACK_DESTROY (frame->root); synctask_barrier_wake(args); return 0; } int32_t gd_mgmt_v3_pre_validate_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { return glusterd_big_locked_cbk (req, iov, count, myframe, gd_mgmt_v3_pre_validate_cbk_fn); } int gd_mgmt_v3_pre_validate (glusterd_op_t op, dict_t *op_ctx, glusterd_peerinfo_t *peerinfo, struct syncargs *args, uuid_t my_uuid, uuid_t recv_uuid) { int ret = -1; gd1_mgmt_v3_pre_val_req req = {{0},}; glusterd_conf_t *conf = THIS->private; GF_ASSERT(op_ctx); GF_ASSERT(peerinfo); GF_ASSERT(args); ret = dict_allocate_and_serialize (op_ctx, &req.dict.dict_val, &req.dict.dict_len); if (ret) goto out; uuid_copy (req.uuid, my_uuid); req.op = op; synclock_unlock (&conf->big_lock); ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo, &gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_PRE_VALIDATE, gd_mgmt_v3_pre_validate_cbk, (xdrproc_t) xdr_gd1_mgmt_v3_pre_val_req); synclock_lock (&conf->big_lock); out: gf_log ("", GF_LOG_DEBUG, "Returning %d", ret); return ret; } int glusterd_mgmt_v3_pre_validate (glusterd_conf_t *conf, glusterd_op_t op, dict_t *req_dict, char **op_errstr, int npeers) { int ret = -1; int peer_cnt = 0; dict_t *rsp_dict = NULL; glusterd_peerinfo_t *peerinfo = NULL; struct syncargs args = {0}; struct list_head *peers = NULL; uuid_t peer_uuid = {0}; xlator_t *this = NULL; this = THIS; peers = &conf->xaction_peers; rsp_dict = dict_new (); if (!rsp_dict) { gf_log (this->name, GF_LOG_ERROR, "Failed to create response dictionary"); goto out; } /* Pre Validation on local node */ ret = gd_mgmt_v3_pre_validate_fn (op, req_dict, op_errstr, rsp_dict); if (ret) { gf_log (this->name, GF_LOG_ERROR, "Pre Validation failed for " "operation %s on local node", gd_op_list[op]); if (*op_errstr == NULL) { ret = gf_asprintf (op_errstr, "Pre-validation failed " "on localhost"); if (ret == -1) *op_errstr = NULL; ret = -1; } goto out; } dict_unref (rsp_dict); rsp_dict = NULL; if (!npeers) { ret = 0; goto out; } /* Sending Pre Validation req to other nodes in the cluster */ synctask_barrier_init((&args)); peer_cnt = 0; list_for_each_entry (peerinfo, peers, op_peers_list) { gd_mgmt_v3_pre_validate (op, req_dict, peerinfo, &args, MY_UUID, peer_uuid); peer_cnt++; } gd_synctask_barrier_wait((&args), peer_cnt); if (args.op_ret) { gf_log (this->name, GF_LOG_ERROR, "Pre Validation failed on peers"); if (args.errstr) *op_errstr = gf_strdup (args.errstr); } ret = args.op_ret; gf_log (this->name, GF_LOG_DEBUG, "Sent pre valaidation req for %s " "to %d peers. Returning %d", gd_op_list[op], peer_cnt, ret); out: return ret; } int glusterd_mgmt_v3_build_payload (dict_t **req, char **op_errstr, dict_t *dict, glusterd_op_t op) { int ret = -1; dict_t *req_dict = NULL; xlator_t *this = NULL; GF_ASSERT (req); this = THIS; GF_ASSERT (this); req_dict = dict_new (); if (!req_dict) goto out; switch (op) { case GD_OP_SNAP: dict_copy (dict, req_dict); break; default: break; } *req = req_dict; ret = 0; out: return ret; } int32_t gd_mgmt_v3_brick_op_cbk_fn (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { int ret = -1; struct syncargs *args = NULL; glusterd_peerinfo_t *peerinfo = NULL; gd1_mgmt_v3_brick_op_rsp rsp = {{0},}; call_frame_t *frame = NULL; int op_ret = -1; int op_errno = -1; GF_ASSERT(req); GF_ASSERT(iov); GF_ASSERT(myframe); frame = myframe; args = frame->local; peerinfo = frame->cookie; frame->local = NULL; frame->cookie = NULL; if (-1 == req->rpc_status) { op_errno = ENOTCONN; goto out; } ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_brick_op_rsp); if (ret < 0) goto out; uuid_copy (args->uuid, rsp.uuid); op_ret = rsp.op_ret; op_errno = rsp.op_errno; out: gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL, GLUSTERD_MGMT_V3_BRICK_OP, peerinfo, rsp.uuid); STACK_DESTROY (frame->root); synctask_barrier_wake(args); return 0; } int32_t gd_mgmt_v3_brick_op_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { return glusterd_big_locked_cbk (req, iov, count, myframe, gd_mgmt_v3_brick_op_cbk_fn); } int gd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *op_ctx, glusterd_peerinfo_t *peerinfo, struct syncargs *args, uuid_t my_uuid, uuid_t recv_uuid) { int ret = -1; gd1_mgmt_v3_brick_op_req req = {{0},}; glusterd_conf_t *conf = THIS->private; GF_ASSERT(op_ctx); GF_ASSERT(peerinfo); GF_ASSERT(args); ret = dict_allocate_and_serialize (op_ctx, &req.dict.dict_val, &req.dict.dict_len); if (ret) goto out; uuid_copy (req.uuid, my_uuid); req.op = op; synclock_unlock (&conf->big_lock); ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo, &gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_BRICK_OP, gd_mgmt_v3_brick_op_cbk, (xdrproc_t) xdr_gd1_mgmt_v3_brick_op_req); synclock_lock (&conf->big_lock); out: gf_log ("", GF_LOG_DEBUG, "Returning %d", ret); return ret; } int glusterd_mgmt_v3_brick_op (glusterd_conf_t *conf, glusterd_op_t op, dict_t *req_dict, char **op_errstr, int npeers) { int ret = -1; int peer_cnt = 0; dict_t *rsp_dict = NULL; glusterd_peerinfo_t *peerinfo = NULL; struct syncargs args = {0}; struct list_head *peers = NULL; uuid_t peer_uuid = {0}; xlator_t *this = NULL; this = THIS; peers = &conf->xaction_peers; rsp_dict = dict_new (); if (!rsp_dict) { gf_log (this->name, GF_LOG_ERROR, "Failed to create response dictionary"); goto out; } /* Perform brick op on local node */ ret = gd_mgmt_v3_brick_op_fn (op, req_dict, op_errstr, rsp_dict); if (ret) { gf_log (this->name, GF_LOG_ERROR, "Brick ops failed for " "operation %s on local node", gd_op_list[op]); if (*op_errstr == NULL) { ret = gf_asprintf (op_errstr, "Brick ops failed " "on localhost"); if (ret == -1) *op_errstr = NULL; ret = -1; } goto out; } dict_unref (rsp_dict); rsp_dict = NULL; if (!npeers) { ret = 0; goto out; } /* Sending brick op req to other nodes in the cluster */ synctask_barrier_init((&args)); peer_cnt = 0; list_for_each_entry (peerinfo, peers, op_peers_list) { gd_mgmt_v3_brick_op (op, req_dict, peerinfo, &args, MY_UUID, peer_uuid); peer_cnt++; } gd_synctask_barrier_wait((&args), peer_cnt); if (args.op_ret) { gf_log (this->name, GF_LOG_ERROR, "Brick ops failed on peers"); if (args.errstr) *op_errstr = gf_strdup (args.errstr); } ret = args.op_ret; gf_log (this->name, GF_LOG_DEBUG, "Sent brick op req for %s " "to %d peers. Returning %d", gd_op_list[op], peer_cnt, ret); out: return ret; } int32_t gd_mgmt_v3_commit_cbk_fn (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { int ret = -1; struct syncargs *args = NULL; glusterd_peerinfo_t *peerinfo = NULL; gd1_mgmt_v3_commit_rsp rsp = {{0},}; call_frame_t *frame = NULL; int op_ret = -1; int op_errno = -1; GF_ASSERT(req); GF_ASSERT(iov); GF_ASSERT(myframe); frame = myframe; args = frame->local; peerinfo = frame->cookie; frame->local = NULL; frame->cookie = NULL; if (-1 == req->rpc_status) { op_errno = ENOTCONN; goto out; } ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_commit_rsp); if (ret < 0) goto out; uuid_copy (args->uuid, rsp.uuid); op_ret = rsp.op_ret; op_errno = rsp.op_errno; out: gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL, GLUSTERD_MGMT_V3_COMMIT, peerinfo, rsp.uuid); STACK_DESTROY (frame->root); synctask_barrier_wake(args); return 0; } int32_t gd_mgmt_v3_commit_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { return glusterd_big_locked_cbk (req, iov, count, myframe, gd_mgmt_v3_commit_cbk_fn); } int gd_mgmt_v3_commit (glusterd_op_t op, dict_t *op_ctx, glusterd_peerinfo_t *peerinfo, struct syncargs *args, uuid_t my_uuid, uuid_t recv_uuid) { int ret = -1; gd1_mgmt_v3_commit_req req = {{0},}; glusterd_conf_t *conf = THIS->private; GF_ASSERT(op_ctx); GF_ASSERT(peerinfo); GF_ASSERT(args); ret = dict_allocate_and_serialize (op_ctx, &req.dict.dict_val, &req.dict.dict_len); if (ret) goto out; uuid_copy (req.uuid, my_uuid); req.op = op; synclock_unlock (&conf->big_lock); ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo, &gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_COMMIT, gd_mgmt_v3_commit_cbk, (xdrproc_t) xdr_gd1_mgmt_v3_commit_req); synclock_lock (&conf->big_lock); out: gf_log ("", GF_LOG_DEBUG, "Returning %d", ret); return ret; } int glusterd_mgmt_v3_commit (glusterd_conf_t *conf, glusterd_op_t op, dict_t *req_dict, char **op_errstr, int npeers) { int ret = -1; int peer_cnt = 0; dict_t *rsp_dict = NULL; glusterd_peerinfo_t *peerinfo = NULL; struct syncargs args = {0}; struct list_head *peers = NULL; uuid_t peer_uuid = {0}; xlator_t *this = NULL; this = THIS; peers = &conf->xaction_peers; rsp_dict = dict_new (); if (!rsp_dict) { gf_log (this->name, GF_LOG_ERROR, "Failed to create response dictionary"); goto out; } /* Commit on local node */ ret = gd_mgmt_v3_commit_fn (op, req_dict, op_errstr, rsp_dict); if (ret) { gf_log (this->name, GF_LOG_ERROR, "Commit failed for " "operation %s on local node", gd_op_list[op]); if (*op_errstr == NULL) { ret = gf_asprintf (op_errstr, "Commit failed " "on localhost"); if (ret == -1) *op_errstr = NULL; ret = -1; } goto out; } dict_unref (rsp_dict); rsp_dict = NULL; if (!npeers) { ret = 0; goto out; } /* Sending commit req to other nodes in the cluster */ synctask_barrier_init((&args)); peer_cnt = 0; list_for_each_entry (peerinfo, peers, op_peers_list) { gd_mgmt_v3_commit (op, req_dict, peerinfo, &args, MY_UUID, peer_uuid); peer_cnt++; } gd_synctask_barrier_wait((&args), peer_cnt); if (args.op_ret) { gf_log (this->name, GF_LOG_ERROR, "Commit failed on peers"); if (args.errstr) *op_errstr = gf_strdup (args.errstr); } ret = args.op_ret; gf_log (this->name, GF_LOG_DEBUG, "Sent commit req for %s to %d " "peers. Returning %d", gd_op_list[op], peer_cnt, ret); out: return ret; } int32_t gd_mgmt_v3_post_validate_cbk_fn (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { int ret = -1; struct syncargs *args = NULL; glusterd_peerinfo_t *peerinfo = NULL; gd1_mgmt_v3_post_val_rsp rsp = {{0},}; call_frame_t *frame = NULL; int op_ret = -1; int op_errno = -1; GF_ASSERT(req); GF_ASSERT(iov); GF_ASSERT(myframe); frame = myframe; args = frame->local; peerinfo = frame->cookie; frame->local = NULL; frame->cookie = NULL; if (-1 == req->rpc_status) { op_errno = ENOTCONN; goto out; } ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_post_val_rsp); if (ret < 0) goto out; uuid_copy (args->uuid, rsp.uuid); op_ret = rsp.op_ret; op_errno = rsp.op_errno; out: gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL, GLUSTERD_MGMT_V3_POST_VALIDATE, peerinfo, rsp.uuid); STACK_DESTROY (frame->root); synctask_barrier_wake(args); return 0; } int32_t gd_mgmt_v3_post_validate_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { return glusterd_big_locked_cbk (req, iov, count, myframe, gd_mgmt_v3_post_validate_cbk_fn); } int gd_mgmt_v3_post_validate (glusterd_op_t op, dict_t *op_ctx, glusterd_peerinfo_t *peerinfo, struct syncargs *args, uuid_t my_uuid, uuid_t recv_uuid) { int ret = -1; gd1_mgmt_v3_post_val_req req = {{0},}; glusterd_conf_t *conf = THIS->private; GF_ASSERT(op_ctx); GF_ASSERT(peerinfo); GF_ASSERT(args); ret = dict_allocate_and_serialize (op_ctx, &req.dict.dict_val, &req.dict.dict_len); if (ret) goto out; uuid_copy (req.uuid, my_uuid); req.op = op; synclock_unlock (&conf->big_lock); ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo, &gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_POST_VALIDATE, gd_mgmt_v3_post_validate_cbk, (xdrproc_t) xdr_gd1_mgmt_v3_post_val_req); synclock_lock (&conf->big_lock); out: gf_log ("", GF_LOG_DEBUG, "Returning %d", ret); return ret; } int glusterd_mgmt_v3_post_validate (glusterd_conf_t *conf, glusterd_op_t op, dict_t *req_dict, char **op_errstr, int npeers) { int ret = -1; int peer_cnt = 0; dict_t *rsp_dict = NULL; glusterd_peerinfo_t *peerinfo = NULL; struct syncargs args = {0}; struct list_head *peers = NULL; uuid_t peer_uuid = {0}; xlator_t *this = NULL; this = THIS; peers = &conf->xaction_peers; rsp_dict = dict_new (); if (!rsp_dict) { gf_log (this->name, GF_LOG_ERROR, "Failed to create response dictionary"); goto out; } /* Post Validation on local node */ ret = gd_mgmt_v3_post_validate_fn (op, req_dict, op_errstr, rsp_dict); if (ret) { gf_log (this->name, GF_LOG_ERROR, "Post Validation failed for " "operation %s on local node", gd_op_list[op]); if (*op_errstr == NULL) { ret = gf_asprintf (op_errstr, "Post-validation failed " "on localhost"); if (ret == -1) *op_errstr = NULL; ret = -1; } goto out; } dict_unref (rsp_dict); rsp_dict = NULL; if (!npeers) { ret = 0; goto out; } /* Sending Post Validation req to other nodes in the cluster */ synctask_barrier_init((&args)); peer_cnt = 0; list_for_each_entry (peerinfo, peers, op_peers_list) { gd_mgmt_v3_post_validate (op, req_dict, peerinfo, &args, MY_UUID, peer_uuid); peer_cnt++; } gd_synctask_barrier_wait((&args), peer_cnt); if (args.op_ret) { gf_log (this->name, GF_LOG_ERROR, "Post Validation failed on peers"); if (args.errstr) *op_errstr = gf_strdup (args.errstr); } ret = args.op_ret; gf_log (this->name, GF_LOG_DEBUG, "Sent post valaidation req for %s " "to %d peers. Returning %d", gd_op_list[op], peer_cnt, ret); out: return ret; } int32_t gd_mgmt_v3_vol_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { int ret = -1; struct syncargs *args = NULL; glusterd_peerinfo_t *peerinfo = NULL; gd1_mgmt_v3_vol_unlock_rsp rsp = {{0},}; call_frame_t *frame = NULL; int op_ret = -1; int op_errno = -1; GF_ASSERT(req); GF_ASSERT(iov); GF_ASSERT(myframe); frame = myframe; args = frame->local; peerinfo = frame->cookie; frame->local = NULL; frame->cookie = NULL; if (-1 == req->rpc_status) { op_errno = ENOTCONN; goto out; } ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_vol_unlock_rsp); if (ret < 0) goto out; uuid_copy (args->uuid, rsp.uuid); op_ret = rsp.op_ret; op_errno = rsp.op_errno; out: gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL, GLUSTERD_MGMT_V3_VOLUME_UNLOCK, peerinfo, rsp.uuid); STACK_DESTROY (frame->root); synctask_barrier_wake(args); return 0; } int32_t gd_mgmt_v3_vol_unlock_cbk (struct rpc_req *req, struct iovec *iov, int count, void *myframe) { return glusterd_big_locked_cbk (req, iov, count, myframe, gd_mgmt_v3_vol_unlock_cbk_fn); } int gd_mgmt_v3_vol_unlock (glusterd_op_t op, dict_t *op_ctx, glusterd_peerinfo_t *peerinfo, struct syncargs *args, uuid_t my_uuid, uuid_t recv_uuid) { int ret = -1; gd1_mgmt_v3_vol_unlock_req req = {{0},}; glusterd_conf_t *conf = THIS->private; GF_ASSERT(op_ctx); GF_ASSERT(peerinfo); GF_ASSERT(args); ret = dict_allocate_and_serialize (op_ctx, &req.dict.dict_val, &req.dict.dict_len); if (ret) goto out; uuid_copy (req.uuid, my_uuid); req.op = op; synclock_unlock (&conf->big_lock); ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo, &gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_VOLUME_UNLOCK, gd_mgmt_v3_vol_unlock_cbk, (xdrproc_t) xdr_gd1_mgmt_v3_vol_unlock_req); synclock_lock (&conf->big_lock); out: gf_log ("", GF_LOG_DEBUG, "Returning %d", ret); return ret; } int glusterd_mgmt_v3_release_peer_locks (glusterd_conf_t *conf, glusterd_op_t op, dict_t *dict, char **op_errstr, int npeers, gf_boolean_t is_acquired) { int ret = -1; int peer_cnt = 0; uuid_t peer_uuid = {0}; xlator_t *this = NULL; glusterd_peerinfo_t *peerinfo = NULL; struct syncargs args = {0}; struct list_head *peers = NULL; this = THIS; peers = &conf->xaction_peers; /* If the lock has not been held during this * transaction, do not send unlock requests */ if (!is_acquired) goto out; if (!npeers) { ret = 0; goto out; } /* Sending Volume unlock req to other nodes in the cluster */ synctask_barrier_init((&args)); peer_cnt = 0; list_for_each_entry (peerinfo, peers, op_peers_list) { gd_mgmt_v3_vol_unlock (op, dict, peerinfo, &args, MY_UUID, peer_uuid); peer_cnt++; } gd_synctask_barrier_wait((&args), peer_cnt); if (args.op_ret) { gf_log (this->name, GF_LOG_ERROR, "Unlock failed on peers"); if (args.errstr) *op_errstr = gf_strdup (args.errstr); } ret = args.op_ret; gf_log (this->name, GF_LOG_DEBUG, "Sent unlock op req for %s " "to %d peers. Returning %d", gd_op_list[op], peer_cnt, ret); out: return ret; } int32_t glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op, dict_t *dict) { int ret = -1; int npeers = 0; dict_t *req_dict = NULL; dict_t *tmp_dict = NULL; glusterd_conf_t *conf = NULL; char *op_errstr = NULL; char *volname = NULL; xlator_t *this = NULL; gf_boolean_t is_acquired = _gf_false; uuid_t *originator_uuid = NULL; this = THIS; GF_ASSERT (this); conf = this->private; GF_ASSERT (conf); /* Save the MY_UUID as the originator_uuid. This originator_uuid * will be used by is_origin_glusterd() to determine if a node * is the originator node for a command. */ originator_uuid = GF_CALLOC (1, sizeof(uuid_t), gf_common_mt_uuid_t); if (!originator_uuid) { ret = -1; goto out; } uuid_copy (*originator_uuid, MY_UUID); ret = dict_set_bin (dict, "originator_uuid", originator_uuid, sizeof (uuid_t)); if (ret) { gf_log (this->name, GF_LOG_ERROR, "Failed to set originator_uuid."); goto out; } /* Marking the operation as complete synctasked */ ret = dict_set_int32 (dict, "is_synctasked", _gf_true); if (ret) { gf_log (this->name, GF_LOG_ERROR, "Failed to set synctasked flag."); goto out; } /* Use a copy at local unlock as cli response will be sent before * the unlock and the volname in the dict might be removed */ tmp_dict = dict_new(); if (!tmp_dict) { gf_log ("", GF_LOG_ERROR, "Unable to create dict"); goto out; } dict_copy (dict, tmp_dict); /* BUILD PEERS LIST */ INIT_LIST_HEAD (&conf->xaction_peers); npeers = gd_build_peers_list (&conf->peers, &conf->xaction_peers, op); /* LOCKDOWN PHASE - Based on the number of volumes either single * or multiple volume locks is acquired */ ret = glusterd_mgmt_v3_initiate_lockdown (conf, op, dict, &op_errstr, npeers, &is_acquired); if (ret) { gf_log ("", GF_LOG_ERROR, "Volume lockdown failed."); goto out; } /* BUILD PAYLOAD */ ret = glusterd_mgmt_v3_build_payload (&req_dict, &op_errstr, dict, op); if (ret) { gf_log (this->name, GF_LOG_ERROR, LOGSTR_BUILD_PAYLOAD, gd_op_list[op]); if (op_errstr == NULL) gf_asprintf (&op_errstr, OPERRSTR_BUILD_PAYLOAD); goto out; } /* PRE-COMMIT VALIDATE PHASE */ ret = glusterd_mgmt_v3_pre_validate (conf, op, req_dict, &op_errstr, npeers); if (ret) { gf_log ("", GF_LOG_ERROR, "Pre Validation Failed"); goto out; } /* BRICK OP PHASE */ ret = glusterd_mgmt_v3_brick_op (conf, op, req_dict, &op_errstr, npeers); if (ret) { gf_log ("", GF_LOG_ERROR, "Brick Ops Failed"); goto out; } /* COMMIT OP PHASE */ ret = glusterd_mgmt_v3_commit (conf, op, req_dict, &op_errstr, npeers); if (ret) { gf_log ("", GF_LOG_ERROR, "Commit Op Failed"); goto out; } /* POST-COMMIT VALIDATE PHASE */ ret = glusterd_mgmt_v3_post_validate (conf, op, req_dict, &op_errstr, npeers); if (ret) { gf_log ("", GF_LOG_ERROR, "Post Validation Failed"); goto out; } ret = 0; out: /* UNLOCK PHASE FOR PEERS*/ (void) glusterd_mgmt_v3_release_peer_locks (conf, op, dict, &op_errstr, npeers, is_acquired); /* SEND CLI RESPONSE */ glusterd_op_send_cli_response (op, ret, 0, req, dict, op_errstr); /* LOCAL VOLUME(S) UNLOCK */ if (!is_acquired) goto cleanup; ret = dict_get_str (tmp_dict, "volname", &volname); if (ret) { /* Trying to release volume locks on multiple volumes */ ret = glusterd_multiple_volumes_unlock (tmp_dict, MY_UUID); if (ret) gf_log ("", GF_LOG_ERROR, "Failed to release volume locks on localhost"); } else { ret = glusterd_volume_unlock (volname, MY_UUID); if (ret) gf_log (this->name, GF_LOG_ERROR, "Unable to release local lock for %s", volname); } cleanup: if (req_dict) dict_unref (req_dict); if (tmp_dict) dict_unref (tmp_dict); if (op_errstr) { GF_FREE (op_errstr); op_errstr = NULL; } return 0; }