| author | Avra Sengupta <asengupt@redhat.com> | 2013-10-09 22:13:34 +0530 |
|---|---|---|
| committer | shishir gowda <sgowda@redhat.com> | 2013-11-15 12:37:58 +0530 |
| commit | 8c89a5ffc9d1a9aa6a52a915cdd988c40aececb7 (patch) | |
| tree | a5629248a8c8f78637370920c26445310c283fe3 /xlators/mgmt/glusterd/src/glusterd-mgmt.c | |
| parent | 99a7b58a2983788a3bb36662d2b83c2da3b6472c (diff) | |
glusterd/locks: Adding multiple volume locks support
Also links the snap create command to mgmt_v3.
Change-Id: If2ed29be072e10d0b0bd271d53e48eeaa6501ed7
Signed-off-by: Avra Sengupta <asengupt@redhat.com>
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-mgmt.c')
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-mgmt.c | 169 |
1 file changed, 93 insertions, 76 deletions
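
At a high level, the patch changes the local lockdown step so it can handle either one volume or several: if the request dict carries a top-level "volname" key, the existing single-volume lock is taken; otherwise the new glusterd_multiple_volumes_lock() locks every volume the dict names. It also moves the `if (!npeers)` early-return below the local lock/validate work in each phase, so a single-node cluster still performs the local step before skipping peer RPCs. A minimal sketch of the dispatch, condensed from the diff below and assuming glusterd's internal headers; the wrapper name acquire_local_locks() is illustrative, not from the patch:

```c
/* Illustrative only: condensed from glusterd_mgmt_v3_initiate_lockdown()
 * in this patch. dict_get_str(), glusterd_volume_lock() and
 * glusterd_multiple_volumes_lock() are the helpers the patch uses;
 * acquire_local_locks() is a made-up name for this sketch. */
static int
acquire_local_locks (dict_t *dict, uuid_t my_uuid)
{
        char *volname = NULL;

        /* A lone "volname" key means the classic single-volume path. */
        if (dict_get_str (dict, "volname", &volname) == 0)
                return glusterd_volume_lock (volname, my_uuid);

        /* No "volname": lock every volume named in the dict. */
        return glusterd_multiple_volumes_lock (dict, my_uuid);
}
```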
```diff
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index 16bff218b..268a834d8 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -249,11 +249,12 @@ out:
 int
 glusterd_mgmt_v3_initiate_lockdown (glusterd_conf_t *conf, glusterd_op_t op,
-                                    dict_t *dict, char *volname, char **op_errstr,
-                                    int npeers, gf_boolean_t *is_acquired)
+                                    dict_t *dict, char **op_errstr, int npeers,
+                                    gf_boolean_t *is_acquired)
 {
         int                     ret        = -1;
         int                     peer_cnt   = 0;
+        char                   *volname    = NULL;
         uuid_t                  peer_uuid  = {0};
         xlator_t               *this      = NULL;
         glusterd_peerinfo_t    *peerinfo  = NULL;
@@ -263,21 +264,32 @@ glusterd_mgmt_v3_initiate_lockdown (glusterd_conf_t *conf, glusterd_op_t op,
         this = THIS;
         peers = &conf->xaction_peers;
 
-        if (!npeers) {
-                ret = 0;
-                goto out;
-        }
-
-        /* Volume lock on local node */
-        ret = glusterd_volume_lock (volname, MY_UUID);
+        /* Volume(s) lock on local node */
+        ret = dict_get_str (dict, "volname", &volname);
         if (ret) {
-                gf_log (this->name, GF_LOG_ERROR,
-                        "Unable to acquire local lock for %s", volname);
-                goto out;
+                /* Trying to acquire volume locks on multiple volumes */
+                ret = glusterd_multiple_volumes_lock (dict, MY_UUID);
+                if (ret) {
+                        gf_log ("", GF_LOG_ERROR,
+                                "Failed to acquire volume locks on localhost");
+                        goto out;
+                }
+        } else {
+                ret = glusterd_volume_lock (volname, MY_UUID);
+                if (ret) {
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "Unable to acquire local lock for %s", volname);
+                        goto out;
+                }
         }
 
         *is_acquired = _gf_true;
 
+        if (!npeers) {
+                ret = 0;
+                goto out;
+        }
+
         /* Sending Volume lock req to other nodes in the cluster */
         synctask_barrier_init((&args));
         peer_cnt = 0;
@@ -423,11 +435,6 @@ glusterd_mgmt_v3_pre_validate (glusterd_conf_t *conf, glusterd_op_t op,
         this = THIS;
         peers = &conf->xaction_peers;
 
-        if (!npeers) {
-                ret = 0;
-                goto out;
-        }
-
         rsp_dict = dict_new ();
         if (!rsp_dict) {
                 gf_log (this->name, GF_LOG_ERROR,
@@ -460,6 +467,11 @@ glusterd_mgmt_v3_pre_validate (glusterd_conf_t *conf, glusterd_op_t op,
         dict_unref (rsp_dict);
         rsp_dict = NULL;
 
+        if (!npeers) {
+                ret = 0;
+                goto out;
+        }
+
         /* Sending Pre Validation req to other nodes in the cluster */
         synctask_barrier_init((&args));
         peer_cnt = 0;
@@ -619,11 +631,6 @@ glusterd_mgmt_v3_brick_op (glusterd_conf_t *conf, glusterd_op_t op,
         this = THIS;
         peers = &conf->xaction_peers;
 
-        if (!npeers) {
-                ret = 0;
-                goto out;
-        }
-
         rsp_dict = dict_new ();
         if (!rsp_dict) {
                 gf_log (this->name, GF_LOG_ERROR,
@@ -656,6 +663,11 @@ glusterd_mgmt_v3_brick_op (glusterd_conf_t *conf, glusterd_op_t op,
         dict_unref (rsp_dict);
         rsp_dict = NULL;
 
+        if (!npeers) {
+                ret = 0;
+                goto out;
+        }
+
         /* Sending brick op req to other nodes in the cluster */
         synctask_barrier_init((&args));
         peer_cnt = 0;
@@ -785,11 +797,6 @@ glusterd_mgmt_v3_commit (glusterd_conf_t *conf, glusterd_op_t op,
         this = THIS;
         peers = &conf->xaction_peers;
 
-        if (!npeers) {
-                ret = 0;
-                goto out;
-        }
-
         rsp_dict = dict_new ();
         if (!rsp_dict) {
                 gf_log (this->name, GF_LOG_ERROR,
@@ -822,6 +829,11 @@ glusterd_mgmt_v3_commit (glusterd_conf_t *conf, glusterd_op_t op,
         dict_unref (rsp_dict);
         rsp_dict = NULL;
 
+        if (!npeers) {
+                ret = 0;
+                goto out;
+        }
+
         /* Sending commit req to other nodes in the cluster */
         synctask_barrier_init((&args));
         peer_cnt = 0;
@@ -951,11 +963,6 @@ glusterd_mgmt_v3_post_validate (glusterd_conf_t *conf, glusterd_op_t op,
         this = THIS;
         peers = &conf->xaction_peers;
 
-        if (!npeers) {
-                ret = 0;
-                goto out;
-        }
-
         rsp_dict = dict_new ();
         if (!rsp_dict) {
                 gf_log (this->name, GF_LOG_ERROR,
@@ -988,6 +995,11 @@ glusterd_mgmt_v3_post_validate (glusterd_conf_t *conf, glusterd_op_t op,
         dict_unref (rsp_dict);
         rsp_dict = NULL;
 
+        if (!npeers) {
+                ret = 0;
+                goto out;
+        }
+
         /* Sending Post Validation req to other nodes in the cluster */
         synctask_barrier_init((&args));
         peer_cnt = 0;
@@ -1103,8 +1115,8 @@ out:
 int
 glusterd_mgmt_v3_release_peer_locks (glusterd_conf_t *conf, glusterd_op_t op,
-                                     dict_t *dict, char *volname, char **op_errstr,
-                                     int npeers, gf_boolean_t is_acquired)
+                                     dict_t *dict, char **op_errstr, int npeers,
+                                     gf_boolean_t is_acquired)
 {
         int                     ret        = -1;
         int                     peer_cnt   = 0;
@@ -1117,16 +1129,16 @@ glusterd_mgmt_v3_release_peer_locks (glusterd_conf_t *conf, glusterd_op_t op,
         this = THIS;
         peers = &conf->xaction_peers;
 
-        if (!npeers) {
-                ret = 0;
-                goto out;
-        }
-
         /* If the lock has not been held during this
          * transaction, do not send unlock requests */
         if (!is_acquired)
                 goto out;
 
+        if (!npeers) {
+                ret = 0;
+                goto out;
+        }
+
         /* Sending Volume unlock req to other nodes in the cluster */
         synctask_barrier_init((&args));
         peer_cnt = 0;
@@ -1161,9 +1173,9 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
         int                     ret          = -1;
         int                     npeers       = 0;
         dict_t                 *req_dict     = NULL;
+        dict_t                 *tmp_dict     = NULL;
         glusterd_conf_t        *conf         = NULL;
         char                   *op_errstr    = NULL;
-        char                   *tmp          = NULL;
         char                   *volname      = NULL;
         xlator_t               *this         = NULL;
         gf_boolean_t            is_acquired  = _gf_false;
@@ -1193,28 +1205,31 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
                 goto out;
         }
 
+        /* Marking the operation as complete synctasked */
+        ret = dict_set_int32 (dict, "is_synctasked", _gf_true);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR,
+                        "Failed to set synctasked flag.");
+                goto out;
+        }
+
+        /* Use a copy at local unlock as cli response will be sent before
+         * the unlock and the volname in the dict might be removed */
+        tmp_dict = dict_new();
+        if (!tmp_dict) {
+                gf_log ("", GF_LOG_ERROR, "Unable to create dict");
+                goto out;
+        }
+        dict_copy (dict, tmp_dict);
+
         /* BUILD PEERS LIST */
         INIT_LIST_HEAD (&conf->xaction_peers);
         npeers = gd_build_peers_list (&conf->peers, &conf->xaction_peers, op);
 
-        ret = dict_get_str (dict, "volname", &tmp);
-        if (ret) {
-                gf_log ("", GF_LOG_DEBUG, "Failed to get volume "
-                        "name");
-                goto out;
-        } else {
-                /* Use a copy of volname, as cli response will be
-                 * sent before the unlock, and the volname in the
-                 * dict, might be removed */
-                volname = gf_strdup (tmp);
-                if (!volname)
-                        goto out;
-        }
-
-        /* LOCKDOWN PHASE */
-        ret = glusterd_mgmt_v3_initiate_lockdown (conf, op, dict, volname,
-                                                  &op_errstr, npeers, &is_acquired);
+        /* LOCKDOWN PHASE - Based on the number of volumes either single
+         * or multiple volume locks is acquired */
+        ret = glusterd_mgmt_v3_initiate_lockdown (conf, op, dict, &op_errstr,
+                                                  npeers, &is_acquired);
         if (ret) {
                 gf_log ("", GF_LOG_ERROR, "Volume lockdown failed.");
                 goto out;
         }
@@ -1265,35 +1280,37 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
         ret = 0;
 out:
         /* UNLOCK PHASE FOR PEERS*/
-        (void) glusterd_mgmt_v3_release_peer_locks (conf, op, dict, volname,
-                                                    &op_errstr, npeers, is_acquired);
+        (void) glusterd_mgmt_v3_release_peer_locks (conf, op, dict, &op_errstr,
+                                                    npeers, is_acquired);
 
         /* SEND CLI RESPONSE */
         glusterd_op_send_cli_response (op, ret, 0, req, dict, op_errstr);
 
-        /* Volume unlock on local node */
-        ret = glusterd_volume_unlock (volname, MY_UUID);
-        if (ret) {
-                gf_log (this->name, GF_LOG_ERROR,
-                        "Unable to release local lock for %s", volname);
-                if (op_errstr == NULL) {
-                        ret = gf_asprintf (&op_errstr,
-                                           "Failed to release lock "
-                                           "on localhost");
-                        if (ret == -1)
-                                op_errstr = NULL;
+        /* LOCAL VOLUME(S) UNLOCK */
+        if (!is_acquired)
+                goto cleanup;
 
-                        ret = -1;
-                }
-                goto out;
+        ret = dict_get_str (tmp_dict, "volname", &volname);
+        if (ret) {
+                /* Trying to release volume locks on multiple volumes */
+                ret = glusterd_multiple_volumes_unlock (tmp_dict, MY_UUID);
+                if (ret)
+                        gf_log ("", GF_LOG_ERROR,
+                                "Failed to release volume locks on localhost");
+        } else {
+                ret = glusterd_volume_unlock (volname, MY_UUID);
+                if (ret)
+                        gf_log (this->name, GF_LOG_ERROR,
+                                "Unable to release local lock for %s", volname);
         }
 
-        if (volname)
-                GF_FREE (volname);
-
+cleanup:
         if (req_dict)
                 dict_unref (req_dict);
 
+        if (tmp_dict)
+                dict_unref (tmp_dict);
+
         if (op_errstr) {
                 GF_FREE (op_errstr);
                 op_errstr = NULL;
```
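
The other subtlety is in glusterd_mgmt_v3_initiate_all_phases(): the CLI response goes out before the local unlock, and responding can strip keys such as "volname" from the request dict. Instead of strdup'ing just the volume name as the old code did, the patch snapshots the whole dict up front and unlocks against the copy, which also works when several volumes are named. A sketch of that ordering, assuming glusterd's dict API (dict_new(), dict_copy(), dict_unref()); run_phases() is a hypothetical stand-in for the lockdown/validate/commit calls:

```c
/* Illustrative only: condenses the out:/cleanup: ordering from the patch.
 * run_phases() is hypothetical; the dict and lock helpers are real. */
static int
run_transaction (rpcsvc_request_t *req, glusterd_op_t op, dict_t *dict)
{
        dict_t *tmp_dict  = NULL;
        char   *volname   = NULL;
        char   *op_errstr = NULL;
        int     ret       = -1;

        /* Snapshot first: sending the CLI response below may remove
         * "volname" from 'dict' before the unlock runs. */
        tmp_dict = dict_new ();
        if (!tmp_dict)
                return -1;
        dict_copy (dict, tmp_dict);

        ret = run_phases (req, op, dict, &op_errstr);   /* hypothetical */

        /* Response goes out first ... */
        glusterd_op_send_cli_response (op, ret, 0, req, dict, op_errstr);

        /* ... so the unlock reads "volname" from the untouched copy. */
        if (dict_get_str (tmp_dict, "volname", &volname))
                glusterd_multiple_volumes_unlock (tmp_dict, MY_UUID);
        else
                glusterd_volume_unlock (volname, MY_UUID);

        dict_unref (tmp_dict);
        return ret;
}
```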