summary | refs | log | tree | commit | diff | stats
path: root/xlators/mgmt/glusterd/src/glusterd-mgmt.c
diff options
context:
space:
mode:
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-mgmt.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mgmt.c  78
1 files changed, 26 insertions, 52 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index e895f6e..cdc5184 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -302,23 +302,12 @@ glusterd_mgmt_v3_initiate_lockdown (glusterd_conf_t *conf, glusterd_op_t op,
this = THIS;
peers = &conf->xaction_peers;
- /* mgmt_v3 lock on local node */
- ret = dict_get_str (dict, "volname", &volname);
+ /* Trying to acquire multiple mgmt_v3 locks on local node */
+ ret = glusterd_multiple_mgmt_v3_lock (dict, MY_UUID);
if (ret) {
- /* Trying to acquire multiple mgmt_v3 locks */
- ret = glusterd_multiple_mgmt_v3_lock (dict, MY_UUID);
- if (ret) {
- gf_log ("", GF_LOG_ERROR,
- "Failed to acquire mgmt_v3 locks on localhost");
- goto out;
- }
- } else {
- ret = glusterd_mgmt_v3_lock (volname, MY_UUID, "vol");
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to acquire local lock for %s", volname);
- goto out;
- }
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to acquire mgmt_v3 locks on localhost");
+ goto out;
}
*is_acquired = _gf_true;
@@ -1277,8 +1266,9 @@ out:
int
glusterd_mgmt_v3_release_peer_locks (glusterd_conf_t *conf, glusterd_op_t op,
- dict_t *dict, char **op_errstr, int npeers,
- gf_boolean_t is_acquired)
+ dict_t *dict, int32_t op_ret,
+ char **op_errstr, int npeers,
+ gf_boolean_t is_acquired)
{
int ret = -1;
int peer_cnt = 0;
@@ -1316,7 +1306,7 @@ glusterd_mgmt_v3_release_peer_locks (glusterd_conf_t *conf, glusterd_op_t op,
gf_log (this->name, GF_LOG_ERROR,
"Unlock failed on peers");
- if (args.errstr)
+ if (!op_ret && args.errstr)
*op_errstr = gf_strdup (args.errstr);
}
@@ -1339,7 +1329,6 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
dict_t *tmp_dict = NULL;
glusterd_conf_t *conf = NULL;
char *op_errstr = NULL;
- char *volname = NULL;
xlator_t *this = NULL;
gf_boolean_t is_acquired = _gf_false;
uuid_t *originator_uuid = NULL;
@@ -1434,8 +1423,9 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
ret = 0;
out:
/* UNLOCK PHASE FOR PEERS*/
- (void) glusterd_mgmt_v3_release_peer_locks (conf, op, dict, &op_errstr,
- npeers, is_acquired);
+ (void) glusterd_mgmt_v3_release_peer_locks (conf, op, dict,
+ ret, &op_errstr,
+ npeers, is_acquired);
/* SEND CLI RESPONSE */
glusterd_op_send_cli_response (op, ret, 0, req, dict, op_errstr);
@@ -1444,19 +1434,11 @@ out:
if (!is_acquired)
goto cleanup;
- ret = dict_get_str (tmp_dict, "volname", &volname);
- if (ret) {
- /* Trying to release multiple mgmt_v3 locks */
- ret = glusterd_multiple_mgmt_v3_unlock (tmp_dict, MY_UUID);
- if (ret)
- gf_log ("", GF_LOG_ERROR,
- "Failed to release mgmt_v3 locks on localhost");
- } else {
- ret = glusterd_mgmt_v3_unlock (volname, MY_UUID, "vol");
- if (ret)
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to release local lock for %s", volname);
- }
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock (tmp_dict, MY_UUID);
+ if (ret)
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to release mgmt_v3 locks on localhost");
cleanup:
if (req_dict)
@@ -1483,7 +1465,6 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
dict_t *tmp_dict = NULL;
glusterd_conf_t *conf = NULL;
char *op_errstr = NULL;
- char *volname = NULL;
xlator_t *this = NULL;
gf_boolean_t is_acquired = _gf_false;
uuid_t *originator_uuid = NULL;
@@ -1537,7 +1518,7 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
/* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
ret = glusterd_mgmt_v3_initiate_lockdown (conf, op, dict, &op_errstr,
- npeers, &is_acquired);
+ npeers, &is_acquired);
if (ret) {
gf_log ("", GF_LOG_ERROR, "mgmt_v3 lockdown failed.");
goto out;
@@ -1612,8 +1593,9 @@ unbarrier:
out:
/* UNLOCK PHASE FOR PEERS*/
- (void) glusterd_mgmt_v3_release_peer_locks (conf, op, dict, &op_errstr,
- npeers, is_acquired);
+ (void) glusterd_mgmt_v3_release_peer_locks (conf, op, dict,
+ ret, &op_errstr,
+ npeers, is_acquired);
/* If the commit op (snapshot taking) failed, then the error is stored
in tmp_errstr and unbarrier is called. Suppose, if unbarrier also
@@ -1638,19 +1620,11 @@ out:
if (!is_acquired)
goto cleanup;
- ret = dict_get_str (tmp_dict, "volname", &volname);
- if (ret) {
- /* Trying to release multiple mgmt_v3 locks */
- ret = glusterd_multiple_mgmt_v3_unlock (tmp_dict, MY_UUID);
- if (ret)
- gf_log ("", GF_LOG_ERROR,
- "Failed to release mgmt_v3 locks on localhost");
- } else {
- ret = glusterd_mgmt_v3_unlock (volname, MY_UUID, "vol");
- if (ret)
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to release local lock for %s", volname);
- }
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock (tmp_dict, MY_UUID);
+ if (ret)
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to release mgmt_v3 locks on localhost");
cleanup:
if (req_dict)