author    Avra Sengupta <asengupt@redhat.com>    2014-02-11 02:22:32 +0000
committer Vijay Bellur <vbellur@redhat.com>      2014-02-14 07:05:30 -0800
commit    53779e4458c17a3978675585e8099c97c8c2b3a2 (patch)
tree      a01ecbac5ddedc438008c57550a91ea481121b81 /xlators/mgmt/glusterd/src/glusterd-syncop.c
parent    a78dfebb7343671b0a3a0af8b46951894a3cf7a4 (diff)
glusterd/Vol-Locks : Moving globals into glusterd priv and code refactoring
Moved globals (the vol_lock and txn_opinfo dicts and global_txn_id) into glusterd priv.

Moved glusterd_op_send_cli_response() out of gd_unlock_op_phase, as gd_unlock_op_phase and glusterd_clear_txn_opinfo should only be called if the txn id has been successfully generated. The cli response should be sent irrespective of that.

Changed log levels from ERROR to WARNING for some volume lock logs where the logs are expected and are not an error.

Added logs for better transparency of transaction ids.

Change-Id: Ifac9b23aa9f1648c9ae252cfd3ac50bb2ed46728
BUG: 1011470
Signed-off-by: Avra Sengupta <asengupt@redhat.com>
Reviewed-on: http://review.gluster.org/6976
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
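The error-handling rule this patch introduces (an op_ret already set by an earlier phase keeps priority over a failure in the unlock phase, and the CLI response is always sent at the end) can be illustrated with a minimal, self-contained C sketch. The names below (unlock_phase_sketch, op_ret) are hypothetical placeholders for illustration only, not the actual glusterd functions:

#include <stdio.h>

/* Hypothetical stand-in for the op_ret handling added to the unlock
 * phase: an op_ret set by an earlier phase keeps priority; only when
 * op_ret is still 0 does an unlock failure get recorded in it. */
static int
unlock_phase_sketch (int *op_ret)
{
        int ret = -1;           /* pretend the unlock itself failed */

        if (!*op_ret)
                *op_ret = ret;  /* earlier failures win over unlock errors */

        return 0;
}

int
main (void)
{
        int op_ret = 0;         /* the op phases succeeded */

        (void) unlock_phase_sketch (&op_ret);

        /* the CLI response is sent unconditionally, with the final op_ret */
        printf ("sending cli response, op_ret = %d\n", op_ret);

        return 0;
}

With op_ret initially 0, the unlock failure (-1) is what gets reported to the CLI; had an earlier phase already failed, that earlier value would be preserved instead.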
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-syncop.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-syncop.c  85
1 file changed, 54 insertions(+), 31 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index 5eb5e9f3899..578bce897df 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -1313,7 +1313,7 @@ out:
}
int
-gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int op_ret,
+gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
rpcsvc_request_t *req, dict_t *op_ctx, char *op_errstr,
int npeers, char *volname, gf_boolean_t is_acquired,
uuid_t txn_id)
@@ -1336,8 +1336,10 @@ gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int op_ret,
/* If the lock has not been held during this
* transaction, do not send unlock requests */
- if (!is_acquired)
+ if (!is_acquired) {
+ ret = 0;
goto out;
+ }
this = THIS;
synctask_barrier_init((&args));
@@ -1376,7 +1378,11 @@ gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int op_ret,
}
out:
- glusterd_op_send_cli_response (op, op_ret, 0, req, op_ctx, op_errstr);
+ /* If unlock failed, and op_ret was previously set
+ * priority is given to the op_ret. If op_ret was
+ * not set, and unlock failed, then set op_ret */
+ if (!*op_ret)
+ *op_ret = ret;
if (is_acquired) {
/* Based on the op-version,
@@ -1397,6 +1403,9 @@ out:
}
}
+ if (!*op_ret)
+ *op_ret = ret;
+
return 0;
}
@@ -1486,6 +1495,7 @@ void
gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
{
int ret = -1;
+ int op_ret = -1;
int npeers = 0;
dict_t *req_dict = NULL;
glusterd_conf_t *conf = NULL;
@@ -1504,6 +1514,14 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
conf = this->private;
GF_ASSERT (conf);
+ ret = dict_get_int32 (op_ctx, GD_SYNC_OPCODE_KEY, &tmp_op);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get volume "
+ "operation");
+ goto out;
+ }
+ op = tmp_op;
+
/* Generate a transaction-id for this operation and
* save it in the dict */
ret = glusterd_generate_txn_id (op_ctx, &txn_id);
@@ -1511,9 +1529,20 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
gf_log (this->name, GF_LOG_ERROR,
"Failed to generate transaction id");
goto out;
-
}
+ /* Save opinfo for this transaction with the transaction id */
+ glusterd_txn_opinfo_init (&txn_opinfo, NULL, &op, NULL, NULL);
+ ret = glusterd_set_txn_opinfo (txn_id, &txn_opinfo);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set transaction's opinfo");
+
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Transaction ID : %s", uuid_utoa (*txn_id));
+
+ opinfo = txn_opinfo;
+
/* Save the MY_UUID as the originator_uuid */
ret = glusterd_set_originator_uuid (op_ctx);
if (ret) {
@@ -1522,15 +1551,6 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
goto out;
}
- ret = dict_get_int32 (op_ctx, GD_SYNC_OPCODE_KEY, &tmp_op);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get volume "
- "operation");
- goto out;
- }
-
- op = tmp_op;
-
/* Based on the op_version, acquire a cluster or volume lock */
if (conf->op_version < GD_OP_VERSION_4) {
ret = glusterd_lock (MY_UUID);
@@ -1576,26 +1596,20 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
local_locking_done:
- /* Save opinfo for this transaction with the transaction id */
- glusterd_txn_opinfo_init (&txn_opinfo, NULL, &op, NULL, NULL);
- ret = glusterd_set_txn_opinfo (txn_id, &txn_opinfo);
- if (ret)
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to set transaction's opinfo");
-
- opinfo = txn_opinfo;
-
INIT_LIST_HEAD (&conf->xaction_peers);
npeers = gd_build_peers_list (&conf->peers, &conf->xaction_peers, op);
/* If no volname is given as a part of the command, locks will
* not be held */
- if (volname) {
+ if (volname || (conf->op_version < GD_OP_VERSION_4)) {
ret = gd_lock_op_phase (conf, op, op_ctx, &op_errstr,
npeers, *txn_id);
- if (ret)
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Locking Peers Failed.");
goto out;
+ }
}
ret = glusterd_op_build_payload (&req_dict, &op_errstr, op_ctx);
@@ -1623,14 +1637,23 @@ local_locking_done:
ret = 0;
out:
- (void) gd_unlock_op_phase (conf, op, ret, req, op_ctx, op_errstr,
- npeers, volname, is_acquired, *txn_id);
+ op_ret = ret;
+ if (txn_id) {
+ (void) gd_unlock_op_phase (conf, op, &op_ret, req,
+ op_ctx, op_errstr,
+ npeers, volname,
+ is_acquired, *txn_id);
+
+ /* Clearing the transaction opinfo */
+ ret = glusterd_clear_txn_opinfo (txn_id);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to clear transaction's "
+ "opinfo for transaction ID : %s",
+ uuid_utoa (*txn_id));
+ }
- /* Clearing the transaction opinfo */
- ret = glusterd_clear_txn_opinfo (txn_id);
- if (ret)
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to clear transaction's opinfo");
+ glusterd_op_send_cli_response (op, op_ret, 0, req, op_ctx, op_errstr);
if (volname)
GF_FREE (volname);