path: root/xlators/mgmt/glusterd/src/glusterd-op-sm.c
author     Avra Sengupta <asengupt@redhat.com>    2014-03-01 09:04:16 +0530
committer  Rajesh Joseph <rjoseph@redhat.com>     2014-03-07 04:29:33 -0800
commit     44428343529bec83fab0e3519396471fc8f651b4 (patch)
tree       3f1e2495cd03b122a558e16e3512ec319992d4ac /xlators/mgmt/glusterd/src/glusterd-op-sm.c
parent     2e58513506919899115935c2ca6b2359fdeff7b8 (diff)
glusterd/mgmt_v3 locks: Generalization of the volume wide locks.
Renaming volume locks as mgmt_v3 locks

Change-Id: I2a324e2b8e1772d7b165fe96ce8ba5b902c2ed9a
Signed-off-by: Avra Sengupta <asengupt@redhat.com>
Reviewed-on: http://review.gluster.org/7187
Reviewed-by: Rajesh Joseph <rjoseph@redhat.com>
Tested-by: Rajesh Joseph <rjoseph@redhat.com>
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-op-sm.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c  35
1 file changed, 19 insertions, 16 deletions
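The hunks below all follow the same pattern: depending on the peer's op-version, glusterd either takes the legacy cluster-wide lock or the generalized mgmt_v3 lock that this patch renames the old per-volume lock to. A minimal standalone sketch of that dispatch is shown here; cluster_lock() and mgmt_v3_lock() are hypothetical stand-ins for the real GLUSTERD_MGMT_CLUSTER_LOCK and GLUSTERD_MGMT_V3_LOCK code paths, and only the branching on op_version mirrors the patch.

```c
/* Sketch of the op-version based lock dispatch used in the hunks below.
 * The functions here are hypothetical stand-ins, not glusterd APIs;
 * only the "op_version < 3" branch mirrors the actual code. */
#include <stdio.h>

static int
cluster_lock (void)
{
        /* Legacy single cluster-wide lock, used for peers below op-version 3. */
        printf ("acquired cluster lock\n");
        return 0;
}

static int
mgmt_v3_lock (const char *name)
{
        /* Generalized per-name lock, formerly called the "volume lock". */
        printf ("acquired mgmt_v3 lock on %s\n", name);
        return 0;
}

static int
acquire_lock (int op_version, const char *volname)
{
        /* Same decision the patch makes: old peers only understand the
         * cluster lock, newer peers take the mgmt_v3 lock. */
        if (op_version < 3)
                return cluster_lock ();

        return mgmt_v3_lock (volname);
}

int
main (void)
{
        acquire_lock (2, "patchy");   /* old peer: falls back to cluster lock */
        acquire_lock (3, "patchy");   /* new peer: takes the mgmt_v3 lock     */
        return 0;
}
```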
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 625434bc8..05e4b71a0 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -2558,7 +2558,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
(glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
continue;
- /* Based on the op_version, acquire a cluster or volume lock */
+ /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
if (priv->op_version < 3) {
proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_CLUSTER_LOCK];
if (proc->fn) {
@@ -2578,7 +2578,8 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
dict = glusterd_op_get_ctx ();
dict_ref (dict);
- proc = &peerinfo->mgmt_v3->proctable[GLUSTERD_MGMT_V3_VOLUME_LOCK];
+ proc = &peerinfo->mgmt_v3->proctable
+ [GLUSTERD_MGMT_V3_LOCK];
if (proc->fn) {
ret = dict_set_static_ptr (dict, "peerinfo",
peerinfo);
@@ -2592,7 +2593,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
ret = proc->fn (NULL, this, dict);
if (ret) {
gf_log (this->name, GF_LOG_WARNING,
- "Failed to send volume lock "
+ "Failed to send mgmt_v3 lock "
"request for operation "
"'Volume %s' to peer %s",
gd_op_list[opinfo.op],
@@ -2638,7 +2639,8 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
(glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
continue;
- /* Based on the op_version, release the cluster or volume lock */
+ /* Based on the op_version, release the *
+ * cluster or mgmt_v3 lock */
if (priv->op_version < 3) {
proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_CLUSTER_UNLOCK];
if (proc->fn) {
@@ -2658,7 +2660,8 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
dict = glusterd_op_get_ctx ();
dict_ref (dict);
- proc = &peerinfo->mgmt_v3->proctable[GLUSTERD_MGMT_V3_VOLUME_UNLOCK];
+ proc = &peerinfo->mgmt_v3->proctable
+ [GLUSTERD_MGMT_V3_UNLOCK];
if (proc->fn) {
ret = dict_set_static_ptr (dict, "peerinfo",
peerinfo);
@@ -2672,7 +2675,7 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
ret = proc->fn (NULL, this, dict);
if (ret) {
gf_log (this->name, GF_LOG_WARNING,
- "Failed to send volume unlock "
+ "Failed to send mgmt_v3 unlock "
"request for operation "
"'Volume %s' to peer %s",
gd_op_list[opinfo.op],
@@ -2736,7 +2739,7 @@ glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx)
/* If the req came from a node running on older op_version
* the dict won't be present. Based on it acquiring a cluster
- * or volume lock */
+ * or mgmt_v3 lock */
if (lock_ctx->dict == NULL) {
ret = glusterd_lock (lock_ctx->uuid);
glusterd_op_lock_send_resp (lock_ctx->req, ret);
@@ -2746,15 +2749,15 @@ glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx)
gf_log (this->name, GF_LOG_ERROR,
"Unable to acquire volname");
else {
- ret = glusterd_volume_lock (volname, lock_ctx->uuid);
+ ret = glusterd_mgmt_v3_lock (volname, lock_ctx->uuid);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Unable to acquire lock for %s",
volname);
}
- glusterd_op_volume_lock_send_resp (lock_ctx->req,
- &event->txn_id, ret);
+ glusterd_op_mgmt_v3_lock_send_resp (lock_ctx->req,
+ &event->txn_id, ret);
dict_unref (lock_ctx->dict);
}
@@ -2783,7 +2786,7 @@ glusterd_op_ac_unlock (glusterd_op_sm_event_t *event, void *ctx)
/* If the req came from a node running on older op_version
* the dict won't be present. Based on it releasing the cluster
- * or volume lock */
+ * or mgmt_v3 lock */
if (lock_ctx->dict == NULL) {
ret = glusterd_unlock (lock_ctx->uuid);
glusterd_op_unlock_send_resp (lock_ctx->req, ret);
@@ -2793,14 +2796,14 @@ glusterd_op_ac_unlock (glusterd_op_sm_event_t *event, void *ctx)
gf_log (this->name, GF_LOG_ERROR,
"Unable to acquire volname");
else {
- ret = glusterd_volume_unlock (volname, lock_ctx->uuid);
+ ret = glusterd_mgmt_v3_unlock (volname, lock_ctx->uuid);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Unable to release lock for %s", volname);
}
- glusterd_op_volume_unlock_send_resp (lock_ctx->req,
- &event->txn_id, ret);
+ glusterd_op_mgmt_v3_unlock_send_resp (lock_ctx->req,
+ &event->txn_id, ret);
dict_unref (lock_ctx->dict);
}
@@ -4063,7 +4066,7 @@ glusterd_op_txn_complete (uuid_t *txn_id)
glusterd_op_reset_ctx ();
glusterd_op_clear_errstr ();
- /* Based on the op-version, we release the cluster or volume lock */
+ /* Based on the op-version, we release the cluster or mgmt_v3 lock */
if (priv->op_version < 3) {
ret = glusterd_unlock (MY_UUID);
/* unlock cant/shouldnt fail here!! */
@@ -4079,7 +4082,7 @@ glusterd_op_txn_complete (uuid_t *txn_id)
"Unable to acquire volname");
if (volname) {
- ret = glusterd_volume_unlock (volname, MY_UUID);
+ ret = glusterd_mgmt_v3_unlock (volname, MY_UUID);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Unable to release lock for %s",