author      Avra Sengupta <asengupt@redhat.com>    2014-03-01 09:04:16 +0530
committer   Rajesh Joseph <rjoseph@redhat.com>     2014-03-07 04:29:33 -0800
commit      44428343529bec83fab0e3519396471fc8f651b4 (patch)
tree        3f1e2495cd03b122a558e16e3512ec319992d4ac /xlators
parent      2e58513506919899115935c2ca6b2359fdeff7b8 (diff)
glusterd/mgmt_v3 locks: Generalization of the volume wide locks.
Renaming volume locks as mgmt_v3 locks

Change-Id: I2a324e2b8e1772d7b165fe96ce8ba5b902c2ed9a
Signed-off-by: Avra Sengupta <asengupt@redhat.com>
Reviewed-on: http://review.gluster.org/7187
Reviewed-by: Rajesh Joseph <rjoseph@redhat.com>
Tested-by: Rajesh Joseph <rjoseph@redhat.com>
Diffstat (limited to 'xlators')
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-handler.c        |  33
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-locks.c          | 102
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-locks.h          |  19
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c   | 114
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-mgmt.c           | 126
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-op-sm.c          |  35
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-rpc-ops.c        |  68
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-snapshot.c       |   4
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-syncop.c         |  89
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd.c                |   4
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd.h                |   8
11 files changed, 304 insertions(+), 298 deletions(-)
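
The patch is a mechanical generalization: the old volume-lock API, keyed strictly by a volume name, becomes the mgmt_v3 lock API, keyed by an arbitrary string, so later transactions (snapshots and the like) can lock entities other than volumes. Below is a minimal sketch of the resulting call pattern; the wrapper function is hypothetical, while glusterd_mgmt_v3_lock()/glusterd_mgmt_v3_unlock() and the logging idiom are taken from the hunks that follow.

/* Sketch only: acquire and release a mgmt_v3 lock under the new names.
 * Assumes the usual glusterd headers (glusterd.h, glusterd-locks.h). */
static int
mgmt_v3_lock_example (char *key, uuid_t my_uuid)
{
        int32_t ret = -1;

        ret = glusterd_mgmt_v3_lock (key, my_uuid);   /* was glusterd_volume_lock */
        if (ret) {
                gf_log (THIS->name, GF_LOG_ERROR,
                        "Unable to acquire lock for %s", key);
                goto out;
        }

        /* ... run the transaction while the lock is held ... */

        ret = glusterd_mgmt_v3_unlock (key, my_uuid); /* was glusterd_volume_unlock */
        if (ret)
                gf_log (THIS->name, GF_LOG_ERROR,
                        "Unable to release lock for %s", key);
out:
        return ret;
}
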
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index e7024bd99..c0325f0a9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -642,7 +642,7 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
goto out;
}
- /* Based on the op_version, acquire a cluster or volume lock */
+ /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
if (priv->op_version < 3) {
ret = glusterd_lock (MY_UUID);
if (ret) {
@@ -671,7 +671,7 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
goto out;
}
- ret = glusterd_volume_lock (volname, MY_UUID);
+ ret = glusterd_mgmt_v3_lock (volname, MY_UUID);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Unable to acquire lock for %s", volname);
@@ -718,11 +718,11 @@ local_locking_done:
out:
if (locked && ret) {
/* Based on the op-version, we release the
- * cluster or volume lock */
+ * cluster or mgmt_v3 lock */
if (priv->op_version < 3)
glusterd_unlock (MY_UUID);
else {
- ret = glusterd_volume_unlock (volname, MY_UUID);
+ ret = glusterd_mgmt_v3_unlock (volname, MY_UUID);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Unable to release lock for %s",
@@ -2053,11 +2053,11 @@ glusterd_op_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
}
int
-glusterd_op_volume_lock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
- int32_t status)
+glusterd_op_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
+ int32_t status)
{
- gd1_mgmt_v3_vol_lock_rsp rsp = {{0},};
+ gd1_mgmt_v3_lock_rsp rsp = {{0},};
int ret = -1;
GF_ASSERT (req);
@@ -2069,20 +2069,20 @@ glusterd_op_volume_lock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
uuid_copy (rsp.txn_id, *txn_id);
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_lock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
- gf_log (THIS->name, GF_LOG_DEBUG, "Responded to volume lock, ret: %d",
+ gf_log (THIS->name, GF_LOG_DEBUG, "Responded to mgmt_v3 lock, ret: %d",
ret);
return ret;
}
int
-glusterd_op_volume_unlock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
- int32_t status)
+glusterd_op_mgmt_v3_unlock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
+ int32_t status)
{
- gd1_mgmt_v3_vol_unlock_rsp rsp = {{0},};
+ gd1_mgmt_v3_unlock_rsp rsp = {{0},};
int ret = -1;
GF_ASSERT (req);
@@ -2094,9 +2094,10 @@ glusterd_op_volume_unlock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
uuid_copy (rsp.txn_id, *txn_id);
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_unlock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
- gf_log (THIS->name, GF_LOG_DEBUG, "Responded to volume unlock, ret: %d",
+ gf_log (THIS->name, GF_LOG_DEBUG,
+ "Responded to mgmt_v3 unlock, ret: %d",
ret);
return ret;
@@ -4068,8 +4069,8 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
if (peerinfo->connected) {
list_for_each_entry (volinfo, &conf->volumes, vol_list) {
- ret = glusterd_volume_unlock (volinfo->volname,
- peerinfo->uuid);
+ ret = glusterd_mgmt_v3_unlock (volinfo->volname,
+ peerinfo->uuid);
if (ret)
gf_log (this->name, GF_LOG_TRACE,
"Lock not released for %s",
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.c b/xlators/mgmt/glusterd/src/glusterd-locks.c
index 0737a731e..d2601ebb8 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.c
@@ -26,17 +26,17 @@
#include <signal.h>
-static dict_t *vol_lock;
+static dict_t *mgmt_v3_lock;
-/* Initialize the global vol-lock list(dict) when
+/* Initialize the global mgmt_v3 lock list(dict) when
* glusterd is spawned */
int32_t
-glusterd_vol_lock_init ()
+glusterd_mgmt_v3_lock_init ()
{
int32_t ret = -1;
- vol_lock = dict_new ();
- if (!vol_lock) {
+ mgmt_v3_lock = dict_new ();
+ if (!mgmt_v3_lock) {
ret = -1;
goto out;
}
@@ -46,29 +46,29 @@ out:
return ret;
}
-/* Destroy the global vol-lock list(dict) when
+/* Destroy the global mgmt_v3 lock list(dict) when
* glusterd cleanup is performed */
void
-glusterd_vol_lock_fini ()
+glusterd_mgmt_v3_lock_fini ()
{
- if (vol_lock)
- dict_destroy (vol_lock);
+ if (mgmt_v3_lock)
+ dict_destroy (mgmt_v3_lock);
}
int32_t
-glusterd_get_vol_lock_owner (char *volname, uuid_t *uuid)
+glusterd_get_mgmt_v3_lock_owner (char *key, uuid_t *uuid)
{
- int32_t ret = -1;
- vol_lock_obj *lock_obj = NULL;
- uuid_t no_owner = {"\0"};
+ int32_t ret = -1;
+ mgmt_v3_lock_obj *lock_obj = NULL;
+ uuid_t no_owner = {"\0"};
- if (!volname || !uuid) {
- gf_log ("", GF_LOG_ERROR, "volname or uuid is null.");
+ if (!key || !uuid) {
+ gf_log ("", GF_LOG_ERROR, "key or uuid is null.");
ret = -1;
goto out;
}
- ret = dict_get_bin(vol_lock, volname, (void **) &lock_obj);
+ ret = dict_get_bin(mgmt_v3_lock, key, (void **) &lock_obj);
if (!ret)
uuid_copy (*uuid, lock_obj->lock_owner);
else
@@ -81,7 +81,7 @@ out:
}
int32_t
-glusterd_multiple_volumes_unlock (dict_t *dict, uuid_t uuid)
+glusterd_multiple_mgmt_v3_unlock (dict_t *dict, uuid_t uuid)
{
int32_t ret = -1;
int32_t op_ret = 0;
@@ -116,7 +116,7 @@ glusterd_multiple_volumes_unlock (dict_t *dict, uuid_t uuid)
goto out;
}
- ret = glusterd_volume_unlock (volname, uuid);
+ ret = glusterd_mgmt_v3_unlock (volname, uuid);
if (ret) {
gf_log ("", GF_LOG_ERROR,
"Failed to release lock for %s. ", volname);
@@ -131,7 +131,7 @@ out:
}
int32_t
-glusterd_multiple_volumes_lock (dict_t *dict, uuid_t uuid)
+glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid)
{
int32_t ret = -1;
int32_t i = -1;
@@ -166,13 +166,13 @@ glusterd_multiple_volumes_lock (dict_t *dict, uuid_t uuid)
goto out;
}
- ret = glusterd_volume_lock (volname, uuid);
+ ret = glusterd_mgmt_v3_lock (volname, uuid);
if (ret) {
gf_log ("", GF_LOG_ERROR,
"Failed to acquire lock for %s "
- "on behalf of %s. Unlocking "
- "other volumes locked by this "
- "transaction", volname, uuid_utoa(uuid));
+ "on behalf of %s. Reversing "
+ "this transaction", volname,
+ uuid_utoa(uuid));
break;
}
locked_volcount ++;
@@ -193,7 +193,7 @@ glusterd_multiple_volumes_lock (dict_t *dict, uuid_t uuid)
goto out;
}
- ret = glusterd_volume_unlock (volname, uuid);
+ ret = glusterd_mgmt_v3_unlock (volname, uuid);
if (ret)
gf_log ("", GF_LOG_ERROR,
"Failed to release lock for %s.",
@@ -208,26 +208,26 @@ out:
}
int32_t
-glusterd_volume_lock (char *volname, uuid_t uuid)
+glusterd_mgmt_v3_lock (char *key, uuid_t uuid)
{
- int32_t ret = -1;
- vol_lock_obj *lock_obj = NULL;
- uuid_t owner = {0};
+ int32_t ret = -1;
+ mgmt_v3_lock_obj *lock_obj = NULL;
+ uuid_t owner = {0};
- if (!volname) {
- gf_log (THIS->name, GF_LOG_ERROR, "volname is null.");
+ if (!key) {
+ gf_log (THIS->name, GF_LOG_ERROR, "key is null.");
ret = -1;
goto out;
}
gf_log (THIS->name, GF_LOG_TRACE,
"Trying to acquire lock of %s for %s",
- volname, uuid_utoa (uuid));
+ key, uuid_utoa (uuid));
- ret = glusterd_get_vol_lock_owner (volname, &owner);
+ ret = glusterd_get_mgmt_v3_lock_owner (key, &owner);
if (ret) {
gf_log (THIS->name, GF_LOG_DEBUG,
- "Unable to get volume lock owner");
+ "Unable to get mgmt_v3 lock owner");
goto out;
}
@@ -235,13 +235,13 @@ glusterd_volume_lock (char *volname, uuid_t uuid)
* we fail */
if (!uuid_is_null (owner)) {
gf_log (THIS->name, GF_LOG_ERROR, "Lock for %s held by %s",
- volname, uuid_utoa (owner));
+ key, uuid_utoa (owner));
ret = -1;
goto out;
}
- lock_obj = GF_CALLOC (1, sizeof(vol_lock_obj),
- gf_common_mt_vol_lock_obj_t);
+ lock_obj = GF_CALLOC (1, sizeof(mgmt_v3_lock_obj),
+ gf_common_mt_mgmt_v3_lock_obj_t);
if (!lock_obj) {
ret = -1;
goto out;
@@ -249,18 +249,18 @@ glusterd_volume_lock (char *volname, uuid_t uuid)
uuid_copy (lock_obj->lock_owner, uuid);
- ret = dict_set_bin (vol_lock, volname, lock_obj,
- sizeof(vol_lock_obj));
+ ret = dict_set_bin (mgmt_v3_lock, key, lock_obj,
+ sizeof(*lock_obj));
if (ret) {
gf_log (THIS->name, GF_LOG_ERROR,
- "Unable to set lock owner in volume lock");
+ "Unable to set lock owner in mgmt_v3 lock");
if (lock_obj)
GF_FREE (lock_obj);
goto out;
}
gf_log (THIS->name, GF_LOG_DEBUG, "Lock for %s successfully held by %s",
- volname, uuid_utoa (uuid));
+ key, uuid_utoa (uuid));
ret = 0;
out:
@@ -269,30 +269,30 @@ out:
}
int32_t
-glusterd_volume_unlock (char *volname, uuid_t uuid)
+glusterd_mgmt_v3_unlock (char *key, uuid_t uuid)
{
int32_t ret = -1;
uuid_t owner = {0};
- if (!volname) {
- gf_log (THIS->name, GF_LOG_ERROR, "volname is null.");
+ if (!key) {
+ gf_log (THIS->name, GF_LOG_ERROR, "key is null.");
ret = -1;
goto out;
}
gf_log (THIS->name, GF_LOG_TRACE, "Trying to release lock of %s for %s",
- volname, uuid_utoa (uuid));
+ key, uuid_utoa (uuid));
- ret = glusterd_get_vol_lock_owner (volname, &owner);
+ ret = glusterd_get_mgmt_v3_lock_owner (key, &owner);
if (ret) {
gf_log (THIS->name, GF_LOG_DEBUG,
- "Unable to get volume lock owner");
+ "Unable to get mgmt_v3 lock owner");
goto out;
}
if (uuid_is_null (owner)) {
gf_log (THIS->name, GF_LOG_ERROR,
- "Lock for %s not held", volname);
+ "Lock for %s not held", key);
ret = -1;
goto out;
}
@@ -302,15 +302,15 @@ glusterd_volume_unlock (char *volname, uuid_t uuid)
gf_log (THIS->name, GF_LOG_ERROR, "Lock owner mismatch. "
"Lock for %s held by %s",
- volname, uuid_utoa (owner));
+ key, uuid_utoa (owner));
goto out;
}
- /* Removing the volume lock from the global list */
- dict_del (vol_lock, volname);
+ /* Removing the mgmt_v3 lock from the global list */
+ dict_del (mgmt_v3_lock, key);
gf_log (THIS->name, GF_LOG_DEBUG, "Lock for %s successfully released",
- volname);
+ key);
ret = 0;
out:
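
The multi-lock helpers above iterate a dict rather than taking a single name. Here is a sketch of how a caller might populate that dict, assuming the "volcount"/"volname<i>" key convention implied by the loops in this file; the convention and the example volume names are inferences, not spelled out in the hunks.

/* Sketch only: build the dict consumed by glusterd_multiple_mgmt_v3_lock().
 * Key names "volcount" and "volname%d" are assumptions drawn from the
 * surrounding loops; error handling is abbreviated. */
static int
multiple_lock_example (uuid_t originator)
{
        int32_t  ret     = -1;
        dict_t  *dict    = NULL;
        char     key[32] = {0,};

        dict = dict_new ();
        if (!dict)
                goto out;

        ret = dict_set_int32 (dict, "volcount", 2);
        if (ret)
                goto out;

        snprintf (key, sizeof (key), "volname%d", 1);
        ret = dict_set_str (dict, key, "vol_one");
        if (ret)
                goto out;

        snprintf (key, sizeof (key), "volname%d", 2);
        ret = dict_set_str (dict, key, "vol_two");
        if (ret)
                goto out;

        /* Acquires each lock in turn; on failure it reverses the locks
         * already taken by this transaction, as the hunk above shows. */
        ret = glusterd_multiple_mgmt_v3_lock (dict, originator);
out:
        if (dict)
                dict_unref (dict);
        return ret;
}
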
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.h b/xlators/mgmt/glusterd/src/glusterd-locks.h
index 956ae7565..71339ed7f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.h
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.h
@@ -15,30 +15,29 @@
#include "config.h"
#endif
-struct volume_lock_object_ {
+typedef struct mgmt_v3_lock_object_ {
uuid_t lock_owner;
-};
-typedef struct volume_lock_object_ vol_lock_obj;
+} mgmt_v3_lock_obj;
int32_t
-glusterd_vol_lock_init ();
+glusterd_mgmt_v3_lock_init ();
void
-glusterd_vol_lock_fini ();
+glusterd_mgmt_v3_lock_fini ();
int32_t
-glusterd_get_vol_lock_owner (char *volname, uuid_t *uuid);
+glusterd_get_mgmt_v3_lock_owner (char *volname, uuid_t *uuid);
int32_t
-glusterd_volume_lock (char *volname, uuid_t uuid);
+glusterd_mgmt_v3_lock (char *key, uuid_t uuid);
int32_t
-glusterd_volume_unlock (char *volname, uuid_t uuid);
+glusterd_mgmt_v3_unlock (char *key, uuid_t uuid);
int32_t
-glusterd_multiple_volumes_lock (dict_t *dict, uuid_t uuid);
+glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid);
int32_t
-glusterd_multiple_volumes_unlock (dict_t *dict, uuid_t uuid);
+glusterd_multiple_mgmt_v3_unlock (dict_t *dict, uuid_t uuid);
#endif
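
The header now exposes the lock table purely in terms of keys and owner uuids. A short ownership-check sketch built on glusterd_get_mgmt_v3_lock_owner(); the helper itself is hypothetical, and uuid_is_null()/uuid_compare() are the libuuid calls already used in glusterd-locks.c.

/* Sketch only: does 'who' currently hold the mgmt_v3 lock for 'key'? */
static gf_boolean_t
mgmt_v3_lock_is_held_by (char *key, uuid_t who)
{
        uuid_t owner = {0};

        if (glusterd_get_mgmt_v3_lock_owner (key, &owner))
                return _gf_false;           /* lookup failed */

        if (uuid_is_null (owner))
                return _gf_false;           /* lock not held at all */

        return (uuid_compare (owner, who) == 0) ? _gf_true : _gf_false;
}
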
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
index f4ecc486f..72181e963 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
@@ -27,10 +27,10 @@ glusterd_mgmt_v3_null (rpcsvc_request_t *req)
}
static int
-glusterd_mgmt_v3_vol_lock_send_resp (rpcsvc_request_t *req, int32_t status)
+glusterd_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, int32_t status)
{
- gd1_mgmt_v3_vol_lock_rsp rsp = {{0},};
+ gd1_mgmt_v3_lock_rsp rsp = {{0},};
int ret = -1;
GF_ASSERT (req);
@@ -42,18 +42,18 @@ glusterd_mgmt_v3_vol_lock_send_resp (rpcsvc_request_t *req, int32_t status)
glusterd_get_uuid (&rsp.uuid);
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_lock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
gf_log (THIS->name, GF_LOG_DEBUG,
- "Responded to volume lock, ret: %d", ret);
+ "Responded to mgmt_v3 lock, ret: %d", ret);
return ret;
}
static int
-glusterd_syctasked_volume_lock (rpcsvc_request_t *req,
- gd1_mgmt_v3_vol_lock_req *lock_req,
- glusterd_op_lock_ctx_t *ctx)
+glusterd_synctasked_mgmt_v3_lock (rpcsvc_request_t *req,
+ gd1_mgmt_v3_lock_req *lock_req,
+ glusterd_op_lock_ctx_t *ctx)
{
int32_t ret = -1;
int32_t volcount = -1;
@@ -74,31 +74,31 @@ glusterd_syctasked_volume_lock (rpcsvc_request_t *req,
"Failed to get volname");
goto out;
}
- ret = glusterd_volume_lock (volname, ctx->uuid);
+ ret = glusterd_mgmt_v3_lock (volname, ctx->uuid);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Unable to acquire local lock for %s", volname);
} else {
- /* Trying to acquire volume locks on multiple volumes */
- ret = glusterd_multiple_volumes_lock (ctx->dict, ctx->uuid);
+ /* Trying to acquire multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_lock (ctx->dict, ctx->uuid);
if (ret)
gf_log ("", GF_LOG_ERROR,
- "Failed to acquire volume locks for %s",
+ "Failed to acquire mgmt_v3 locks for %s",
uuid_utoa (ctx->uuid));
}
out:
- ret = glusterd_mgmt_v3_vol_lock_send_resp (req, ret);
+ ret = glusterd_mgmt_v3_lock_send_resp (req, ret);
gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
static int
-glusterd_op_state_machine_volume_lock (rpcsvc_request_t *req,
- gd1_mgmt_v3_vol_lock_req *lock_req,
- glusterd_op_lock_ctx_t *ctx)
+glusterd_op_state_machine_mgmt_v3_lock (rpcsvc_request_t *req,
+ gd1_mgmt_v3_lock_req *lock_req,
+ glusterd_op_lock_ctx_t *ctx)
{
int32_t ret = -1;
xlator_t *this = NULL;
@@ -135,9 +135,9 @@ out:
}
static int
-glusterd_handle_volume_lock_fn (rpcsvc_request_t *req)
+glusterd_handle_mgmt_v3_lock_fn (rpcsvc_request_t *req)
{
- gd1_mgmt_v3_vol_lock_req lock_req = {{0},};
+ gd1_mgmt_v3_lock_req lock_req = {{0},};
int32_t ret = -1;
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_op_lock_ctx_t *ctx = NULL;
@@ -149,7 +149,7 @@ glusterd_handle_volume_lock_fn (rpcsvc_request_t *req)
GF_ASSERT (req);
ret = xdr_to_generic (req->msg[0], &lock_req,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_lock_req);
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_req);
if (ret < 0) {
gf_log (this->name, GF_LOG_ERROR, "Failed to decode lock "
"request received from peer");
@@ -157,7 +157,7 @@ glusterd_handle_volume_lock_fn (rpcsvc_request_t *req)
goto out;
}
- gf_log (this->name, GF_LOG_DEBUG, "Received volume lock req "
+ gf_log (this->name, GF_LOG_DEBUG, "Received mgmt_v3 lock req "
"from uuid: %s", uuid_utoa (lock_req.uuid));
if (glusterd_friend_find_by_uuid (lock_req.uuid, &peerinfo)) {
@@ -191,11 +191,13 @@ glusterd_handle_volume_lock_fn (rpcsvc_request_t *req)
goto out;
}
- is_synctasked = dict_get_str_boolean (ctx->dict, "is_synctasked", _gf_false);
+ is_synctasked = dict_get_str_boolean (ctx->dict,
+ "is_synctasked", _gf_false);
if (is_synctasked)
- ret = glusterd_syctasked_volume_lock (req, &lock_req, ctx);
+ ret = glusterd_synctasked_mgmt_v3_lock (req, &lock_req, ctx);
else
- ret = glusterd_op_state_machine_volume_lock (req, &lock_req, ctx);
+ ret = glusterd_op_state_machine_mgmt_v3_lock (req, &lock_req,
+ ctx);
out:
@@ -707,10 +709,10 @@ out:
}
static int
-glusterd_mgmt_v3_vol_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
+glusterd_mgmt_v3_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
{
- gd1_mgmt_v3_vol_unlock_rsp rsp = {{0},};
+ gd1_mgmt_v3_unlock_rsp rsp = {{0},};
int ret = -1;
GF_ASSERT (req);
@@ -722,18 +724,18 @@ glusterd_mgmt_v3_vol_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
glusterd_get_uuid (&rsp.uuid);
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_unlock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
gf_log (THIS->name, GF_LOG_DEBUG,
- "Responded to volume unlock, ret: %d", ret);
+ "Responded to mgmt_v3 unlock, ret: %d", ret);
return ret;
}
static int
-glusterd_syctasked_volume_unlock (rpcsvc_request_t *req,
- gd1_mgmt_v3_vol_unlock_req *unlock_req,
- glusterd_op_lock_ctx_t *ctx)
+glusterd_synctasked_mgmt_v3_unlock (rpcsvc_request_t *req,
+ gd1_mgmt_v3_unlock_req *unlock_req,
+ glusterd_op_lock_ctx_t *ctx)
{
int32_t ret = -1;
int32_t volcount = -1;
@@ -753,21 +755,21 @@ glusterd_syctasked_volume_unlock (rpcsvc_request_t *req,
"Failed to get volname");
goto out;
}
- ret = glusterd_volume_unlock (volname, ctx->uuid);
+ ret = glusterd_mgmt_v3_unlock (volname, ctx->uuid);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Unable to release lock for %s", volname);
} else {
- /* Trying to release volume locks on multiple volumes */
- ret = glusterd_multiple_volumes_unlock (ctx->dict, ctx->uuid);
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock (ctx->dict, ctx->uuid);
if (ret)
gf_log ("", GF_LOG_ERROR,
- "Failed to release volume locks for %s",
+ "Failed to release mgmt_v3 locks for %s",
uuid_utoa(ctx->uuid));
}
out:
- ret = glusterd_mgmt_v3_vol_unlock_send_resp (req, ret);
+ ret = glusterd_mgmt_v3_unlock_send_resp (req, ret);
gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
@@ -775,9 +777,9 @@ out:
static int
-glusterd_op_state_machine_volume_unlock (rpcsvc_request_t *req,
- gd1_mgmt_v3_vol_unlock_req *lock_req,
- glusterd_op_lock_ctx_t *ctx)
+glusterd_op_state_machine_mgmt_v3_unlock (rpcsvc_request_t *req,
+ gd1_mgmt_v3_unlock_req *lock_req,
+ glusterd_op_lock_ctx_t *ctx)
{
int32_t ret = -1;
xlator_t *this = NULL;
@@ -800,9 +802,9 @@ glusterd_op_state_machine_volume_unlock (rpcsvc_request_t *req,
}
static int
-glusterd_handle_volume_unlock_fn (rpcsvc_request_t *req)
+glusterd_handle_mgmt_v3_unlock_fn (rpcsvc_request_t *req)
{
- gd1_mgmt_v3_vol_unlock_req lock_req = {{0},};
+ gd1_mgmt_v3_unlock_req lock_req = {{0},};
int32_t ret = -1;
glusterd_op_lock_ctx_t *ctx = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
@@ -814,7 +816,7 @@ glusterd_handle_volume_unlock_fn (rpcsvc_request_t *req)
GF_ASSERT (req);
ret = xdr_to_generic (req->msg[0], &lock_req,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_unlock_req);
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_req);
if (ret < 0) {
gf_log (this->name, GF_LOG_ERROR, "Failed to decode unlock "
"request received from peer");
@@ -822,7 +824,7 @@ glusterd_handle_volume_unlock_fn (rpcsvc_request_t *req)
goto out;
}
- gf_log (this->name, GF_LOG_DEBUG, "Received volume unlock req "
+ gf_log (this->name, GF_LOG_DEBUG, "Received mgmt_v3 unlock req "
"from uuid: %s", uuid_utoa (lock_req.uuid));
if (glusterd_friend_find_by_uuid (lock_req.uuid, &peerinfo)) {
@@ -856,11 +858,13 @@ glusterd_handle_volume_unlock_fn (rpcsvc_request_t *req)
goto out;
}
- is_synctasked = dict_get_str_boolean (ctx->dict, "is_synctasked", _gf_false);
+ is_synctasked = dict_get_str_boolean (ctx->dict,
+ "is_synctasked", _gf_false);
if (is_synctasked)
- ret = glusterd_syctasked_volume_unlock (req, &lock_req, ctx);
+ ret = glusterd_synctasked_mgmt_v3_unlock (req, &lock_req, ctx);
else
- ret = glusterd_op_state_machine_volume_unlock (req, &lock_req, ctx);
+ ret = glusterd_op_state_machine_mgmt_v3_unlock (req, &lock_req,
+ ctx);
out:
@@ -876,10 +880,10 @@ out:
}
int
-glusterd_handle_volume_lock (rpcsvc_request_t *req)
+glusterd_handle_mgmt_v3_lock (rpcsvc_request_t *req)
{
return glusterd_big_locked_handler (req,
- glusterd_handle_volume_lock_fn);
+ glusterd_handle_mgmt_v3_lock_fn);
}
static int
@@ -911,20 +915,20 @@ glusterd_handle_post_validate (rpcsvc_request_t *req)
}
int
-glusterd_handle_volume_unlock (rpcsvc_request_t *req)
+glusterd_handle_mgmt_v3_unlock (rpcsvc_request_t *req)
{
return glusterd_big_locked_handler (req,
- glusterd_handle_volume_unlock_fn);
+ glusterd_handle_mgmt_v3_unlock_fn);
}
rpcsvc_actor_t gd_svc_mgmt_v3_actors[] = {
- [GLUSTERD_MGMT_V3_NULL] = { "NULL", GLUSTERD_MGMT_V3_NULL, glusterd_mgmt_v3_null, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_VOLUME_LOCK] = { "VOL_LOCK", GLUSTERD_MGMT_V3_VOLUME_LOCK, glusterd_handle_volume_lock, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_PRE_VALIDATE] = { "PRE_VAL", GLUSTERD_MGMT_V3_PRE_VALIDATE, glusterd_handle_pre_validate, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_BRICK_OP] = { "BRCK_OP", GLUSTERD_MGMT_V3_BRICK_OP, glusterd_handle_brick_op, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_COMMIT] = { "COMMIT", GLUSTERD_MGMT_V3_COMMIT, glusterd_handle_commit, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_POST_VALIDATE] = { "POST_VAL", GLUSTERD_MGMT_V3_POST_VALIDATE, glusterd_handle_post_validate, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_VOLUME_UNLOCK] = { "VOL_UNLOCK", GLUSTERD_MGMT_V3_VOLUME_UNLOCK, glusterd_handle_volume_unlock, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_NULL] = { "NULL", GLUSTERD_MGMT_V3_NULL, glusterd_mgmt_v3_null, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_LOCK] = { "MGMT_V3_LOCK", GLUSTERD_MGMT_V3_LOCK, glusterd_handle_mgmt_v3_lock, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_PRE_VALIDATE] = { "PRE_VAL", GLUSTERD_MGMT_V3_PRE_VALIDATE, glusterd_handle_pre_validate, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_BRICK_OP] = { "BRCK_OP", GLUSTERD_MGMT_V3_BRICK_OP, glusterd_handle_brick_op, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_COMMIT] = { "COMMIT", GLUSTERD_MGMT_V3_COMMIT, glusterd_handle_commit, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_POST_VALIDATE] = { "POST_VAL", GLUSTERD_MGMT_V3_POST_VALIDATE, glusterd_handle_post_validate, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_UNLOCK] = { "MGMT_V3_UNLOCK", GLUSTERD_MGMT_V3_UNLOCK, glusterd_handle_mgmt_v3_unlock, NULL, 0, DRC_NA},
};
struct rpcsvc_program gd_svc_mgmt_v3_prog = {
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index 424bcca8a..7359169dd 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -49,10 +49,10 @@ gd_mgmt_v3_collate_errors (struct syncargs *args, int op_ret, int op_errno,
}
switch (op_code){
- case GLUSTERD_MGMT_V3_VOLUME_LOCK:
+ case GLUSTERD_MGMT_V3_LOCK:
{
len = snprintf (op_err, sizeof(op_err) - 1,
- "Locking volume failed "
+ "Locking failed "
"on %s. %s", peer_str, err_str);
break;
}
@@ -84,10 +84,10 @@ gd_mgmt_v3_collate_errors (struct syncargs *args, int op_ret, int op_errno,
"on %s. %s", peer_str, err_str);
break;
}
- case GLUSTERD_MGMT_V3_VOLUME_UNLOCK:
+ case GLUSTERD_MGMT_V3_UNLOCK:
{
len = snprintf (op_err, sizeof(op_err) - 1,
- "Unlocking volume failed "
+ "Unlocking failed "
"on %s. %s", peer_str, err_str);
break;
}
@@ -199,13 +199,13 @@ gd_mgmt_v3_post_validate_fn (glusterd_op_t op, dict_t *dict,
}
int32_t
-gd_mgmt_v3_vol_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
int ret = -1;
struct syncargs *args = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
- gd1_mgmt_v3_vol_lock_rsp rsp = {{0},};
+ gd1_mgmt_v3_lock_rsp rsp = {{0},};
call_frame_t *frame = NULL;
int op_ret = -1;
int op_errno = -1;
@@ -226,7 +226,7 @@ gd_mgmt_v3_vol_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
}
ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_lock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
if (ret < 0)
goto out;
@@ -236,29 +236,29 @@ gd_mgmt_v3_vol_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
op_errno = rsp.op_errno;
out:
gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
- GLUSTERD_MGMT_V3_VOLUME_LOCK,
- peerinfo, rsp.uuid);
+ GLUSTERD_MGMT_V3_LOCK,
+ peerinfo, rsp.uuid);
STACK_DESTROY (frame->root);
synctask_barrier_wake(args);
return 0;
}
int32_t
-gd_mgmt_v3_vol_lock_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_lock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
- gd_mgmt_v3_vol_lock_cbk_fn);
+ gd_mgmt_v3_lock_cbk_fn);
}
int
-gd_mgmt_v3_vol_lock (glusterd_op_t op, dict_t *op_ctx,
- glusterd_peerinfo_t *peerinfo,
- struct syncargs *args, uuid_t my_uuid,
- uuid_t recv_uuid)
+gd_mgmt_v3_lock (glusterd_op_t op, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid)
{
int ret = -1;
- gd1_mgmt_v3_vol_lock_req req = {{0},};
+ gd1_mgmt_v3_lock_req req = {{0},};
glusterd_conf_t *conf = THIS->private;
GF_ASSERT(op_ctx);
@@ -276,9 +276,9 @@ gd_mgmt_v3_vol_lock (glusterd_op_t op, dict_t *op_ctx,
synclock_unlock (&conf->big_lock);
ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo,
&gd_mgmt_v3_prog,
- GLUSTERD_MGMT_V3_VOLUME_LOCK,
- gd_mgmt_v3_vol_lock_cbk,
- (xdrproc_t) xdr_gd1_mgmt_v3_vol_lock_req);
+ GLUSTERD_MGMT_V3_LOCK,
+ gd_mgmt_v3_lock_cbk,
+ (xdrproc_t) xdr_gd1_mgmt_v3_lock_req);
synclock_lock (&conf->big_lock);
out:
gf_log ("", GF_LOG_TRACE, "Returning %d", ret);
@@ -302,18 +302,18 @@ glusterd_mgmt_v3_initiate_lockdown (glusterd_conf_t *conf, glusterd_op_t op,
this = THIS;
peers = &conf->xaction_peers;
- /* Volume(s) lock on local node */
+ /* mgmt_v3 lock on local node */
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- /* Trying to acquire volume locks on multiple volumes */
- ret = glusterd_multiple_volumes_lock (dict, MY_UUID);
+ /* Trying to acquire multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_lock (dict, MY_UUID);
if (ret) {
gf_log ("", GF_LOG_ERROR,
- "Failed to acquire volume locks on localhost");
+ "Failed to acquire mgmt_v3 locks on localhost");
goto out;
}
} else {
- ret = glusterd_volume_lock (volname, MY_UUID);
+ ret = glusterd_mgmt_v3_lock (volname, MY_UUID);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Unable to acquire local lock for %s", volname);
@@ -328,13 +328,13 @@ glusterd_mgmt_v3_initiate_lockdown (glusterd_conf_t *conf, glusterd_op_t op,
goto out;
}
- /* Sending Volume lock req to other nodes in the cluster */
+ /* Sending mgmt_v3 lock req to other nodes in the cluster */
gd_syncargs_init (&args, NULL);
synctask_barrier_init((&args));
peer_cnt = 0;
list_for_each_entry (peerinfo, peers, op_peers_list) {
- gd_mgmt_v3_vol_lock (op, dict, peerinfo, &args,
- MY_UUID, peer_uuid);
+ gd_mgmt_v3_lock (op, dict, peerinfo, &args,
+ MY_UUID, peer_uuid);
peer_cnt++;
}
gd_synctask_barrier_wait((&args), peer_cnt);
@@ -1189,13 +1189,13 @@ out:
}
int32_t
-gd_mgmt_v3_vol_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
int ret = -1;
struct syncargs *args = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
- gd1_mgmt_v3_vol_unlock_rsp rsp = {{0},};
+ gd1_mgmt_v3_unlock_rsp rsp = {{0},};
call_frame_t *frame = NULL;
int op_ret = -1;
int op_errno = -1;
@@ -1216,7 +1216,7 @@ gd_mgmt_v3_vol_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
}
ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_unlock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
if (ret < 0)
goto out;
@@ -1226,29 +1226,29 @@ gd_mgmt_v3_vol_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
op_errno = rsp.op_errno;
out:
gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
- GLUSTERD_MGMT_V3_VOLUME_UNLOCK,
- peerinfo, rsp.uuid);
+ GLUSTERD_MGMT_V3_UNLOCK,
+ peerinfo, rsp.uuid);
STACK_DESTROY (frame->root);
synctask_barrier_wake(args);
return 0;
}
int32_t
-gd_mgmt_v3_vol_unlock_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_unlock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
- gd_mgmt_v3_vol_unlock_cbk_fn);
+ gd_mgmt_v3_unlock_cbk_fn);
}
int
-gd_mgmt_v3_vol_unlock (glusterd_op_t op, dict_t *op_ctx,
- glusterd_peerinfo_t *peerinfo,
- struct syncargs *args, uuid_t my_uuid,
- uuid_t recv_uuid)
+gd_mgmt_v3_unlock (glusterd_op_t op, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid)
{
int ret = -1;
- gd1_mgmt_v3_vol_unlock_req req = {{0},};
+ gd1_mgmt_v3_unlock_req req = {{0},};
glusterd_conf_t *conf = THIS->private;
GF_ASSERT(op_ctx);
@@ -1266,9 +1266,9 @@ gd_mgmt_v3_vol_unlock (glusterd_op_t op, dict_t *op_ctx,
synclock_unlock (&conf->big_lock);
ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo,
&gd_mgmt_v3_prog,
- GLUSTERD_MGMT_V3_VOLUME_UNLOCK,
- gd_mgmt_v3_vol_unlock_cbk,
- (xdrproc_t) xdr_gd1_mgmt_v3_vol_unlock_req);
+ GLUSTERD_MGMT_V3_UNLOCK,
+ gd_mgmt_v3_unlock_cbk,
+ (xdrproc_t) xdr_gd1_mgmt_v3_unlock_req);
synclock_lock (&conf->big_lock);
out:
gf_log ("", GF_LOG_TRACE, "Returning %d", ret);
@@ -1301,13 +1301,13 @@ glusterd_mgmt_v3_release_peer_locks (glusterd_conf_t *conf, glusterd_op_t op,
goto out;
}
- /* Sending Volume unlock req to other nodes in the cluster */
+ /* Sending mgmt_v3 unlock req to other nodes in the cluster */
gd_syncargs_init (&args, NULL);
synctask_barrier_init((&args));
peer_cnt = 0;
list_for_each_entry (peerinfo, peers, op_peers_list) {
- gd_mgmt_v3_vol_unlock (op, dict, peerinfo, &args,
- MY_UUID, peer_uuid);
+ gd_mgmt_v3_unlock (op, dict, peerinfo, &args,
+ MY_UUID, peer_uuid);
peer_cnt++;
}
gd_synctask_barrier_wait((&args), peer_cnt);
@@ -1389,12 +1389,11 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
INIT_LIST_HEAD (&conf->xaction_peers);
npeers = gd_build_peers_list (&conf->peers, &conf->xaction_peers, op);
- /* LOCKDOWN PHASE - Based on the number of volumes either single
- * or multiple volume locks is acquired */
+ /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
ret = glusterd_mgmt_v3_initiate_lockdown (conf, op, dict, &op_errstr,
- npeers, &is_acquired);
+ npeers, &is_acquired);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Volume lockdown failed.");
+ gf_log ("", GF_LOG_ERROR, "mgmt_v3 lockdown failed.");
goto out;
}
@@ -1447,13 +1446,13 @@ out:
ret = dict_get_str (tmp_dict, "volname", &volname);
if (ret) {
- /* Trying to release volume locks on multiple volumes */
- ret = glusterd_multiple_volumes_unlock (tmp_dict, MY_UUID);
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock (tmp_dict, MY_UUID);
if (ret)
gf_log ("", GF_LOG_ERROR,
- "Failed to release volume locks on localhost");
+ "Failed to release mgmt_v3 locks on localhost");
} else {
- ret = glusterd_volume_unlock (volname, MY_UUID);
+ ret = glusterd_mgmt_v3_unlock (volname, MY_UUID);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Unable to release local lock for %s", volname);
@@ -1536,12 +1535,11 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
INIT_LIST_HEAD (&conf->xaction_peers);
npeers = gd_build_peers_list (&conf->peers, &conf->xaction_peers, op);
- /* LOCKDOWN PHASE - Based on the number of volumes either single
- * or multiple volume locks is acquired */
+ /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
ret = glusterd_mgmt_v3_initiate_lockdown (conf, op, dict, &op_errstr,
npeers, &is_acquired);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Volume lockdown failed.");
+ gf_log ("", GF_LOG_ERROR, "mgmt_v3 lockdown failed.");
goto out;
}
@@ -1642,13 +1640,13 @@ out:
ret = dict_get_str (tmp_dict, "volname", &volname);
if (ret) {
- /* Trying to release volume locks on multiple volumes */
- ret = glusterd_multiple_volumes_unlock (tmp_dict, MY_UUID);
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock (tmp_dict, MY_UUID);
if (ret)
gf_log ("", GF_LOG_ERROR,
- "Failed to release volume locks on localhost");
+ "Failed to release mgmt_v3 locks on localhost");
} else {
- ret = glusterd_volume_unlock (volname, MY_UUID);
+ ret = glusterd_mgmt_v3_unlock (volname, MY_UUID);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Unable to release local lock for %s", volname);
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 625434bc8..05e4b71a0 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -2558,7 +2558,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
(glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
continue;
- /* Based on the op_version, acquire a cluster or volume lock */
+ /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
if (priv->op_version < 3) {
proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_CLUSTER_LOCK];
if (proc->fn) {
@@ -2578,7 +2578,8 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
dict = glusterd_op_get_ctx ();
dict_ref (dict);
- proc = &peerinfo->mgmt_v3->proctable[GLUSTERD_MGMT_V3_VOLUME_LOCK];
+ proc = &peerinfo->mgmt_v3->proctable
+ [GLUSTERD_MGMT_V3_LOCK];
if (proc->fn) {
ret = dict_set_static_ptr (dict, "peerinfo",
peerinfo);
@@ -2592,7 +2593,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
ret = proc->fn (NULL, this, dict);
if (ret) {
gf_log (this->name, GF_LOG_WARNING,
- "Failed to send volume lock "
+ "Failed to send mgmt_v3 lock "
"request for operation "
"'Volume %s' to peer %s",
gd_op_list[opinfo.op],
@@ -2638,7 +2639,8 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
(glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
continue;
- /* Based on the op_version, release the cluster or volume lock */
+ /* Based on the op_version, release the *
+ * cluster or mgmt_v3 lock */
if (priv->op_version < 3) {
proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_CLUSTER_UNLOCK];
if (proc->fn) {
@@ -2658,7 +2660,8 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
dict = glusterd_op_get_ctx ();
dict_ref (dict);
- proc = &peerinfo->mgmt_v3->proctable[GLUSTERD_MGMT_V3_VOLUME_UNLOCK];
+ proc = &peerinfo->mgmt_v3->proctable
+ [GLUSTERD_MGMT_V3_UNLOCK];
if (proc->fn) {
ret = dict_set_static_ptr (dict, "peerinfo",
peerinfo);
@@ -2672,7 +2675,7 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
ret = proc->fn (NULL, this, dict);
if (ret) {
gf_log (this->name, GF_LOG_WARNING,
- "Failed to send volume unlock "
+ "Failed to send mgmt_v3 unlock "
"request for operation "
"'Volume %s' to peer %s",
gd_op_list[opinfo.op],
@@ -2736,7 +2739,7 @@ glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx)
/* If the req came from a node running on older op_version
* the dict won't be present. Based on it acquiring a cluster
- * or volume lock */
+ * or mgmt_v3 lock */
if (lock_ctx->dict == NULL) {
ret = glusterd_lock (lock_ctx->uuid);
glusterd_op_lock_send_resp (lock_ctx->req, ret);
@@ -2746,15 +2749,15 @@ glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx)
gf_log (this->name, GF_LOG_ERROR,
"Unable to acquire volname");
else {
- ret = glusterd_volume_lock (volname, lock_ctx->uuid);
+ ret = glusterd_mgmt_v3_lock (volname, lock_ctx->uuid);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Unable to acquire lock for %s",
volname);
}
- glusterd_op_volume_lock_send_resp (lock_ctx->req,
- &event->txn_id, ret);
+ glusterd_op_mgmt_v3_lock_send_resp (lock_ctx->req,
+ &event->txn_id, ret);
dict_unref (lock_ctx->dict);
}
@@ -2783,7 +2786,7 @@ glusterd_op_ac_unlock (glusterd_op_sm_event_t *event, void *ctx)
/* If the req came from a node running on older op_version
* the dict won't be present. Based on it releasing the cluster
- * or volume lock */
+ * or mgmt_v3 lock */
if (lock_ctx->dict == NULL) {
ret = glusterd_unlock (lock_ctx->uuid);
glusterd_op_unlock_send_resp (lock_ctx->req, ret);
@@ -2793,14 +2796,14 @@ glusterd_op_ac_unlock (glusterd_op_sm_event_t *event, void *ctx)
gf_log (this->name, GF_LOG_ERROR,
"Unable to acquire volname");
else {
- ret = glusterd_volume_unlock (volname, lock_ctx->uuid);
+ ret = glusterd_mgmt_v3_unlock (volname, lock_ctx->uuid);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Unable to release lock for %s", volname);
}
- glusterd_op_volume_unlock_send_resp (lock_ctx->req,
- &event->txn_id, ret);
+ glusterd_op_mgmt_v3_unlock_send_resp (lock_ctx->req,
+ &event->txn_id, ret);
dict_unref (lock_ctx->dict);
}
@@ -4063,7 +4066,7 @@ glusterd_op_txn_complete (uuid_t *txn_id)
glusterd_op_reset_ctx ();
glusterd_op_clear_errstr ();
- /* Based on the op-version, we release the cluster or volume lock */
+ /* Based on the op-version, we release the cluster or mgmt_v3 lock */
if (priv->op_version < 3) {
ret = glusterd_unlock (MY_UUID);
/* unlock cant/shouldnt fail here!! */
@@ -4079,7 +4082,7 @@ glusterd_op_txn_complete (uuid_t *txn_id)
"Unable to acquire volname");
if (volname) {
- ret = glusterd_volume_unlock (volname, MY_UUID);
+ ret = glusterd_mgmt_v3_unlock (volname, MY_UUID);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Unable to release lock for %s",
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 4925b9af7..d5200a4ae 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -647,10 +647,10 @@ glusterd_cluster_lock_cbk (struct rpc_req *req, struct iovec *iov,
}
static int32_t
-glusterd_vol_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+glusterd_mgmt_v3_lock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
- gd1_mgmt_v3_vol_lock_rsp rsp = {{0},};
+ gd1_mgmt_v3_lock_rsp rsp = {{0},};
int ret = -1;
int32_t op_ret = -1;
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
@@ -669,10 +669,10 @@ glusterd_vol_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
}
ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_lock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
if (ret < 0) {
gf_log (this->name, GF_LOG_ERROR,
- "Failed to decode volume lock "
+ "Failed to decode mgmt_v3 lock "
"response received from peer");
rsp.op_ret = -1;
rsp.op_errno = EINVAL;
@@ -685,13 +685,13 @@ out:
txn_id = &rsp.txn_id;
gf_log (this->name, (op_ret) ? GF_LOG_ERROR : GF_LOG_DEBUG,
- "Received volume lock %s from uuid: %s",
+ "Received mgmt_v3 lock %s from uuid: %s",
(op_ret) ? "RJT" : "ACC", uuid_utoa (rsp.uuid));
ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo);
if (ret) {
gf_log (this->name, GF_LOG_CRITICAL,
- "Volume lock response received "
+ "mgmt_v3 lock response received "
"from unknown peer: %s", uuid_utoa (rsp.uuid));
}
@@ -717,18 +717,18 @@ out:
}
int32_t
-glusterd_vol_lock_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+glusterd_mgmt_v3_lock_peers_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
- glusterd_vol_lock_cbk_fn);
+ glusterd_mgmt_v3_lock_peers_cbk_fn);
}
static int32_t
-glusterd_vol_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+glusterd_mgmt_v3_unlock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
- gd1_mgmt_v3_vol_unlock_rsp rsp = {{0},};
+ gd1_mgmt_v3_unlock_rsp rsp = {{0},};
int ret = -1;
int32_t op_ret = -1;
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
@@ -747,10 +747,10 @@ glusterd_vol_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
}
ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_unlock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
if (ret < 0) {
gf_log (this->name, GF_LOG_ERROR,
- "Failed to decode volume unlock "
+ "Failed to decode mgmt_v3 unlock "
"response received from peer");
rsp.op_ret = -1;
rsp.op_errno = EINVAL;
@@ -763,7 +763,7 @@ out:
txn_id = &rsp.txn_id;
gf_log (this->name, (op_ret) ? GF_LOG_ERROR : GF_LOG_DEBUG,
- "Received volume unlock %s from uuid: %s",
+ "Received mgmt_v3 unlock %s from uuid: %s",
(op_ret) ? "RJT" : "ACC",
uuid_utoa (rsp.uuid));
@@ -771,7 +771,7 @@ out:
if (ret) {
gf_log (this->name, GF_LOG_CRITICAL,
- "Volume unlock response received "
+ "mgmt_v3 unlock response received "
"from unknown peer: %s", uuid_utoa (rsp.uuid));
}
@@ -797,11 +797,11 @@ out:
}
int32_t
-glusterd_vol_unlock_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+glusterd_mgmt_v3_unlock_peers_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
- glusterd_vol_unlock_cbk_fn);
+ glusterd_mgmt_v3_unlock_peers_cbk_fn);
}
int32_t
@@ -1395,10 +1395,10 @@ out:
}
int32_t
-glusterd_vol_lock (call_frame_t *frame, xlator_t *this,
- void *data)
+glusterd_mgmt_v3_lock_peers (call_frame_t *frame, xlator_t *this,
+ void *data)
{
- gd1_mgmt_v3_vol_lock_req req = {{0},};
+ gd1_mgmt_v3_lock_req req = {{0},};
int ret = -1;
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_conf_t *priv = NULL;
@@ -1450,19 +1450,19 @@ glusterd_vol_lock (call_frame_t *frame, xlator_t *this,
ret = glusterd_submit_request (peerinfo->rpc, &req, dummy_frame,
peerinfo->mgmt_v3,
- GLUSTERD_MGMT_V3_VOLUME_LOCK, NULL,
- this, glusterd_vol_lock_cbk,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_lock_req);
+ GLUSTERD_MGMT_V3_LOCK, NULL,
+ this, glusterd_mgmt_v3_lock_peers_cbk,
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_req);
out:
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
int32_t
-glusterd_vol_unlock (call_frame_t *frame, xlator_t *this,
- void *data)
+glusterd_mgmt_v3_unlock_peers (call_frame_t *frame, xlator_t *this,
+ void *data)
{
- gd1_mgmt_v3_vol_unlock_req req = {{0},};
+ gd1_mgmt_v3_unlock_req req = {{0},};
int ret = -1;
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_conf_t *priv = NULL;
@@ -1514,9 +1514,9 @@ glusterd_vol_unlock (call_frame_t *frame, xlator_t *this,
ret = glusterd_submit_request (peerinfo->rpc, &req, dummy_frame,
peerinfo->mgmt_v3,
- GLUSTERD_MGMT_V3_VOLUME_UNLOCK, NULL,
- this, glusterd_vol_unlock_cbk,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_unlock_req);
+ GLUSTERD_MGMT_V3_UNLOCK, NULL,
+ this, glusterd_mgmt_v3_unlock_peers_cbk,
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_req);
out:
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
@@ -1937,8 +1937,8 @@ struct rpc_clnt_procedure gd_mgmt_actors[GLUSTERD_MGMT_MAXVALUE] = {
struct rpc_clnt_procedure gd_mgmt_v3_actors[GLUSTERD_MGMT_V3_MAXVALUE] = {
[GLUSTERD_MGMT_V3_NULL] = {"NULL", NULL },
- [GLUSTERD_MGMT_V3_VOLUME_LOCK] = {"VOLUME_LOCK", glusterd_vol_lock},
- [GLUSTERD_MGMT_V3_VOLUME_UNLOCK] = {"VOLUME_UNLOCK", glusterd_vol_unlock},
+ [GLUSTERD_MGMT_V3_LOCK] = {"MGMT_V3_LOCK", glusterd_mgmt_v3_lock_peers},
+ [GLUSTERD_MGMT_V3_UNLOCK] = {"MGMT_V3_UNLOCK", glusterd_mgmt_v3_unlock_peers},
};
struct rpc_clnt_program gd_mgmt_prog = {
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index 116dd5153..05ae2fc12 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -2149,7 +2149,7 @@ glusterd_get_cg_volume_names_lk (dict_t *dict, glusterd_snap_cg_t *cg)
}
/* Set volume name of all the volumes present in CG in dict so that
- * Jarvis can use this to acquire volume locks on all the volume
+ * Jarvis can use this to acquire mgmt_v3 locks on all the volume
* present in the CG.
*/
for (i = 0; i < cg->volume_count; ++i) {
@@ -3109,7 +3109,7 @@ glusterd_handle_snapshot_remove (rpcsvc_request_t *req, glusterd_op_t op,
goto out;
}
- /* Set volnames in the dict to get volume lock */
+ /* Set volnames in the dict to get mgmt_v3 lock */
list_for_each_entry_safe (snap_vol, tmp, &snap->volumes, vol_list) {
volcount++;
volname = gf_strdup (snap_vol->parent_volname);
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index cb8f5862f..beab4aca7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -59,17 +59,17 @@ gd_mgmt_v3_collate_errors (struct syncargs *args, int op_ret, int op_errno,
}
switch (op_code){
- case GLUSTERD_MGMT_V3_VOLUME_LOCK:
+ case GLUSTERD_MGMT_V3_LOCK:
{
len = snprintf (op_err, sizeof(op_err) - 1,
- "Locking volume failed "
+ "Locking failed "
"on %s. %s", peer_str, err_str);
break;
}
- case GLUSTERD_MGMT_V3_VOLUME_UNLOCK:
+ case GLUSTERD_MGMT_V3_UNLOCK:
{
len = snprintf (op_err, sizeof(op_err) - 1,
- "Unlocking volume failed "
+ "Unlocking failed "
"on %s. %s", peer_str, err_str);
break;
}
@@ -417,13 +417,13 @@ gd_syncop_mgmt_lock (glusterd_peerinfo_t *peerinfo, struct syncargs *args,
}
int32_t
-_gd_syncop_mgmt_volume_lock_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_syncop_mgmt_v3_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
int ret = -1;
struct syncargs *args = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
- gd1_mgmt_v3_vol_lock_rsp rsp = {{0},};
+ gd1_mgmt_v3_lock_rsp rsp = {{0},};
call_frame_t *frame = NULL;
int op_ret = -1;
int op_errno = -1;
@@ -444,7 +444,7 @@ _gd_syncop_mgmt_volume_lock_cbk (struct rpc_req *req, struct iovec *iov,
}
ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_lock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
if (ret < 0)
goto out;
@@ -454,7 +454,7 @@ _gd_syncop_mgmt_volume_lock_cbk (struct rpc_req *req, struct iovec *iov,
op_errno = rsp.op_errno;
out:
gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
- GLUSTERD_MGMT_V3_VOLUME_LOCK,
+ GLUSTERD_MGMT_V3_LOCK,
peerinfo, rsp.uuid);
STACK_DESTROY (frame->root);
synctask_barrier_wake(args);
@@ -462,21 +462,21 @@ out:
}
int32_t
-gd_syncop_mgmt_volume_lock_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_syncop_mgmt_v3_lock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
- _gd_syncop_mgmt_volume_lock_cbk);
+ gd_syncop_mgmt_v3_lock_cbk_fn);
}
int
-gd_syncop_mgmt_volume_lock (glusterd_op_t op, dict_t *op_ctx,
- glusterd_peerinfo_t *peerinfo,
- struct syncargs *args, uuid_t my_uuid,
- uuid_t recv_uuid, uuid_t txn_id)
+gd_syncop_mgmt_v3_lock (glusterd_op_t op, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid, uuid_t txn_id)
{
int ret = -1;
- gd1_mgmt_v3_vol_lock_req req = {{0},};
+ gd1_mgmt_v3_lock_req req = {{0},};
glusterd_conf_t *conf = THIS->private;
GF_ASSERT(op_ctx);
@@ -495,9 +495,9 @@ gd_syncop_mgmt_volume_lock (glusterd_op_t op, dict_t *op_ctx,
synclock_unlock (&conf->big_lock);
ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo,
&gd_mgmt_v3_prog,
- GLUSTERD_MGMT_V3_VOLUME_LOCK,
- gd_syncop_mgmt_volume_lock_cbk,
- (xdrproc_t) xdr_gd1_mgmt_v3_vol_lock_req);
+ GLUSTERD_MGMT_V3_LOCK,
+ gd_syncop_mgmt_v3_lock_cbk,
+ (xdrproc_t) xdr_gd1_mgmt_v3_lock_req);
synclock_lock (&conf->big_lock);
out:
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
@@ -505,13 +505,13 @@ out:
}
int32_t
-_gd_syncop_mgmt_volume_unlock_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_syncop_mgmt_v3_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
int ret = -1;
struct syncargs *args = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
- gd1_mgmt_v3_vol_unlock_rsp rsp = {{0},};
+ gd1_mgmt_v3_unlock_rsp rsp = {{0},};
call_frame_t *frame = NULL;
int op_ret = -1;
int op_errno = -1;
@@ -532,7 +532,7 @@ _gd_syncop_mgmt_volume_unlock_cbk (struct rpc_req *req, struct iovec *iov,
}
ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_unlock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
if (ret < 0)
goto out;
@@ -545,7 +545,7 @@ _gd_syncop_mgmt_volume_unlock_cbk (struct rpc_req *req, struct iovec *iov,
op_errno = rsp.op_errno;
out:
gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
- GLUSTERD_MGMT_V3_VOLUME_UNLOCK,
+ GLUSTERD_MGMT_V3_UNLOCK,
peerinfo, rsp.uuid);
STACK_DESTROY (frame->root);
synctask_barrier_wake(args);
@@ -553,20 +553,20 @@ out:
}
int32_t
-gd_syncop_mgmt_volume_unlock_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_syncop_mgmt_v3_unlock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
- _gd_syncop_mgmt_volume_unlock_cbk);
+ gd_syncop_mgmt_v3_unlock_cbk_fn);
}
int
-gd_syncop_mgmt_volume_unlock (dict_t *op_ctx, glusterd_peerinfo_t *peerinfo,
- struct syncargs *args, uuid_t my_uuid,
- uuid_t recv_uuid, uuid_t txn_id)
+gd_syncop_mgmt_v3_unlock (dict_t *op_ctx, glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid, uuid_t txn_id)
{
int ret = -1;
- gd1_mgmt_v3_vol_unlock_req req = {{0},};
+ gd1_mgmt_v3_unlock_req req = {{0},};
glusterd_conf_t *conf = THIS->private;
GF_ASSERT(op_ctx);
@@ -584,9 +584,9 @@ gd_syncop_mgmt_volume_unlock (dict_t *op_ctx, glusterd_peerinfo_t *peerinfo,
synclock_unlock (&conf->big_lock);
ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo,
&gd_mgmt_v3_prog,
- GLUSTERD_MGMT_V3_VOLUME_UNLOCK,
- gd_syncop_mgmt_volume_unlock_cbk,
- (xdrproc_t) xdr_gd1_mgmt_v3_vol_unlock_req);
+ GLUSTERD_MGMT_V3_UNLOCK,
+ gd_syncop_mgmt_v3_unlock_cbk,
+ (xdrproc_t) xdr_gd1_mgmt_v3_unlock_req);
synclock_lock (&conf->big_lock);
out:
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
@@ -1085,8 +1085,8 @@ gd_lock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, dict_t *op_ctx,
gd_syncop_mgmt_lock (peerinfo, &args,
MY_UUID, peer_uuid);
} else
- gd_syncop_mgmt_volume_lock (op, op_ctx, peerinfo, &args,
- MY_UUID, peer_uuid, txn_id);
+ gd_syncop_mgmt_v3_lock (op, op_ctx, peerinfo, &args,
+ MY_UUID, peer_uuid, txn_id);
peer_cnt++;
}
gd_synctask_barrier_wait((&args), peer_cnt);
@@ -1314,9 +1314,9 @@ gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int op_ret,
gd_syncop_mgmt_unlock (peerinfo, &args,
MY_UUID, tmp_uuid);
} else
- gd_syncop_mgmt_volume_unlock (op_ctx, peerinfo,
- &args, MY_UUID,
- tmp_uuid, txn_id);
+ gd_syncop_mgmt_v3_unlock (op_ctx, peerinfo,
+ &args, MY_UUID,
+ tmp_uuid, txn_id);
peer_cnt++;
list_del_init (&peerinfo->op_peers_list);
}
@@ -1331,11 +1331,12 @@ out:
glusterd_op_send_cli_response (op, op_ret, 0, req, op_ctx, op_errstr);
glusterd_op_clear_op (op);
if (is_acquired) {
- /* Based on the op-version, we release the cluster or volume lock */
+ /* Based on the op-version, we release *
+ * the cluster or mgmt_v3 lock */
if (conf->op_version < 3)
glusterd_unlock (MY_UUID);
else {
- ret = glusterd_volume_unlock (volname, MY_UUID);
+ ret = glusterd_mgmt_v3_unlock (volname, MY_UUID);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Unable to release lock for %s",
@@ -1497,7 +1498,7 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
op = tmp_op;
- /* Based on the op_version, acquire a cluster or volume lock */
+ /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
if (conf->op_version < 3) {
ret = glusterd_lock (MY_UUID);
if (ret) {
@@ -1526,7 +1527,7 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
goto out;
}
- ret = glusterd_volume_lock (volname, MY_UUID);
+ ret = glusterd_mgmt_v3_lock (volname, MY_UUID);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Unable to acquire lock for %s", volname);
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
index 4d5e6310f..cd1bb880f 100644
--- a/xlators/mgmt/glusterd/src/glusterd.c
+++ b/xlators/mgmt/glusterd/src/glusterd.c
@@ -1365,7 +1365,7 @@ init (xlator_t *this)
glusterd_friend_sm_init ();
glusterd_op_sm_init ();
glusterd_opinfo_init ();
- glusterd_vol_lock_init ();
+ glusterd_mgmt_v3_lock_init ();
glusterd_txn_opinfo_dict_init ();
ret = glusterd_sm_tr_log_init (&conf->op_sm_log,
glusterd_op_sm_state_name_get,
@@ -1486,7 +1486,7 @@ fini (xlator_t *this)
if (conf->handle)
gf_store_handle_destroy (conf->handle);
glusterd_sm_tr_log_delete (&conf->op_sm_log);
- glusterd_vol_lock_fini ();
+ glusterd_mgmt_v3_lock_fini ();
glusterd_txn_opinfo_dict_fini ();
GF_FREE (conf);
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index 8ba626183..296b2196d 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -606,12 +606,12 @@ int
glusterd_op_unlock_send_resp (rpcsvc_request_t *req, int32_t status);
int
-glusterd_op_volume_lock_send_resp (rpcsvc_request_t *req,
- uuid_t *txn_id, int32_t status);
+glusterd_op_mgmt_v3_lock_send_resp (rpcsvc_request_t *req,
+ uuid_t *txn_id, int32_t status);
int
-glusterd_op_volume_unlock_send_resp (rpcsvc_request_t *req,
- uuid_t *txn_id, int32_t status);
+glusterd_op_mgmt_v3_unlock_send_resp (rpcsvc_request_t *req,
+ uuid_t *txn_id, int32_t status);
int
glusterd_op_stage_send_resp (rpcsvc_request_t *req,