-rw-r--r--  libglusterfs/src/mem-types.h                          7
-rw-r--r--  rpc/rpc-lib/src/protocol-common.h                     2
-rw-r--r--  rpc/xdr/src/glusterd1-xdr.c                          78
-rw-r--r--  rpc/xdr/src/glusterd1-xdr.h                          53
-rw-r--r--  rpc/xdr/src/glusterd1-xdr.x                          29
-rw-r--r--  xlators/mgmt/glusterd/src/Makefile.am                 4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-brick-ops.c        4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c        454
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-locks.c          179
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-locks.h           38
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c          519
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.h           15
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rebalance.c        2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-replace-brick.c   16
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rpc-ops.c        300
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-syncop.c         431
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c           32
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.h            2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.c                  5
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.h                 13
20 files changed, 1996 insertions(+), 187 deletions(-)
diff --git a/libglusterfs/src/mem-types.h b/libglusterfs/src/mem-types.h
index 3b0f7d276..04327b080 100644
--- a/libglusterfs/src/mem-types.h
+++ b/libglusterfs/src/mem-types.h
@@ -115,7 +115,10 @@ enum gf_common_mem_types_ {
gf_common_mt_client_ctx = 99,
gf_common_mt_lock_table = 100,
gf_common_mt_locker = 101,
- gf_common_mt_auxgids = 102,
- gf_common_mt_end = 103
+ gf_common_mt_auxgids = 102,
+ gf_common_mt_uuid_t = 103,
+ gf_common_mt_vol_lock_obj_t = 104,
+ gf_common_mt_txn_opinfo_obj_t = 105,
+ gf_common_mt_end = 106
};
#endif
diff --git a/rpc/rpc-lib/src/protocol-common.h b/rpc/rpc-lib/src/protocol-common.h
index 704b1540a..5ae5e6223 100644
--- a/rpc/rpc-lib/src/protocol-common.h
+++ b/rpc/rpc-lib/src/protocol-common.h
@@ -169,6 +169,8 @@ enum glusterd_mgmt_procnum {
GLUSTERD_MGMT_CLUSTER_UNLOCK,
GLUSTERD_MGMT_STAGE_OP,
GLUSTERD_MGMT_COMMIT_OP,
+ GLUSTERD_MGMT_VOLUME_LOCK,
+ GLUSTERD_MGMT_VOLUME_UNLOCK,
GLUSTERD_MGMT_MAXVALUE,
};
diff --git a/rpc/xdr/src/glusterd1-xdr.c b/rpc/xdr/src/glusterd1-xdr.c
index 213b48bc6..6c6514c90 100644
--- a/rpc/xdr/src/glusterd1-xdr.c
+++ b/rpc/xdr/src/glusterd1-xdr.c
@@ -491,3 +491,81 @@ xdr_gd1_mgmt_brick_op_rsp (XDR *xdrs, gd1_mgmt_brick_op_rsp *objp)
return FALSE;
return TRUE;
}
+
+bool_t
+xdr_gd1_mgmt_volume_lock_req (XDR *xdrs, gd1_mgmt_volume_lock_req *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_vector (xdrs, (char *)objp->txn_id, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_volume_lock_rsp (XDR *xdrs, gd1_mgmt_volume_lock_rsp *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_vector (xdrs, (char *)objp->txn_id, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_volume_unlock_req (XDR *xdrs, gd1_mgmt_volume_unlock_req *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_vector (xdrs, (char *)objp->txn_id, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_mgmt_volume_unlock_rsp (XDR *xdrs, gd1_mgmt_volume_unlock_rsp *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_vector (xdrs, (char *)objp->txn_id, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ return TRUE;
+}
diff --git a/rpc/xdr/src/glusterd1-xdr.h b/rpc/xdr/src/glusterd1-xdr.h
index c35930cad..4115ff7a8 100644
--- a/rpc/xdr/src/glusterd1-xdr.h
+++ b/rpc/xdr/src/glusterd1-xdr.h
@@ -202,6 +202,51 @@ struct gd1_mgmt_brick_op_rsp {
};
typedef struct gd1_mgmt_brick_op_rsp gd1_mgmt_brick_op_rsp;
+struct gd1_mgmt_volume_lock_req {
+ u_char uuid[16];
+ u_char txn_id[16];
+ int op;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gd1_mgmt_volume_lock_req gd1_mgmt_volume_lock_req;
+
+struct gd1_mgmt_volume_lock_rsp {
+ u_char uuid[16];
+ u_char txn_id[16];
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+ int op_ret;
+ int op_errno;
+};
+typedef struct gd1_mgmt_volume_lock_rsp gd1_mgmt_volume_lock_rsp;
+
+struct gd1_mgmt_volume_unlock_req {
+ u_char uuid[16];
+ u_char txn_id[16];
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gd1_mgmt_volume_unlock_req gd1_mgmt_volume_unlock_req;
+
+struct gd1_mgmt_volume_unlock_rsp {
+ u_char uuid[16];
+ u_char txn_id[16];
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+ int op_ret;
+ int op_errno;
+};
+typedef struct gd1_mgmt_volume_unlock_rsp gd1_mgmt_volume_unlock_rsp;
+
/* the xdr functions */
#if defined(__STDC__) || defined(__cplusplus)
@@ -224,6 +269,10 @@ extern bool_t xdr_gd1_mgmt_friend_update (XDR *, gd1_mgmt_friend_update*);
extern bool_t xdr_gd1_mgmt_friend_update_rsp (XDR *, gd1_mgmt_friend_update_rsp*);
extern bool_t xdr_gd1_mgmt_brick_op_req (XDR *, gd1_mgmt_brick_op_req*);
extern bool_t xdr_gd1_mgmt_brick_op_rsp (XDR *, gd1_mgmt_brick_op_rsp*);
+extern bool_t xdr_gd1_mgmt_volume_lock_req (XDR *, gd1_mgmt_volume_lock_req*);
+extern bool_t xdr_gd1_mgmt_volume_lock_rsp (XDR *, gd1_mgmt_volume_lock_rsp*);
+extern bool_t xdr_gd1_mgmt_volume_unlock_req (XDR *, gd1_mgmt_volume_unlock_req*);
+extern bool_t xdr_gd1_mgmt_volume_unlock_rsp (XDR *, gd1_mgmt_volume_unlock_rsp*);
#else /* K&R C */
extern bool_t xdr_glusterd_volume_status ();
@@ -245,6 +294,10 @@ extern bool_t xdr_gd1_mgmt_friend_update ();
extern bool_t xdr_gd1_mgmt_friend_update_rsp ();
extern bool_t xdr_gd1_mgmt_brick_op_req ();
extern bool_t xdr_gd1_mgmt_brick_op_rsp ();
+extern bool_t xdr_gd1_mgmt_volume_lock_req ();
+extern bool_t xdr_gd1_mgmt_volume_lock_rsp ();
+extern bool_t xdr_gd1_mgmt_volume_unlock_req ();
+extern bool_t xdr_gd1_mgmt_volume_unlock_rsp ();
#endif /* K&R C */
diff --git a/rpc/xdr/src/glusterd1-xdr.x b/rpc/xdr/src/glusterd1-xdr.x
index fc1bb58b4..f29a9d214 100644
--- a/rpc/xdr/src/glusterd1-xdr.x
+++ b/rpc/xdr/src/glusterd1-xdr.x
@@ -125,3 +125,32 @@ struct gd1_mgmt_brick_op_rsp {
opaque output<>;
string op_errstr<>;
} ;
+
+struct gd1_mgmt_volume_lock_req {
+ unsigned char uuid[16];
+ unsigned char txn_id[16];
+ int op;
+ opaque dict<>;
+} ;
+
+struct gd1_mgmt_volume_lock_rsp {
+ unsigned char uuid[16];
+ unsigned char txn_id[16];
+ opaque dict<>;
+ int op_ret;
+ int op_errno;
+} ;
+
+struct gd1_mgmt_volume_unlock_req {
+ unsigned char uuid[16];
+ unsigned char txn_id[16];
+ opaque dict<>;
+} ;
+
+struct gd1_mgmt_volume_unlock_rsp {
+ unsigned char uuid[16];
+ unsigned char txn_id[16];
+ opaque dict<>;
+ int op_ret;
+ int op_errno;
+} ;
diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am
index 6d87b9469..f4b663729 100644
--- a/xlators/mgmt/glusterd/src/Makefile.am
+++ b/xlators/mgmt/glusterd/src/Makefile.am
@@ -12,7 +12,7 @@ glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c \
glusterd-geo-rep.c glusterd-replace-brick.c glusterd-log-ops.c \
glusterd-volume-ops.c glusterd-brick-ops.c glusterd-mountbroker.c \
glusterd-syncop.c glusterd-hooks.c glusterd-volume-set.c \
- glusterd-snapshot.c
+ glusterd-locks.c glusterd-snapshot.c
glusterd_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
$(top_builddir)/rpc/xdr/src/libgfxdr.la \
@@ -22,7 +22,7 @@ glusterd_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
noinst_HEADERS = glusterd.h glusterd-utils.h glusterd-op-sm.h \
glusterd-sm.h glusterd-store.h glusterd-mem-types.h \
glusterd-pmap.h glusterd-volgen.h glusterd-mountbroker.h \
- glusterd-syncop.h glusterd-hooks.h
+ glusterd-syncop.h glusterd-hooks.h glusterd-locks.h
AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
-I$(rpclibdir) -I$(CONTRIBDIR)/rbtree \
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
index e5b7eb566..de179ef47 100644
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
@@ -1457,7 +1457,7 @@ glusterd_op_stage_remove_brick (dict_t *dict, char **op_errstr)
goto out;
}
- if (is_origin_glusterd ()) {
+ if (is_origin_glusterd (dict)) {
ret = glusterd_generate_and_set_task_id
(dict, GF_REMOVE_BRICK_TID_KEY);
if (ret) {
@@ -1704,7 +1704,7 @@ glusterd_op_remove_brick (dict_t *dict, char **op_errstr)
/* Set task-id, if available, in ctx dict for operations other than
* start
*/
- if (is_origin_glusterd () && (cmd != GF_OP_CMD_START)) {
+ if (is_origin_glusterd (dict) && (cmd != GF_OP_CMD_START)) {
if (!uuid_is_null (volinfo->rebal.rebalance_id)) {
ret = glusterd_copy_uuid_to_dict
(volinfo->rebal.rebalance_id, dict,
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 93c6959f1..7239774aa 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -33,6 +33,7 @@
#include "glusterd-op-sm.h"
#include "glusterd-utils.h"
#include "glusterd-store.h"
+#include "glusterd-locks.h"
#include "glusterd1-xdr.h"
#include "cli1-xdr.h"
@@ -54,6 +55,8 @@
#include <lvm2app.h>
#endif
+extern uuid_t global_txn_id;
+
int glusterd_big_locked_notify (struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event,
void *data, rpc_clnt_notify_t notify_fn)
@@ -488,10 +491,17 @@ int32_t
glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
char *err_str, size_t err_len)
{
- int32_t ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- int32_t locked = 0;
+ int32_t ret = -1;
+ dict_t *dict = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ int32_t locked = 0;
+ char *tmp = NULL;
+ char *volname = NULL;
+ uuid_t *txn_id = NULL;
+ uuid_t *originator_uuid = NULL;
+ glusterd_op_info_t txn_op_info = {{0},};
+ glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
GF_ASSERT (req);
GF_ASSERT ((op > GD_OP_NONE) && (op < GD_OP_MAX));
@@ -502,33 +512,139 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
priv = this->private;
GF_ASSERT (priv);
- ret = glusterd_lock (MY_UUID);
+ dict = ctx;
+
+ /* Generate a transaction-id for this operation and
+ * save it in the dict. This transaction id distinguishes
+ * each transaction, and helps separate opinfos in the
+ * op state machine. */
+ txn_id = GF_CALLOC (1, sizeof(uuid_t), gf_common_mt_uuid_t);
+ if (!txn_id)
+ goto out;
+
+ uuid_generate (*txn_id);
+
+ ret = dict_set_bin (dict, "transaction_id",
+ txn_id, sizeof (uuid_t));
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set transaction id.");
+ goto out;
+ }
+
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Transaction_id = %s", uuid_utoa (*txn_id));
+
+ /* Save the MY_UUID as the originator_uuid. This originator_uuid
+ * will be used by is_origin_glusterd() to determine if a node
+ * is the originator node for a command. */
+ originator_uuid = GF_CALLOC (1, sizeof(uuid_t),
+ gf_common_mt_uuid_t);
+ if (!originator_uuid) {
+ ret = -1;
+ goto out;
+ }
+
+ uuid_copy (*originator_uuid, MY_UUID);
+ ret = dict_set_bin (dict, "originator_uuid",
+ originator_uuid, sizeof (uuid_t));
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
- "Unable to acquire lock on localhost, ret: %d", ret);
- snprintf (err_str, err_len, "Another transaction is in progress. "
- "Please try again after sometime.");
+ "Failed to set originator uuid.");
goto out;
}
+ /* Based on the op_version, acquire a cluster or volume lock */
+ if (priv->op_version < 3) {
+ ret = glusterd_lock (MY_UUID);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire lock on localhost, ret: %d",
+ ret);
+ snprintf (err_str, err_len,
+ "Another transaction is in progress. "
+ "Please try again after sometime.");
+ goto out;
+ }
+ } else {
+ /* If no volname is given as a part of the command, locks will
+ * not be held */
+ ret = dict_get_str (dict, "volname", &tmp);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Failed to get volume "
+ "name");
+ goto local_locking_done;
+ } else {
+ /* Use a copy of volname, as cli response will be
+ * sent before the unlock, and the volname in the
+ * dict, might be removed */
+ volname = gf_strdup (tmp);
+ if (!volname)
+ goto out;
+ }
+
+ ret = glusterd_volume_lock (volname, MY_UUID);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire lock for %s", volname);
+ snprintf (err_str, err_len,
+ "Another transaction is in progress for %s. "
+ "Please try again after sometime.", volname);
+ goto out;
+ }
+ }
+
locked = 1;
gf_log (this->name, GF_LOG_DEBUG, "Acquired lock on localhost");
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_START_LOCK, NULL);
+local_locking_done:
+
+ /* If no volname is given as a part of the command, locks will
+ * not be held, hence sending stage event. */
+ if (volname)
+ event_type = GD_OP_EVENT_START_LOCK;
+ else {
+ txn_op_info.state.state = GD_OP_STATE_LOCK_SENT;
+ event_type = GD_OP_EVENT_ALL_ACC;
+ }
+
+ /* Save opinfo for this transaction with the transaction id */
+ txn_op_info.op = op;
+ txn_op_info.op_ctx = ctx;
+ txn_op_info.req = req;
+
+ ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set transaction's opinfo");
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event (event_type, txn_id, ctx);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Failed to acquire cluster"
" lock.");
goto out;
}
- glusterd_op_set_op (op);
- glusterd_op_set_ctx (ctx);
- glusterd_op_set_req (req);
-
-
out:
- if (locked && ret)
- glusterd_unlock (MY_UUID);
+ if (locked && ret) {
+ /* Based on the op-version, we release the
+ * cluster or volume lock */
+ if (priv->op_version < 3)
+ glusterd_unlock (MY_UUID);
+ else {
+ ret = glusterd_volume_unlock (volname, MY_UUID);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to release lock for %s",
+ volname);
+ ret = -1;
+ }
+ }
+
+ if (volname)
+ GF_FREE (volname);
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
@@ -542,6 +658,8 @@ __glusterd_handle_cluster_lock (rpcsvc_request_t *req)
glusterd_op_lock_ctx_t *ctx = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = &global_txn_id;
+ glusterd_op_info_t txn_op_info = {{0},};
this = THIS;
GF_ASSERT (this);
@@ -576,8 +694,29 @@ __glusterd_handle_cluster_lock (rpcsvc_request_t *req)
uuid_copy (ctx->uuid, lock_req.uuid);
ctx->req = req;
+ ctx->dict = NULL;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCK, ctx);
+ txn_op_info.op = GD_OP_EVENT_LOCK;
+ txn_op_info.op_ctx = dict_new ();
+ if (!txn_op_info.op_ctx) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set new dict");
+ goto out;
+ }
+ txn_op_info.req = req;
+
+ ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set transaction's opinfo");
+ dict_destroy (txn_op_info.op_ctx);
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCK, txn_id, ctx);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to inject event GD_OP_EVENT_LOCK");
out:
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
@@ -595,6 +734,187 @@ glusterd_handle_cluster_lock (rpcsvc_request_t *req)
__glusterd_handle_cluster_lock);
}
+static int
+glusterd_handle_volume_lock_fn (rpcsvc_request_t *req)
+{
+ gd1_mgmt_volume_lock_req lock_req = {{0},};
+ int32_t ret = -1;
+ glusterd_op_lock_ctx_t *ctx = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ glusterd_op_info_t txn_op_info = {{0},};
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ ret = xdr_to_generic (req->msg[0], &lock_req,
+ (xdrproc_t)xdr_gd1_mgmt_volume_lock_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode lock "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_log (this->name, GF_LOG_DEBUG, "Received volume lock req "
+ "from uuid: %s txn_id: %s", uuid_utoa (lock_req.uuid),
+ uuid_utoa (lock_req.txn_id));
+
+ if (glusterd_friend_find_by_uuid (lock_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (lock_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
+ if (!ctx) {
+ ret = -1;
+ goto out;
+ }
+
+ uuid_copy (ctx->uuid, lock_req.uuid);
+ ctx->req = req;
+
+ ctx->dict = dict_new ();
+ if (!ctx->dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize (lock_req.dict.dict_val,
+ lock_req.dict.dict_len, &ctx->dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ txn_op_info.op = lock_req.op;
+ txn_op_info.op_ctx = ctx->dict;
+ txn_op_info.req = req;
+
+ ret = glusterd_set_txn_opinfo (&lock_req.txn_id, &txn_op_info);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set transaction's opinfo");
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCK,
+ &lock_req.txn_id, ctx);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to inject event GD_OP_EVENT_LOCK");
+
+out:
+ if (ret) {
+ if (ctx->dict)
+ dict_destroy (ctx->dict);
+ if (ctx)
+ GF_FREE (ctx);
+ }
+
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int
+glusterd_handle_volume_lock (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ glusterd_handle_volume_lock_fn);
+}
+
+static int
+glusterd_handle_volume_unlock_fn (rpcsvc_request_t *req)
+{
+ gd1_mgmt_volume_unlock_req lock_req = {{0},};
+ int32_t ret = -1;
+ glusterd_op_lock_ctx_t *ctx = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ ret = xdr_to_generic (req->msg[0], &lock_req,
+ (xdrproc_t)xdr_gd1_mgmt_volume_unlock_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode unlock "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_log (this->name, GF_LOG_DEBUG, "Received volume unlock req "
+ "from uuid: %s", uuid_utoa (lock_req.uuid));
+
+ if (glusterd_friend_find_by_uuid (lock_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (lock_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
+ if (!ctx) {
+ ret = -1;
+ goto out;
+ }
+
+ uuid_copy (ctx->uuid, lock_req.uuid);
+ ctx->req = req;
+
+ ctx->dict = dict_new ();
+ if (!ctx->dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize (lock_req.dict.dict_val,
+ lock_req.dict.dict_len, &ctx->dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_UNLOCK,
+ &lock_req.txn_id, ctx);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to inject event GD_OP_EVENT_UNLOCK");
+
+out:
+ if (ret) {
+ if (ctx->dict)
+ dict_destroy (ctx->dict);
+ if (ctx)
+ GF_FREE (ctx);
+ }
+
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int
+glusterd_handle_volume_unlock (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ glusterd_handle_volume_unlock_fn);
+}
+
int
glusterd_req_ctx_create (rpcsvc_request_t *rpc_req,
glusterd_op_t op, uuid_t uuid,
@@ -653,6 +973,8 @@ __glusterd_handle_stage_op (rpcsvc_request_t *req)
gd1_mgmt_stage_op_req op_req = {{0},};
glusterd_peerinfo_t *peerinfo = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = &global_txn_id;
+ glusterd_op_info_t txn_op_info = {{0},};
this = THIS;
GF_ASSERT (this);
@@ -681,7 +1003,37 @@ __glusterd_handle_stage_op (rpcsvc_request_t *req)
if (ret)
goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_OP, req_ctx);
+ ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id);
+
+ gf_log ("", GF_LOG_DEBUG, "transaction ID = %s", uuid_utoa (*txn_id));
+
+ /* In cases where there is no volname, the receivers won't have a
+ * transaction opinfo created, as for those operations, the locking
+ * phase where the transaction opinfos are created, won't be called. */
+ ret = glusterd_get_txn_opinfo (txn_id, &txn_op_info);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to get transaction's opinfo");
+
+ txn_op_info.op = op_req.op;
+ txn_op_info.state.state = GD_OP_STATE_LOCKED;
+ txn_op_info.op_ctx = req_ctx->dict;
+ txn_op_info.req = req;
+
+ ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set transaction's opinfo");
+ dict_destroy (req_ctx->dict);
+ goto out;
+ }
+ }
+
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_OP,
+ txn_id, req_ctx);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to inject event GD_OP_EVENT_STAGE_OP");
out:
free (op_req.buf.buf_val);//malloced by xdr
@@ -705,6 +1057,7 @@ __glusterd_handle_commit_op (rpcsvc_request_t *req)
gd1_mgmt_commit_op_req op_req = {{0},};
glusterd_peerinfo_t *peerinfo = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = &global_txn_id;
this = THIS;
GF_ASSERT (this);
@@ -735,11 +1088,12 @@ __glusterd_handle_commit_op (rpcsvc_request_t *req)
if (ret)
goto out;
- ret = glusterd_op_init_ctx (op_req.op);
- if (ret)
- goto out;
+ ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id);
+
+ gf_log ("", GF_LOG_DEBUG, "transaction ID = %s", uuid_utoa (*txn_id));
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_OP, req_ctx);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_OP,
+ txn_id, req_ctx);
out:
free (op_req.buf.buf_val);//malloced by xdr
@@ -1863,6 +2217,56 @@ glusterd_op_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
}
int
+glusterd_op_volume_lock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
+ int32_t status)
+{
+
+ gd1_mgmt_volume_lock_rsp rsp = {{0},};
+ int ret = -1;
+
+ GF_ASSERT (req);
+ GF_ASSERT (txn_id);
+ glusterd_get_uuid (&rsp.uuid);
+ rsp.op_ret = status;
+ if (rsp.op_ret)
+ rsp.op_errno = errno;
+ uuid_copy (rsp.txn_id, *txn_id);
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_volume_lock_rsp);
+
+ gf_log (THIS->name, GF_LOG_DEBUG, "Responded to volume lock, ret: %d",
+ ret);
+
+ return ret;
+}
+
+int
+glusterd_op_volume_unlock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
+ int32_t status)
+{
+
+ gd1_mgmt_volume_unlock_rsp rsp = {{0},};
+ int ret = -1;
+
+ GF_ASSERT (req);
+ GF_ASSERT (txn_id);
+ rsp.op_ret = status;
+ if (rsp.op_ret)
+ rsp.op_errno = errno;
+ glusterd_get_uuid (&rsp.uuid);
+ uuid_copy (rsp.txn_id, *txn_id);
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_volume_unlock_rsp);
+
+ gf_log (THIS->name, GF_LOG_DEBUG, "Responded to volume unlock, ret: %d",
+ ret);
+
+ return ret;
+}
+
+int
__glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
{
gd1_mgmt_cluster_unlock_req unlock_req = {{0}, };
@@ -1870,6 +2274,7 @@ __glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
glusterd_op_lock_ctx_t *ctx = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = &global_txn_id;
this = THIS;
GF_ASSERT (this);
@@ -1904,8 +2309,9 @@ __glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
}
uuid_copy (ctx->uuid, unlock_req.uuid);
ctx->req = req;
+ ctx->dict = NULL;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_UNLOCK, ctx);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_UNLOCK, txn_id, ctx);
out:
glusterd_friend_sm ();
@@ -3877,6 +4283,8 @@ rpcsvc_actor_t gd_svc_mgmt_actors[] = {
[GLUSTERD_MGMT_CLUSTER_UNLOCK] = { "CLUSTER_UNLOCK", GLUSTERD_MGMT_CLUSTER_UNLOCK, glusterd_handle_cluster_unlock, NULL, 0, DRC_NA},
[GLUSTERD_MGMT_STAGE_OP] = { "STAGE_OP", GLUSTERD_MGMT_STAGE_OP, glusterd_handle_stage_op, NULL, 0, DRC_NA},
[GLUSTERD_MGMT_COMMIT_OP] = { "COMMIT_OP", GLUSTERD_MGMT_COMMIT_OP, glusterd_handle_commit_op, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_VOLUME_LOCK] = { "VOLUME_LOCK", GLUSTERD_MGMT_VOLUME_LOCK, glusterd_handle_volume_lock, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_VOLUME_UNLOCK] = { "VOLUME_UNLOCK", GLUSTERD_MGMT_VOLUME_UNLOCK, glusterd_handle_volume_unlock, NULL, 0, DRC_NA},
};
struct rpcsvc_program gd_svc_mgmt_prog = {
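[Note: the reworked glusterd_op_txn_begin() in the hunk above boils down to the following condensed sketch. It is illustrative only and not part of the patch; error handling, logging and cleanup are trimmed, the function name txn_begin_sketch is hypothetical, and the no-volname case (which skips locking and injects GD_OP_EVENT_ALL_ACC) is omitted for brevity.]

        /* Condensed, illustrative view of the new transaction-begin path. */
        static int32_t
        txn_begin_sketch (rpcsvc_request_t *req, glusterd_op_t op, dict_t *dict,
                          glusterd_conf_t *priv)
        {
                uuid_t             *txn_id      = NULL;
                char               *volname     = NULL;
                glusterd_op_info_t  txn_op_info = {{0},};

                /* 1. Generate a transaction id and stash it in the op dict so
                 *    every node taking part in the transaction can find the
                 *    matching opinfo later. */
                txn_id = GF_CALLOC (1, sizeof (uuid_t), gf_common_mt_uuid_t);
                uuid_generate (*txn_id);
                dict_set_bin (dict, "transaction_id", txn_id, sizeof (uuid_t));

                /* 2. Take a lock: clusters still running op_version < 3 keep
                 *    the old cluster-wide lock, newer ones lock per volume. */
                if (priv->op_version < 3) {
                        glusterd_lock (MY_UUID);
                } else if (dict_get_str (dict, "volname", &volname) == 0) {
                        glusterd_volume_lock (volname, MY_UUID);
                }

                /* 3. Record this transaction's opinfo, keyed by txn_id. */
                txn_op_info.op     = op;
                txn_op_info.op_ctx = dict;
                txn_op_info.req    = req;
                glusterd_set_txn_opinfo (txn_id, &txn_op_info);

                /* 4. Drive the op state machine with the txn_id attached. */
                return glusterd_op_sm_inject_event (GD_OP_EVENT_START_LOCK,
                                                    txn_id, dict);
        }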
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.c b/xlators/mgmt/glusterd/src/glusterd-locks.c
new file mode 100644
index 000000000..c09ba33a7
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.c
@@ -0,0 +1,179 @@
+/*
+ Copyright (c) 2013-2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "common-utils.h"
+#include "cli1-xdr.h"
+#include "xdr-generic.h"
+#include "glusterd.h"
+#include "glusterd-op-sm.h"
+#include "glusterd-store.h"
+#include "glusterd-utils.h"
+#include "glusterd-volgen.h"
+#include "glusterd-locks.h"
+#include "run.h"
+#include "syscall.h"
+
+#include <signal.h>
+
+static dict_t *vol_lock;
+
+/* Initialize the global vol-lock list(dict) when
+ * glusterd is spawned */
+int32_t
+glusterd_vol_lock_init ()
+{
+ int32_t ret = -1;
+
+ vol_lock = dict_new ();
+ if (!vol_lock) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+/* Destroy the global vol-lock list(dict) when
+ * glusterd cleanup is performed */
+void
+glusterd_vol_lock_fini ()
+{
+ if (vol_lock)
+ dict_destroy (vol_lock);
+}
+
+int32_t
+glusterd_get_vol_lock_owner (char *volname, uuid_t *uuid)
+{
+ int32_t ret = -1;
+ vol_lock_obj *lock_obj = NULL;
+ uuid_t no_owner = {"\0"};
+
+ if (!volname || !uuid) {
+ gf_log ("", GF_LOG_ERROR, "volname or uuid is null.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_bin(vol_lock, volname, (void **) &lock_obj);
+ if (!ret)
+ uuid_copy (*uuid, lock_obj->lock_owner);
+ else
+ uuid_copy (*uuid, no_owner);
+
+ ret = 0;
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd_volume_lock (char *volname, uuid_t uuid)
+{
+ int32_t ret = -1;
+ vol_lock_obj *lock_obj = NULL;
+ uuid_t owner = {0};
+
+ if (!volname) {
+ gf_log ("", GF_LOG_ERROR, "volname is null.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_get_vol_lock_owner (volname, &owner);
+ if (ret) {
+ gf_log ("", GF_LOG_DEBUG, "Unable to get volume lock owner");
+ goto out;
+ }
+
+ /* If the lock has already been held for the given volume
+ * we fail */
+ if (!uuid_is_null (owner)) {
+ gf_log ("", GF_LOG_ERROR, "Unable to acquire lock. "
+ "Lock for %s held by %s", volname,
+ uuid_utoa (owner));
+ ret = -1;
+ goto out;
+ }
+
+ lock_obj = GF_CALLOC (1, sizeof(vol_lock_obj),
+ gf_common_mt_vol_lock_obj_t);
+ if (!lock_obj) {
+ ret = -1;
+ goto out;
+ }
+
+ uuid_copy (lock_obj->lock_owner, uuid);
+
+ ret = dict_set_bin (vol_lock, volname, lock_obj, sizeof(vol_lock_obj));
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to set lock owner "
+ "in volume lock");
+ if (lock_obj)
+ GF_FREE (lock_obj);
+ goto out;
+ }
+
+ gf_log ("", GF_LOG_DEBUG, "Lock for %s successfully held by %s",
+ volname, uuid_utoa (uuid));
+
+ ret = 0;
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd_volume_unlock (char *volname, uuid_t uuid)
+{
+ int32_t ret = -1;
+ uuid_t owner = {0};
+
+ if (!volname) {
+ gf_log ("", GF_LOG_ERROR, "volname is null.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_get_vol_lock_owner (volname, &owner);
+ if (ret)
+ goto out;
+
+ if (uuid_is_null (owner)) {
+ gf_log ("", GF_LOG_ERROR, "Lock for %s not held", volname);
+ ret = -1;
+ goto out;
+ }
+
+ ret = uuid_compare (uuid, owner);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Lock for %s held by %s. "
+ "Unlock req received from %s", volname,
+ uuid_utoa (owner), uuid_utoa (uuid));
+ goto out;
+ }
+
+ /* Removing the volume lock from the global list */
+ dict_del (vol_lock, volname);
+
+ gf_log ("", GF_LOG_DEBUG, "Lock for %s successfully released",
+ volname);
+
+ ret = 0;
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
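[Note: the helpers in this new file amount to a simple ownership map keyed by volume name, backed by a dict. A minimal usage sketch of the API follows; the caller example_volume_txn is hypothetical and not part of the patch, and it assumes glusterd_vol_lock_init() has already run once at glusterd startup.]

        /* Hypothetical caller showing the intended lock/unlock pairing. */
        int32_t
        example_volume_txn (char *volname)
        {
                int32_t ret = -1;

                ret = glusterd_volume_lock (volname, MY_UUID);
                if (ret)
                        /* Some other transaction already owns this volume. */
                        return ret;

                /* ... perform the staged/committed volume operation ... */

                ret = glusterd_volume_unlock (volname, MY_UUID);
                return ret;
        }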
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.h b/xlators/mgmt/glusterd/src/glusterd-locks.h
new file mode 100644
index 000000000..2a8cc20ed
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.h
@@ -0,0 +1,38 @@
+/*
+ Copyright (c) 2013-2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#ifndef _GLUSTERD_LOCKS_H_
+#define _GLUSTERD_LOCKS_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+struct volume_lock_object_ {
+ uuid_t lock_owner;
+};
+typedef struct volume_lock_object_ vol_lock_obj;
+
+int32_t
+glusterd_vol_lock_init ();
+
+void
+glusterd_vol_lock_fini ();
+
+int32_t
+glusterd_get_vol_lock_owner (char *volname, uuid_t *uuid);
+
+int32_t
+glusterd_volume_lock (char *volname, uuid_t uuid);
+
+int32_t
+glusterd_volume_unlock (char *volname, uuid_t uuid);
+
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 571fc1c78..6c41d6a21 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -37,6 +37,7 @@
#include "glusterd-store.h"
#include "glusterd-hooks.h"
#include "glusterd-volgen.h"
+#include "glusterd-locks.h"
#include "syscall.h"
#include "cli1-xdr.h"
#include "common-utils.h"
@@ -67,6 +68,133 @@
static struct list_head gd_op_sm_queue;
pthread_mutex_t gd_op_sm_lock;
glusterd_op_info_t opinfo = {{0},};
+uuid_t global_txn_id = {"\0"}; /* To be used in
+ * heterogeneous
+ * cluster with no
+ * transaction ids */
+
+static dict_t *txn_opinfo;
+
+struct txn_opinfo_object_ {
+ glusterd_op_info_t opinfo;
+};
+typedef struct txn_opinfo_object_ txn_opinfo_obj;
+
+int32_t
+glusterd_txn_opinfo_init ()
+{
+ int32_t ret = -1;
+
+ txn_opinfo = dict_new ();
+ if (!txn_opinfo) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+void
+glusterd_txn_opinfo_fini ()
+{
+ if (txn_opinfo)
+ dict_destroy (txn_opinfo);
+}
+
+int32_t
+glusterd_get_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo)
+{
+ int32_t ret = -1;
+ txn_opinfo_obj *opinfo_obj = NULL;
+
+ if (!txn_id || !opinfo) {
+ gf_log ("", GF_LOG_ERROR,
+ "Empty transaction id or opinfo received.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_bin(txn_opinfo, uuid_utoa (*txn_id),
+ (void **) &opinfo_obj);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Unable to get transaction opinfo");
+ goto out;
+ }
+
+ (*opinfo) = opinfo_obj->opinfo;
+
+ ret = 0;
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd_set_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo)
+{
+ int32_t ret = -1;
+ txn_opinfo_obj *opinfo_obj = NULL;
+
+ if (!txn_id) {
+ gf_log ("", GF_LOG_ERROR, "Empty transaction id received.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_bin(txn_opinfo, uuid_utoa (*txn_id),
+ (void **) &opinfo_obj);
+ if (ret) {
+ opinfo_obj = GF_CALLOC (1, sizeof(txn_opinfo_obj),
+ gf_common_mt_txn_opinfo_obj_t);
+ if (!opinfo_obj) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_bin(txn_opinfo, uuid_utoa (*txn_id), opinfo_obj,
+ sizeof(txn_opinfo_obj));
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Unable to set opinfo for transaction ID : %s",
+ uuid_utoa (*txn_id));
+ goto out;
+ }
+ }
+
+ opinfo_obj->opinfo = (*opinfo);
+
+ ret = 0;
+out:
+ if (ret)
+ if (opinfo_obj)
+ GF_FREE (opinfo_obj);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd_clear_txn_opinfo (uuid_t *txn_id)
+{
+ int32_t ret = -1;
+
+ if (!txn_id) {
+ gf_log ("", GF_LOG_ERROR, "Empty transaction id received.");
+ ret = -1;
+ goto out;
+ }
+
+ dict_del(txn_opinfo, uuid_utoa (*txn_id));
+
+ ret = 0;
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
static int glusterfs_port = GLUSTERD_DEFAULT_PORT;
static char *glusterd_op_sm_state_names[] = {
"Default",
@@ -147,10 +275,10 @@ glusterd_is_volume_started (glusterd_volinfo_t *volinfo)
}
static int
-glusterd_op_sm_inject_all_acc ()
+glusterd_op_sm_inject_all_acc (uuid_t *txn_id)
{
int32_t ret = -1;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACC, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACC, txn_id, NULL);
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
@@ -416,11 +544,17 @@ glusterd_op_stage_set_volume (dict_t *dict, char **op_errstr)
if (!val_dict)
goto out;
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to get volume name");
+ goto out;
+ }
+
/* Check if we can support the required op-version
* This check is not done on the originator glusterd. The originator
* glusterd sets this value.
*/
- origin_glusterd = is_origin_glusterd ();
+ origin_glusterd = is_origin_glusterd (dict);
if (!origin_glusterd) {
/* Check for v3.3.x origin glusterd */
@@ -2124,13 +2258,13 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
GF_ASSERT (dict);
- origin_glusterd = is_origin_glusterd ();
+ origin_glusterd = is_origin_glusterd (dict);
ret = dict_get_uint32 (dict, "cmd", &cmd);
if (ret)
goto out;
- if (is_origin_glusterd ()) {
+ if (origin_glusterd) {
ret = 0;
if ((cmd & GF_CLI_STATUS_ALL)) {
ret = glusterd_get_all_volnames (rsp_dict);
@@ -2329,6 +2463,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
xlator_t *this = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
uint32_t pending_count = 0;
+ dict_t *dict = NULL;
this = THIS;
priv = this->private;
@@ -2343,27 +2478,59 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
(glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
continue;
- proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_CLUSTER_LOCK];
- if (proc->fn) {
- ret = proc->fn (NULL, this, peerinfo);
- if (ret) {
- gf_log (this->name, GF_LOG_WARNING, "Failed to "
- "send lock request for operation "
- "'Volume %s' to peer %s",
- gd_op_list[opinfo.op],
- peerinfo->hostname);
- continue;
+ /* Based on the op_version, acquire a cluster or volume lock */
+ if (priv->op_version < 3) {
+ proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_CLUSTER_LOCK];
+ if (proc->fn) {
+ ret = proc->fn (NULL, this, peerinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Failed to send lock request "
+ "for operation 'Volume %s' to "
+ "peer %s",
+ gd_op_list[opinfo.op],
+ peerinfo->hostname);
+ continue;
+ }
+ pending_count++;
+ }
+ } else {
+ dict = glusterd_op_get_ctx ();
+ dict_ref (dict);
+
+ proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_VOLUME_LOCK];
+ if (proc->fn) {
+ ret = dict_set_static_ptr (dict, "peerinfo",
+ peerinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to set peerinfo");
+ dict_unref (dict);
+ goto out;
+ }
+
+ ret = proc->fn (NULL, this, dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Failed to send volume lock "
+ "request for operation "
+ "'Volume %s' to peer %s",
+ gd_op_list[opinfo.op],
+ peerinfo->hostname);
+ dict_unref (dict);
+ continue;
+ }
+ pending_count++;
}
- pending_count++;
}
}
opinfo.pending_count = pending_count;
if (!opinfo.pending_count)
- ret = glusterd_op_sm_inject_all_acc ();
+ ret = glusterd_op_sm_inject_all_acc (&event->txn_id);
+out:
gf_log (this->name, GF_LOG_DEBUG, "Returning with %d", ret);
-
return ret;
}
@@ -2376,17 +2543,12 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
xlator_t *this = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
uint32_t pending_count = 0;
+ dict_t *dict = NULL;
this = THIS;
priv = this->private;
GF_ASSERT (priv);
- /*ret = glusterd_unlock (MY_UUID);
-
- if (ret)
- goto out;
- */
-
list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
GF_ASSERT (peerinfo);
@@ -2396,29 +2558,60 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
(glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
continue;
- proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_CLUSTER_UNLOCK];
- if (proc->fn) {
- ret = proc->fn (NULL, this, peerinfo);
- if (ret) {
- gf_log (this->name, GF_LOG_WARNING, "Failed to "
- "send unlock request for operation "
- "'Volume %s' to peer %s",
- gd_op_list[opinfo.op],
- peerinfo->hostname);
- continue;
+ /* Based on the op_version, release the cluster or volume lock */
+ if (priv->op_version < 3) {
+ proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_CLUSTER_UNLOCK];
+ if (proc->fn) {
+ ret = proc->fn (NULL, this, peerinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Failed to send unlock request "
+ "for operation 'Volume %s' to "
+ "peer %s",
+ gd_op_list[opinfo.op],
+ peerinfo->hostname);
+ continue;
+ }
+ pending_count++;
+ }
+ } else {
+ dict = glusterd_op_get_ctx ();
+ dict_ref (dict);
+
+ proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_VOLUME_UNLOCK];
+ if (proc->fn) {
+ ret = dict_set_static_ptr (dict, "peerinfo",
+ peerinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to set peerinfo");
+ dict_unref (dict);
+ goto out;
+ }
+
+ ret = proc->fn (NULL, this, dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Failed to send volume unlock "
+ "request for operation "
+ "'Volume %s' to peer %s",
+ gd_op_list[opinfo.op],
+ peerinfo->hostname);
+ dict_unref (dict);
+ continue;
+ }
+ pending_count++;
}
- pending_count++;
}
}
opinfo.pending_count = pending_count;
if (!opinfo.pending_count)
- ret = glusterd_op_sm_inject_all_acc ();
+ ret = glusterd_op_sm_inject_all_acc (&event->txn_id);
+out:
gf_log (this->name, GF_LOG_DEBUG, "Returning with %d", ret);
-
return ret;
-
}
static int
@@ -2430,7 +2623,8 @@ glusterd_op_ac_ack_drain (glusterd_op_sm_event_t *event, void *ctx)
opinfo.pending_count--;
if (!opinfo.pending_count)
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK,
+ &event->txn_id, NULL);
gf_log (THIS->name, GF_LOG_DEBUG, "Returning with %d", ret);
@@ -2446,43 +2640,92 @@ glusterd_op_ac_send_unlock_drain (glusterd_op_sm_event_t *event, void *ctx)
static int
glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx)
{
- glusterd_op_lock_ctx_t *lock_ctx = NULL;
- int32_t ret = 0;
+ int32_t ret = 0;
+ char *volname = NULL;
+ glusterd_op_lock_ctx_t *lock_ctx = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
GF_ASSERT (event);
GF_ASSERT (ctx);
+ this = THIS;
+ priv = this->private;
+
lock_ctx = (glusterd_op_lock_ctx_t *)ctx;
- ret = glusterd_lock (lock_ctx->uuid);
+ /* If the req came from a node running on older op_version
+ * the dict won't be present. Based on it acquiring a cluster
+ * or volume lock */
+ if (lock_ctx->dict == NULL) {
+ ret = glusterd_lock (lock_ctx->uuid);
+ glusterd_op_lock_send_resp (lock_ctx->req, ret);
+ } else {
+ ret = dict_get_str (lock_ctx->dict, "volname", &volname);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire volname");
+ else {
+ ret = glusterd_volume_lock (volname, lock_ctx->uuid);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire lock for %s",
+ volname);
+ }
- gf_log (THIS->name, GF_LOG_DEBUG, "Lock Returned %d", ret);
+ glusterd_op_volume_lock_send_resp (lock_ctx->req,
+ &event->txn_id, ret);
- glusterd_op_lock_send_resp (lock_ctx->req, ret);
+ dict_unref (lock_ctx->dict);
+ }
+ gf_log (THIS->name, GF_LOG_DEBUG, "Lock Returned %d", ret);
return ret;
}
static int
glusterd_op_ac_unlock (glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = 0;
- glusterd_op_lock_ctx_t *lock_ctx = NULL;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
+ int32_t ret = 0;
+ char *volname = NULL;
+ glusterd_op_lock_ctx_t *lock_ctx = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
GF_ASSERT (event);
GF_ASSERT (ctx);
this = THIS;
priv = this->private;
+
lock_ctx = (glusterd_op_lock_ctx_t *)ctx;
- ret = glusterd_unlock (lock_ctx->uuid);
+ /* If the req came from a node running on older op_version
+ * the dict won't be present. Based on it releasing the cluster
+ * or volume lock */
+ if (lock_ctx->dict == NULL) {
+ ret = glusterd_unlock (lock_ctx->uuid);
+ glusterd_op_unlock_send_resp (lock_ctx->req, ret);
+ } else {
+ ret = dict_get_str (lock_ctx->dict, "volname", &volname);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire volname");
+ else {
+ ret = glusterd_volume_unlock (volname, lock_ctx->uuid);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to release lock for %s", volname);
+ }
- gf_log (this->name, GF_LOG_DEBUG, "Unlock Returned %d", ret);
+ glusterd_op_volume_unlock_send_resp (lock_ctx->req,
+ &event->txn_id, ret);
- glusterd_op_unlock_send_resp (lock_ctx->req, ret);
+ dict_unref (lock_ctx->dict);
+ }
+
+ gf_log (this->name, GF_LOG_DEBUG, "Unlock Returned %d", ret);
if (priv->pending_quorum_action)
glusterd_do_quorum_action ();
@@ -2520,7 +2763,8 @@ glusterd_op_ac_rcvd_lock_acc (glusterd_op_sm_event_t *event, void *ctx)
if (opinfo.pending_count > 0)
goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACC, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACC,
+ &event->txn_id, NULL);
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
@@ -2955,7 +3199,8 @@ out:
if (dict)
dict_unref (dict);
if (ret) {
- glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT, NULL);
+ glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT,
+ &event->txn_id, NULL);
opinfo.op_ret = ret;
}
@@ -2964,7 +3209,7 @@ out:
opinfo.pending_count);
if (!opinfo.pending_count)
- ret = glusterd_op_sm_inject_all_acc ();
+ ret = glusterd_op_sm_inject_all_acc (&event->txn_id);
gf_log (this->name, GF_LOG_DEBUG, "Returning with %d", ret);
@@ -2973,7 +3218,7 @@ out:
}
static int32_t
-glusterd_op_start_rb_timer (dict_t *dict)
+glusterd_op_start_rb_timer (dict_t *dict, uuid_t *txn_id)
{
int32_t op = 0;
struct timeval timeout = {0, };
@@ -2992,7 +3237,7 @@ glusterd_op_start_rb_timer (dict_t *dict)
}
if (op != GF_REPLACE_OP_START) {
- ret = glusterd_op_sm_inject_all_acc ();
+ ret = glusterd_op_sm_inject_all_acc (txn_id);
goto out;
}
@@ -3007,6 +3252,17 @@ glusterd_op_start_rb_timer (dict_t *dict)
ret = -1;
goto out;
}
+
+ ret = dict_set_bin (rb_ctx, "transaction_id",
+ txn_id, sizeof (uuid_t));
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR,
+ "Failed to set transaction id.");
+ goto out;
+ } else
+ gf_log ("", GF_LOG_DEBUG,
+ "transaction_id = %s", uuid_utoa (*txn_id));
+
priv->timer = gf_timer_call_after (THIS->ctx, timeout,
glusterd_do_replace_brick,
(void *) rb_ctx);
@@ -3432,17 +3688,19 @@ out:
if (dict)
dict_unref (dict);
if (ret) {
- glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT, NULL);
+ glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT,
+ &event->txn_id, NULL);
opinfo.op_ret = ret;
}
if (!opinfo.pending_count) {
if (op == GD_OP_REPLACE_BRICK) {
- ret = glusterd_op_start_rb_timer (op_dict);
+ ret = glusterd_op_start_rb_timer (op_dict,
+ &event->txn_id);
} else {
glusterd_op_modify_op_ctx (op, NULL);
- ret = glusterd_op_sm_inject_all_acc ();
+ ret = glusterd_op_sm_inject_all_acc (&event->txn_id);
}
goto err;
}
@@ -3467,7 +3725,8 @@ glusterd_op_ac_rcvd_stage_op_acc (glusterd_op_sm_event_t *event, void *ctx)
if (opinfo.pending_count > 0)
goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_ACC, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_ACC,
+ &event->txn_id, NULL);
out:
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
@@ -3488,7 +3747,8 @@ glusterd_op_ac_stage_op_failed (glusterd_op_sm_event_t *event, void *ctx)
if (opinfo.pending_count > 0)
goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK,
+ &event->txn_id, NULL);
out:
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
@@ -3509,7 +3769,8 @@ glusterd_op_ac_commit_op_failed (glusterd_op_sm_event_t *event, void *ctx)
if (opinfo.pending_count > 0)
goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK,
+ &event->txn_id, NULL);
out:
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
@@ -3552,7 +3813,8 @@ glusterd_op_ac_brick_op_failed (glusterd_op_sm_event_t *event, void *ctx)
if (opinfo.brick_pending_count > 0)
goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK, ev_ctx->commit_ctx);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK,
+ &event->txn_id, ev_ctx->commit_ctx);
out:
if (ev_ctx->rsp_dict)
@@ -3594,7 +3856,7 @@ glusterd_op_ac_rcvd_commit_op_acc (glusterd_op_sm_event_t *event, void *ctx)
goto out;
}
- ret = glusterd_op_start_rb_timer (op_ctx);
+ ret = glusterd_op_start_rb_timer (op_ctx, &event->txn_id);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Couldn't start "
"replace-brick operation.");
@@ -3609,10 +3871,12 @@ glusterd_op_ac_rcvd_commit_op_acc (glusterd_op_sm_event_t *event, void *ctx)
out:
if (commit_ack_inject) {
if (ret)
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT,
+ &event->txn_id, NULL);
else if (!opinfo.pending_count) {
glusterd_op_modify_op_ctx (op, NULL);
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_ACC, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_ACC,
+ &event->txn_id, NULL);
}
/*else do nothing*/
}
@@ -3633,7 +3897,8 @@ glusterd_op_ac_rcvd_unlock_acc (glusterd_op_sm_event_t *event, void *ctx)
if (opinfo.pending_count > 0)
goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACC, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACC,
+ &event->txn_id, NULL);
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
@@ -3667,7 +3932,7 @@ glusterd_op_reset_ctx ()
}
int32_t
-glusterd_op_txn_complete ()
+glusterd_op_txn_complete (uuid_t *txn_id)
{
int32_t ret = -1;
glusterd_conf_t *priv = NULL;
@@ -3677,6 +3942,7 @@ glusterd_op_txn_complete ()
rpcsvc_request_t *req = NULL;
void *ctx = NULL;
char *op_errstr = NULL;
+ char *volname = NULL;
xlator_t *this = NULL;
this = THIS;
@@ -3699,14 +3965,28 @@ glusterd_op_txn_complete ()
glusterd_op_reset_ctx ();
glusterd_op_clear_errstr ();
- ret = glusterd_unlock (MY_UUID);
-
- /* unlock cant/shouldnt fail here!! */
- if (ret) {
- gf_log (this->name, GF_LOG_CRITICAL,
- "Unable to clear local lock, ret: %d", ret);
+ /* Based on the op-version, we release the cluster or volume lock */
+ if (priv->op_version < 3) {
+ ret = glusterd_unlock (MY_UUID);
+ /* unlock cant/shouldnt fail here!! */
+ if (ret)
+ gf_log (this->name, GF_LOG_CRITICAL,
+ "Unable to clear local lock, ret: %d", ret);
+ else
+ gf_log (this->name, GF_LOG_DEBUG, "Cleared local lock");
} else {
- gf_log (this->name, GF_LOG_DEBUG, "Cleared local lock");
+ ret = dict_get_str (ctx, "volname", &volname);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire volname");
+
+ if (volname) {
+ ret = glusterd_volume_unlock (volname, MY_UUID);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to release lock for %s",
+ volname);
+ }
}
ret = glusterd_op_send_cli_response (op, op_ret,
@@ -3725,6 +4005,13 @@ glusterd_op_txn_complete ()
if (priv->pending_quorum_action)
glusterd_do_quorum_action ();
+
+ /* Clearing the transaction opinfo */
+ ret = glusterd_clear_txn_opinfo (txn_id);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to clear transaction's opinfo");
+
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
@@ -3736,7 +4023,7 @@ glusterd_op_ac_unlocked_all (glusterd_op_sm_event_t *event, void *ctx)
GF_ASSERT (event);
- ret = glusterd_op_txn_complete ();
+ ret = glusterd_op_txn_complete (&event->txn_id);
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
@@ -3753,6 +4040,7 @@ glusterd_op_ac_stage_op (glusterd_op_sm_event_t *event, void *ctx)
char *op_errstr = NULL;
dict_t *dict = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
this = THIS;
GF_ASSERT (this);
@@ -3778,6 +4066,19 @@ glusterd_op_ac_stage_op (glusterd_op_sm_event_t *event, void *ctx)
status);
}
+ txn_id = GF_CALLOC (1, sizeof(uuid_t), gf_common_mt_uuid_t);
+
+ if (txn_id)
+ uuid_copy (*txn_id, event->txn_id);
+ else
+ gf_log (this->name, GF_LOG_ERROR, "Out of Memory");
+
+ ret = dict_set_bin (rsp_dict, "transaction_id",
+ txn_id, sizeof(uuid_t *));
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set transaction id.");
+
ret = glusterd_op_stage_send_resp (req_ctx->req, req_ctx->op,
status, op_errstr, rsp_dict);
@@ -3842,6 +4143,7 @@ glusterd_op_ac_commit_op (glusterd_op_sm_event_t *event, void *ctx)
dict_t *dict = NULL;
dict_t *rsp_dict = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
this = THIS;
GF_ASSERT (this);
@@ -3871,6 +4173,19 @@ glusterd_op_ac_commit_op (glusterd_op_sm_event_t *event, void *ctx)
"'Volume %s' failed: %d", gd_op_list[req_ctx->op],
status);
+ txn_id = GF_CALLOC (1, sizeof(uuid_t), gf_common_mt_uuid_t);
+
+ if (txn_id)
+ uuid_copy (*txn_id, event->txn_id);
+ else
+ gf_log (this->name, GF_LOG_ERROR, "Out of Memory");
+
+ ret = dict_set_bin (rsp_dict, "transaction_id",
+ txn_id, sizeof(uuid_t));
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set transaction id.");
+
ret = glusterd_op_commit_send_resp (req_ctx->req, req_ctx->op,
status, op_errstr, rsp_dict);
@@ -4993,7 +5308,8 @@ glusterd_op_ac_send_brick_op (glusterd_op_sm_event_t *event, void *ctx)
if (!opinfo.pending_count && !opinfo.brick_pending_count) {
glusterd_clear_pending_nodes (&opinfo.pending_bricks);
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK, req_ctx);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK,
+ &event->txn_id, req_ctx);
}
out:
@@ -5047,7 +5363,8 @@ glusterd_op_ac_rcvd_brick_op_acc (glusterd_op_sm_event_t *event, void *ctx)
if (opinfo.brick_pending_count > 0)
goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK, ev_ctx->commit_ctx);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK, &event->txn_id,
+ ev_ctx->commit_ctx);
out:
if (ev_ctx->rsp_dict)
@@ -5425,7 +5742,7 @@ glusterd_op_sm_new_event (glusterd_op_sm_event_type_t event_type,
int
glusterd_op_sm_inject_event (glusterd_op_sm_event_type_t event_type,
- void *ctx)
+ uuid_t *txn_id, void *ctx)
{
int32_t ret = -1;
glusterd_op_sm_event_t *event = NULL;
@@ -5440,6 +5757,9 @@ glusterd_op_sm_inject_event (glusterd_op_sm_event_type_t event_type,
event->ctx = ctx;
+ if (txn_id)
+ uuid_copy (event->txn_id, *txn_id);
+
gf_log (THIS->name, GF_LOG_DEBUG, "Enqueue event: '%s'",
glusterd_op_sm_event_name_get (event->event));
list_add_tail (&event->list, &gd_op_sm_queue);
@@ -5500,6 +5820,7 @@ glusterd_op_sm ()
glusterd_op_sm_t *state = NULL;
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
xlator_t *this = NULL;
+ glusterd_op_info_t txn_op_info;
this = THIS;
GF_ASSERT (this);
@@ -5520,6 +5841,20 @@ glusterd_op_sm ()
"type: '%s'",
glusterd_op_sm_event_name_get(event_type));
+ gf_log ("", GF_LOG_DEBUG, "transaction ID = %s",
+ uuid_utoa (event->txn_id));
+
+ ret = glusterd_get_txn_opinfo (&event->txn_id,
+ &txn_op_info);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to get transaction's opinfo");
+ glusterd_destroy_op_event_ctx (event);
+ GF_FREE (event);
+ continue;
+ } else
+ opinfo = txn_op_info;
+
state = glusterd_op_state_table[opinfo.state.state];
GF_ASSERT (state);
@@ -5550,8 +5885,27 @@ glusterd_op_sm ()
return ret;
}
+ if ((state[event_type].next_state ==
+ GD_OP_STATE_DEFAULT) &&
+ (event_type == GD_OP_EVENT_UNLOCK)) {
+ /* Clearing the transaction opinfo */
+ ret = glusterd_clear_txn_opinfo(&event->txn_id);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to clear "
+ "transaction's opinfo");
+ } else {
+ ret = glusterd_set_txn_opinfo (&event->txn_id,
+ &opinfo);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set "
+ "transaction's opinfo");
+ }
+
glusterd_destroy_op_event_ctx (event);
GF_FREE (event);
+
}
}
@@ -5605,7 +5959,7 @@ glusterd_op_clear_op (glusterd_op_t op)
}
int32_t
-glusterd_op_init_ctx (glusterd_op_t op)
+glusterd_op_init_ctx (glusterd_op_t op, glusterd_op_info_t *op_info)
{
int ret = 0;
dict_t *dict = NULL;
@@ -5625,9 +5979,8 @@ glusterd_op_init_ctx (glusterd_op_t op)
ret = -1;
goto out;
}
- ret = glusterd_op_set_ctx (dict);
- if (ret)
- goto out;
+
+ op_info->op_ctx = dict;
out:
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
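[Note: with the changes above, the op state machine keeps one opinfo per transaction id instead of relying solely on the single global opinfo. The lookup/store cycle added to glusterd_op_sm() can be summarized by the illustrative fragment below; it is not part of the patch, and the handler invocation is elided as a comment.]

        /* Illustrative fragment mirroring the new glusterd_op_sm () loop:
         * each dequeued event carries a txn_id; the matching opinfo is loaded
         * before the state handler runs and persisted (or cleared) after it. */
        glusterd_op_info_t txn_op_info = {{0},};

        if (glusterd_get_txn_opinfo (&event->txn_id, &txn_op_info) == 0) {
                opinfo = txn_op_info;     /* load into the working opinfo */

                /* ... run state[event_type].handler (event, ctx) ... */

                if ((event->event == GD_OP_EVENT_UNLOCK) &&
                    (state[event->event].next_state == GD_OP_STATE_DEFAULT))
                        /* Transaction is over; drop its opinfo. */
                        glusterd_clear_txn_opinfo (&event->txn_id);
                else
                        /* Persist any state changes for the next event. */
                        glusterd_set_txn_opinfo (&event->txn_id, &opinfo);
        }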
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
index 321c5c484..525d80d38 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
@@ -75,6 +75,7 @@ struct glusterd_op_sm_event_ {
struct list_head list;
void *ctx;
glusterd_op_sm_event_type_t event;
+ uuid_t txn_id;
};
typedef struct glusterd_op_sm_event_ glusterd_op_sm_event_t;
@@ -117,6 +118,7 @@ typedef struct glusterd_op_log_filename_ctx_ glusterd_op_log_filename_ctx_t;
struct glusterd_op_lock_ctx_ {
uuid_t uuid;
+ dict_t *dict;
rpcsvc_request_t *req;
};
@@ -167,7 +169,7 @@ glusterd_op_sm_new_event (glusterd_op_sm_event_type_t event_type,
glusterd_op_sm_event_t **new_event);
int
glusterd_op_sm_inject_event (glusterd_op_sm_event_type_t event_type,
- void *ctx);
+ uuid_t *txn_id, void *ctx);
int
glusterd_op_sm_init ();
@@ -252,7 +254,7 @@ glusterd_op_init_commit_rsp_dict (glusterd_op_t op);
void
glusterd_op_modify_op_ctx (glusterd_op_t op, void *op_ctx);
int32_t
-glusterd_op_init_ctx (glusterd_op_t op);
+glusterd_op_init_ctx (glusterd_op_t op, glusterd_op_info_t *op_info);
int32_t
glusterd_op_fini_ctx ();
int32_t
@@ -278,4 +280,13 @@ glusterd_check_gsync_running (glusterd_volinfo_t *volinfo, gf_boolean_t *flag);
int
glusterd_defrag_volume_node_rsp (dict_t *req_dict, dict_t *rsp_dict,
dict_t *op_ctx);
+
+int32_t
+glusterd_get_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo);
+
+int32_t
+glusterd_set_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo);
+
+int32_t
+glusterd_clear_txn_opinfo (uuid_t *txn_id);
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
index 031de2c9b..b7b974c68 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
@@ -532,7 +532,7 @@ glusterd_op_stage_rebalance (dict_t *dict, char **op_errstr)
case GF_DEFRAG_CMD_START:
case GF_DEFRAG_CMD_START_LAYOUT_FIX:
case GF_DEFRAG_CMD_START_FORCE:
- if (is_origin_glusterd ()) {
+ if (is_origin_glusterd (dict)) {
op_ctx = glusterd_op_get_ctx ();
if (!op_ctx) {
ret = -1;
diff --git a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
index 5c879b64c..6f7e726c9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
+++ b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
@@ -31,6 +31,7 @@
DEFAULT_VAR_RUN_DIRECTORY"/%s-"RB_CLIENT_MOUNTPOINT, \
volinfo->volname);
+extern uuid_t global_txn_id;
int
glusterd_get_replace_op_str (gf1_cli_replace_op op, char *op_str)
@@ -332,7 +333,7 @@ glusterd_op_stage_replace_brick (dict_t *dict, char **op_errstr,
ret = -1;
goto out;
}
- if (is_origin_glusterd ()) {
+ if (is_origin_glusterd (dict)) {
if (!ctx) {
ret = -1;
gf_log (this->name, GF_LOG_ERROR,
@@ -1638,7 +1639,7 @@ glusterd_op_replace_brick (dict_t *dict, dict_t *rsp_dict)
/* Set task-id, if available, in op_ctx dict for operations
* other than start
*/
- if (is_origin_glusterd ()) {
+ if (is_origin_glusterd (dict)) {
ctx = glusterd_op_get_ctx();
if (!ctx) {
gf_log (this->name, GF_LOG_ERROR, "Failed to "
@@ -1901,6 +1902,7 @@ glusterd_do_replace_brick (void *data)
glusterd_brickinfo_t *src_brickinfo = NULL;
glusterd_brickinfo_t *dst_brickinfo = NULL;
glusterd_conf_t *priv = NULL;
+ uuid_t *txn_id = &global_txn_id;
int ret = 0;
@@ -1920,6 +1922,10 @@ glusterd_do_replace_brick (void *data)
gf_log ("", GF_LOG_DEBUG,
"Replace brick operation detected");
+ ret = dict_get_bin (dict, "transaction_id", (void **)&txn_id);
+
+ gf_log ("", GF_LOG_DEBUG, "transaction ID = %s", uuid_utoa (*txn_id));
+
ret = dict_get_int32 (dict, "operation", &op);
if (ret) {
gf_log ("", GF_LOG_DEBUG,
@@ -2015,9 +2021,11 @@ glusterd_do_replace_brick (void *data)
out:
if (ret)
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT,
+ txn_id, NULL);
else
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_ACC, NULL);
+ ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_ACC,
+ txn_id, NULL);
glusterd_op_sm ();
}
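
glusterd_do_replace_brick () now looks up the transaction id from the operation dict and passes it along when injecting the commit/reject event; when the key is absent, txn_id simply keeps its initial value of &global_txn_id. A sketch of that retrieve-and-inject pattern with the lookup result made explicit (illustrative helper, same dict key as the patch):

/* Sketch: pull the transaction id out of an op dict and inject an
 * event carrying it, falling back to the global id. */
static int
inject_with_txn_id (dict_t *dict, glusterd_op_sm_event_type_t event_type)
{
        int      ret    = -1;
        uuid_t  *txn_id = &global_txn_id;

        ret = dict_get_bin (dict, "transaction_id", (void **)&txn_id);
        if (ret)
                gf_log ("", GF_LOG_DEBUG,
                        "transaction_id not present, using global txn id");

        ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
        if (!ret)
                glusterd_op_sm ();

        return ret;
}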
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 2d8d381bf..28b5bd155 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -33,6 +33,7 @@
extern glusterd_op_info_t opinfo;
+extern uuid_t global_txn_id;
int32_t
glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
@@ -581,6 +582,7 @@ __glusterd_cluster_lock_cbk (struct rpc_req *req, struct iovec *iov,
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
glusterd_peerinfo_t *peerinfo = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = &global_txn_id;
this = THIS;
GF_ASSERT (this);
@@ -625,7 +627,7 @@ out:
event_type = GD_OP_EVENT_RCVD_ACC;
}
- ret = glusterd_op_sm_inject_event (event_type, NULL);
+ ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
if (!ret) {
glusterd_friend_sm ();
@@ -644,6 +646,164 @@ glusterd_cluster_lock_cbk (struct rpc_req *req, struct iovec *iov,
__glusterd_cluster_lock_cbk);
}
+static int32_t
+glusterd_vol_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gd1_mgmt_volume_lock_rsp rsp = {{0},};
+ int ret = -1;
+ int32_t op_ret = -1;
+ glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp,
+ (xdrproc_t)xdr_gd1_mgmt_volume_lock_rsp);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to decode volume lock "
+ "response received from peer");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+out:
+ op_ret = rsp.op_ret;
+
+ txn_id = &rsp.txn_id;
+
+ gf_log (this->name, (op_ret) ? GF_LOG_ERROR : GF_LOG_DEBUG,
+ "Received volume lock %s from uuid: %s",
+ (op_ret) ? "RJT" : "ACC", uuid_utoa (rsp.uuid));
+
+ ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_CRITICAL,
+ "Volume lock response received "
+ "from unknown peer: %s", uuid_utoa (rsp.uuid));
+ }
+
+ if (op_ret) {
+ event_type = GD_OP_EVENT_RCVD_RJT;
+ opinfo.op_ret = op_ret;
+ opinfo.op_errstr = gf_strdup ("Another transaction could be in "
+ "progress. Please try again after"
+ " sometime.");
+ } else {
+ event_type = GD_OP_EVENT_RCVD_ACC;
+ }
+
+ ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
+
+ if (!ret) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
+
+ GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe));
+ return ret;
+}
+
+int32_t
+glusterd_vol_lock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ return glusterd_big_locked_cbk (req, iov, count, myframe,
+ glusterd_vol_lock_cbk_fn);
+}
+
+static int32_t
+glusterd_vol_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gd1_mgmt_volume_unlock_rsp rsp = {{0},};
+ int ret = -1;
+ int32_t op_ret = -1;
+ glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp,
+ (xdrproc_t)xdr_gd1_mgmt_volume_unlock_rsp);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to decode volume unlock "
+ "response received from peer");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+out:
+ op_ret = rsp.op_ret;
+
+ txn_id = &rsp.txn_id;
+
+ gf_log (this->name, (op_ret) ? GF_LOG_ERROR : GF_LOG_DEBUG,
+ "Received volume unlock %s from uuid: %s",
+ (op_ret) ? "RJT" : "ACC",
+ uuid_utoa (rsp.uuid));
+
+ ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_CRITICAL,
+ "Volume unlock response received "
+ "from unknown peer: %s", uuid_utoa (rsp.uuid));
+ }
+
+ if (op_ret) {
+ event_type = GD_OP_EVENT_RCVD_RJT;
+ opinfo.op_ret = op_ret;
+ opinfo.op_errstr = gf_strdup ("Another transaction could be in "
+ "progress. Please try again after"
+ " sometime.");
+ } else {
+ event_type = GD_OP_EVENT_RCVD_ACC;
+ }
+
+ ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
+
+ if (!ret) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
+
+ GLUSTERD_STACK_DESTROY (((call_frame_t *)myframe));
+ return ret;
+}
+
+int32_t
+glusterd_vol_unlock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ return glusterd_big_locked_cbk (req, iov, count, myframe,
+ glusterd_vol_unlock_cbk_fn);
+}
+
int32_t
__glusterd_cluster_unlock_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
@@ -654,6 +814,7 @@ __glusterd_cluster_unlock_cbk (struct rpc_req *req, struct iovec *iov,
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
glusterd_peerinfo_t *peerinfo = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = &global_txn_id;
this = THIS;
GF_ASSERT (this);
@@ -695,7 +856,7 @@ out:
event_type = GD_OP_EVENT_RCVD_ACC;
}
- ret = glusterd_op_sm_inject_event (event_type, NULL);
+ ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
if (!ret) {
glusterd_friend_sm ();
@@ -727,6 +888,7 @@ __glusterd_stage_op_cbk (struct rpc_req *req, struct iovec *iov,
char err_str[2048] = {0};
char *peer_str = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = &global_txn_id;
this = THIS;
GF_ASSERT (this);
@@ -779,6 +941,10 @@ out:
"Received stage %s from uuid: %s",
(op_ret) ? "RJT" : "ACC", uuid_utoa (rsp.uuid));
+ ret = dict_get_bin (dict, "transaction_id", (void **)&txn_id);
+
+ gf_log ("", GF_LOG_DEBUG, "transaction ID = %s", uuid_utoa (*txn_id));
+
ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo);
if (ret) {
@@ -814,7 +980,7 @@ out:
break;
}
- ret = glusterd_op_sm_inject_event (event_type, NULL);
+ ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
if (!ret) {
glusterd_friend_sm ();
@@ -854,6 +1020,8 @@ __glusterd_commit_op_cbk (struct rpc_req *req, struct iovec *iov,
char err_str[2048] = {0};
char *peer_str = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = &global_txn_id;
+
this = THIS;
GF_ASSERT (this);
@@ -907,6 +1075,10 @@ __glusterd_commit_op_cbk (struct rpc_req *req, struct iovec *iov,
"Received commit %s from uuid: %s",
(op_ret)?"RJT":"ACC", uuid_utoa (rsp.uuid));
+ ret = dict_get_bin (dict, "transaction_id", (void **)&txn_id);
+
+ gf_log ("", GF_LOG_DEBUG, "transaction ID = %s", uuid_utoa (*txn_id));
+
ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo);
if (ret) {
@@ -986,7 +1158,7 @@ __glusterd_commit_op_cbk (struct rpc_req *req, struct iovec *iov,
}
out:
- ret = glusterd_op_sm_inject_event (event_type, NULL);
+ ret = glusterd_op_sm_inject_event (event_type, txn_id, NULL);
if (!ret) {
glusterd_friend_sm ();
@@ -1223,6 +1395,106 @@ out:
}
int32_t
+glusterd_vol_lock (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gd1_mgmt_volume_lock_req req = {{0},};
+ int ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ call_frame_t *dummy_frame = NULL;
+ dict_t *dict = NULL;
+
+ if (!this)
+ goto out;
+
+ dict = data;
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ ret = dict_get_ptr (dict, "peerinfo", VOID (&peerinfo));
+ if (ret)
+ goto out;
+
+ //peerinfo should not be in payload
+ dict_del (dict, "peerinfo");
+
+ glusterd_get_uuid (&req.uuid);
+
+ ret = dict_allocate_and_serialize (dict, &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to serialize dict "
+ "to request buffer");
+ goto out;
+ }
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+ if (!dummy_frame)
+ goto out;
+
+ ret = glusterd_submit_request (peerinfo->rpc, &req, dummy_frame,
+ peerinfo->mgmt,
+ GLUSTERD_MGMT_VOLUME_LOCK, NULL,
+ this, glusterd_vol_lock_cbk,
+ (xdrproc_t)xdr_gd1_mgmt_volume_lock_req);
+out:
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd_vol_unlock (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gd1_mgmt_volume_unlock_req req = {{0},};
+ int ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ call_frame_t *dummy_frame = NULL;
+ dict_t *dict = NULL;
+
+ if (!this)
+ goto out;
+
+ dict = data;
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ ret = dict_get_ptr (dict, "peerinfo", VOID (&peerinfo));
+ if (ret)
+ goto out;
+
+ //peerinfo should not be in payload
+ dict_del (dict, "peerinfo");
+
+ glusterd_get_uuid (&req.uuid);
+
+ ret = dict_allocate_and_serialize (dict, &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to serialize dict "
+ "to request buffer");
+ goto out;
+ }
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+ if (!dummy_frame)
+ goto out;
+
+ ret = glusterd_submit_request (peerinfo->rpc, &req, dummy_frame,
+ peerinfo->mgmt,
+ GLUSTERD_MGMT_VOLUME_UNLOCK, NULL,
+ this, glusterd_vol_unlock_cbk,
+ (xdrproc_t)xdr_gd1_mgmt_volume_unlock_req);
+out:
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
glusterd_cluster_unlock (call_frame_t *frame, xlator_t *this,
void *data)
{
@@ -1385,6 +1657,7 @@ __glusterd_brick_op_cbk (struct rpc_req *req, struct iovec *iov,
glusterd_req_ctx_t *req_ctx = NULL;
glusterd_pending_node_t *node = NULL;
xlator_t *this = NULL;
+ uuid_t *txn_id = &global_txn_id;
this = THIS;
GF_ASSERT (this);
@@ -1447,6 +1720,11 @@ __glusterd_brick_op_cbk (struct rpc_req *req, struct iovec *iov,
}
}
out:
+
+ ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id);
+
+ gf_log ("", GF_LOG_DEBUG, "transaction ID = %s", uuid_utoa (*txn_id));
+
ev_ctx = GF_CALLOC (1, sizeof (*ev_ctx), gf_gld_mt_brick_rsp_ctx_t);
GF_ASSERT (ev_ctx);
if (op_ret) {
@@ -1459,7 +1737,7 @@ out:
ev_ctx->pending_node = frame->cookie;
ev_ctx->rsp_dict = dict;
ev_ctx->commit_ctx = frame->local;
- ret = glusterd_op_sm_inject_event (event_type, ev_ctx);
+ ret = glusterd_op_sm_inject_event (event_type, txn_id, ev_ctx);
if (!ret) {
glusterd_friend_sm ();
glusterd_op_sm ();
@@ -1484,6 +1762,7 @@ int32_t
glusterd_brick_op (call_frame_t *frame, xlator_t *this,
void *data)
{
+
gd1_mgmt_brick_op_req *req = NULL;
int ret = 0;
glusterd_conf_t *priv = NULL;
@@ -1494,6 +1773,7 @@ glusterd_brick_op (call_frame_t *frame, xlator_t *this,
glusterd_req_ctx_t *req_ctx = NULL;
struct rpc_clnt *rpc = NULL;
dict_t *op_ctx = NULL;
+ uuid_t *txn_id = &global_txn_id;
if (!this) {
ret = -1;
@@ -1516,6 +1796,11 @@ glusterd_brick_op (call_frame_t *frame, xlator_t *this,
goto out;
}
+
+ ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id);
+
+ gf_log ("", GF_LOG_DEBUG, "transaction ID = %s", uuid_utoa (*txn_id));
+
list_for_each_entry (pending_node, &opinfo.pending_bricks, list) {
dummy_frame = create_frame (this, this->ctx->pool);
if (!dummy_frame)
@@ -1593,7 +1878,8 @@ glusterd_brick_op (call_frame_t *frame, xlator_t *this,
out:
if (ret) {
- glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT, data);
+ glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT,
+ txn_id, data);
opinfo.op_ret = ret;
}
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
@@ -1619,6 +1905,8 @@ struct rpc_clnt_procedure gd_mgmt_actors[GLUSTERD_MGMT_MAXVALUE] = {
[GLUSTERD_MGMT_CLUSTER_UNLOCK] = {"CLUSTER_UNLOCK", glusterd_cluster_unlock},
[GLUSTERD_MGMT_STAGE_OP] = {"STAGE_OP", glusterd_stage_op},
[GLUSTERD_MGMT_COMMIT_OP] = {"COMMIT_OP", glusterd_commit_op},
+ [GLUSTERD_MGMT_VOLUME_LOCK] = {"VOLUME_LOCK", glusterd_vol_lock},
+ [GLUSTERD_MGMT_VOLUME_UNLOCK] = {"VOLUME_UNLOCK", glusterd_vol_unlock},
};
struct rpc_clnt_program gd_mgmt_prog = {
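
glusterd_vol_lock () and glusterd_vol_unlock () are wired into gd_mgmt_actors above, so a caller reaches them the same way the existing cluster-lock path is reached: stash the peerinfo pointer in the dict and invoke the actor for the new procnum. The dispatch site itself is not part of this hunk; the sketch below assumes the usual proctable pattern and an illustrative helper name:

/* Assumed dispatch, mirroring the existing cluster-lock path. */
static int
send_volume_lock (xlator_t *this, glusterd_peerinfo_t *peerinfo, dict_t *dict)
{
        int                          ret  = -1;
        struct rpc_clnt_procedure   *proc = NULL;

        proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_VOLUME_LOCK];
        if (!proc->fn)
                goto out;

        ret = dict_set_static_ptr (dict, "peerinfo", peerinfo);
        if (ret)
                goto out;

        /* glusterd_vol_lock () deletes "peerinfo" again before
         * serialising the dict into the request */
        ret = proc->fn (NULL, this, dict);
out:
        return ret;
}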
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index a854e0530..8b6019107 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -17,6 +17,9 @@
#include "glusterd.h"
#include "glusterd-op-sm.h"
#include "glusterd-utils.h"
+#include "glusterd-locks.h"
+
+extern glusterd_op_info_t opinfo;
static inline void
gd_synctask_barrier_wait (struct syncargs *args, int count)
@@ -56,11 +59,25 @@ gd_collate_errors (struct syncargs *args, int op_ret, int op_errno,
}
switch (op_code){
+ case GLUSTERD_MGMT_VOLUME_LOCK:
+ {
+ len = snprintf (op_err, sizeof(op_err) - 1,
+ "Locking volume failed "
+ "on %s. %s", peer_str, err_str);
+ break;
+ }
+ case GLUSTERD_MGMT_VOLUME_UNLOCK:
+ {
+ len = snprintf (op_err, sizeof(op_err) - 1,
+ "Unlocking volume failed "
+ "on %s. %s", peer_str, err_str);
+ break;
+ }
case GLUSTERD_MGMT_CLUSTER_LOCK :
{
len = snprintf (op_err, sizeof(op_err) - 1,
- "Locking failed on %s. %s",
- peer_str, err_str);
+ "Locking cluster failed "
+ "on %s. %s", peer_str, err_str);
break;
}
case GLUSTERD_MGMT_CLUSTER_UNLOCK :
@@ -353,6 +370,181 @@ gd_syncop_mgmt_lock (glusterd_peerinfo_t *peerinfo, struct syncargs *args,
}
int32_t
+_gd_syncop_mgmt_volume_lock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ int ret = -1;
+ struct syncargs *args = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ gd1_mgmt_volume_lock_rsp rsp = {{0},};
+ call_frame_t *frame = NULL;
+ int op_ret = -1;
+ int op_errno = -1;
+
+ GF_ASSERT(req);
+ GF_ASSERT(iov);
+ GF_ASSERT(myframe);
+
+ frame = myframe;
+ args = frame->local;
+ peerinfo = frame->cookie;
+ frame->local = NULL;
+ frame->cookie = NULL;
+
+ if (-1 == req->rpc_status) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp,
+ (xdrproc_t)xdr_gd1_mgmt_volume_lock_rsp);
+ if (ret < 0)
+ goto out;
+
+ uuid_copy (args->uuid, rsp.uuid);
+
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+out:
+ gd_collate_errors (args, op_ret, op_errno, NULL,
+ GLUSTERD_MGMT_VOLUME_LOCK, peerinfo, rsp.uuid);
+ STACK_DESTROY (frame->root);
+ synctask_barrier_wake(args);
+ return 0;
+}
+
+int32_t
+gd_syncop_mgmt_volume_lock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ return glusterd_big_locked_cbk (req, iov, count, myframe,
+ _gd_syncop_mgmt_volume_lock_cbk);
+}
+
+int
+gd_syncop_mgmt_volume_lock (glusterd_op_t op, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid, uuid_t txn_id)
+{
+ int ret = -1;
+ gd1_mgmt_volume_lock_req req = {{0},};
+ glusterd_conf_t *conf = THIS->private;
+
+ GF_ASSERT(op_ctx);
+ GF_ASSERT(peerinfo);
+ GF_ASSERT(args);
+
+ ret = dict_allocate_and_serialize (op_ctx,
+ &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret)
+ goto out;
+
+ uuid_copy (req.uuid, my_uuid);
+ uuid_copy (req.txn_id, txn_id);
+ req.op = op;
+ synclock_unlock (&conf->big_lock);
+ ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo,
+ &gd_mgmt_prog,
+ GLUSTERD_MGMT_VOLUME_LOCK,
+ gd_syncop_mgmt_volume_lock_cbk,
+ (xdrproc_t) xdr_gd1_mgmt_volume_lock_req);
+ synclock_lock (&conf->big_lock);
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+_gd_syncop_mgmt_volume_unlock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ int ret = -1;
+ struct syncargs *args = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ gd1_mgmt_volume_unlock_rsp rsp = {{0},};
+ call_frame_t *frame = NULL;
+ int op_ret = -1;
+ int op_errno = -1;
+
+ GF_ASSERT(req);
+ GF_ASSERT(iov);
+ GF_ASSERT(myframe);
+
+ frame = myframe;
+ args = frame->local;
+ peerinfo = frame->cookie;
+ frame->local = NULL;
+ frame->cookie = NULL;
+
+ if (-1 == req->rpc_status) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp,
+ (xdrproc_t)xdr_gd1_mgmt_volume_unlock_rsp);
+ if (ret < 0)
+ goto out;
+
+ uuid_copy (args->uuid, rsp.uuid);
+
+ /* Set peer as locked, so we unlock only the locked peers */
+ if (rsp.op_ret == 0)
+ peerinfo->locked = _gf_true;
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+out:
+ gd_collate_errors (args, op_ret, op_errno, NULL,
+ GLUSTERD_MGMT_VOLUME_UNLOCK, peerinfo, rsp.uuid);
+ STACK_DESTROY (frame->root);
+ synctask_barrier_wake(args);
+ return 0;
+}
+
+int32_t
+gd_syncop_mgmt_volume_unlock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ return glusterd_big_locked_cbk (req, iov, count, myframe,
+ _gd_syncop_mgmt_volume_unlock_cbk);
+}
+
+int
+gd_syncop_mgmt_volume_unlock (dict_t *op_ctx, glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid, uuid_t txn_id)
+{
+ int ret = -1;
+ gd1_mgmt_volume_unlock_req req = {{0},};
+ glusterd_conf_t *conf = THIS->private;
+
+ GF_ASSERT(op_ctx);
+ GF_ASSERT(peerinfo);
+ GF_ASSERT(args);
+
+ ret = dict_allocate_and_serialize (op_ctx,
+ &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret)
+ goto out;
+
+ uuid_copy (req.uuid, my_uuid);
+ uuid_copy (req.txn_id, txn_id);
+ synclock_unlock (&conf->big_lock);
+ ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo,
+ &gd_mgmt_prog,
+ GLUSTERD_MGMT_VOLUME_UNLOCK,
+ gd_syncop_mgmt_volume_unlock_cbk,
+ (xdrproc_t) xdr_gd1_mgmt_volume_unlock_req);
+ synclock_lock (&conf->big_lock);
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
_gd_syncop_mgmt_unlock_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
@@ -814,8 +1006,8 @@ gd_build_peers_list (struct list_head *peers, struct list_head *xact_peers,
}
int
-gd_lock_op_phase (struct list_head *peers, glusterd_op_t op, dict_t *op_ctx,
- char **op_errstr, int npeers)
+gd_lock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, dict_t *op_ctx,
+ char **op_errstr, int npeers, uuid_t txn_id)
{
int ret = -1;
int peer_cnt = 0;
@@ -823,6 +1015,9 @@ gd_lock_op_phase (struct list_head *peers, glusterd_op_t op, dict_t *op_ctx,
xlator_t *this = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
struct syncargs args = {0};
+ struct list_head *peers = NULL;
+
+ peers = &conf->xaction_peers;
if (!npeers) {
ret = 0;
@@ -833,22 +1028,38 @@ gd_lock_op_phase (struct list_head *peers, glusterd_op_t op, dict_t *op_ctx,
synctask_barrier_init((&args));
peer_cnt = 0;
list_for_each_entry (peerinfo, peers, op_peers_list) {
- /* Reset lock status */
- peerinfo->locked = _gf_false;
- gd_syncop_mgmt_lock (peerinfo, &args, MY_UUID, peer_uuid);
+ if (conf->op_version < 3) {
+ /* Reset lock status */
+ peerinfo->locked = _gf_false;
+ gd_syncop_mgmt_lock (peerinfo, &args,
+ MY_UUID, peer_uuid);
+ } else
+ gd_syncop_mgmt_volume_lock (op, op_ctx, peerinfo, &args,
+ MY_UUID, peer_uuid, txn_id);
peer_cnt++;
}
gd_synctask_barrier_wait((&args), peer_cnt);
- ret = args.op_ret;
- if (ret) {
- gf_asprintf (op_errstr, "Another transaction could be "
- "in progress. Please try again after "
- "sometime.");
- gf_log (this->name, GF_LOG_ERROR, "Failed to acquire lock");
- goto out;
+
+ if (args.op_ret) {
+ if (args.errstr)
+ *op_errstr = gf_strdup (args.errstr);
+ else {
+ ret = gf_asprintf (op_errstr, "Another transaction could be "
+ "in progress. Please try again after "
+ "sometime.");
+ if (ret == -1)
+ *op_errstr = NULL;
+
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to acquire lock");
+
+ }
}
- ret = 0;
+ ret = args.op_ret;
+
+ gf_log (this->name, GF_LOG_DEBUG, "Sent lock op req for 'Volume %s' "
+ "to %d peers. Returning %d", gd_op_list[op], peer_cnt, ret);
out:
return ret;
}
@@ -1016,9 +1227,10 @@ out:
}
int
-gd_unlock_op_phase (struct list_head *peers, glusterd_op_t op, int op_ret,
+gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int op_ret,
rpcsvc_request_t *req, dict_t *op_ctx, char *op_errstr,
- int npeers, gf_boolean_t is_locked)
+ int npeers, char *volname, gf_boolean_t is_acquired,
+ uuid_t txn_id)
{
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_peerinfo_t *tmp = NULL;
@@ -1027,6 +1239,9 @@ gd_unlock_op_phase (struct list_head *peers, glusterd_op_t op, int op_ret,
int ret = -1;
xlator_t *this = NULL;
struct syncargs args = {0};
+ struct list_head *peers = NULL;
+
+ peers = &conf->xaction_peers;
if (!npeers) {
ret = 0;
@@ -1035,20 +1250,23 @@ gd_unlock_op_phase (struct list_head *peers, glusterd_op_t op, int op_ret,
/* If the lock has not been held during this
* transaction, do not send unlock requests */
- if (!is_locked)
+ if (!is_acquired)
goto out;
this = THIS;
synctask_barrier_init((&args));
peer_cnt = 0;
list_for_each_entry_safe (peerinfo, tmp, peers, op_peers_list) {
- /* Only unlock peers that were locked */
- if (peerinfo->locked) {
- gd_syncop_mgmt_unlock (peerinfo, &args, MY_UUID,
- tmp_uuid);
- peer_cnt++;
- }
-
+ if (conf->op_version < 3) {
+ /* Only unlock peers that were locked */
+ if (peerinfo->locked)
+ gd_syncop_mgmt_unlock (peerinfo, &args,
+ MY_UUID, tmp_uuid);
+ } else
+ gd_syncop_mgmt_volume_unlock (op_ctx, peerinfo,
+ &args, MY_UUID,
+ tmp_uuid, txn_id);
+ peer_cnt++;
list_del_init (&peerinfo->op_peers_list);
}
gd_synctask_barrier_wait((&args), peer_cnt);
@@ -1061,8 +1279,18 @@ gd_unlock_op_phase (struct list_head *peers, glusterd_op_t op, int op_ret,
out:
glusterd_op_send_cli_response (op, op_ret, 0, req, op_ctx, op_errstr);
glusterd_op_clear_op (op);
- if (is_locked)
- glusterd_unlock (MY_UUID);
+ if (is_acquired) {
+ /* Based on the op-version, we release the cluster or volume lock */
+ if (conf->op_version < 3)
+ glusterd_unlock (MY_UUID);
+ else {
+ ret = glusterd_volume_unlock (volname, MY_UUID);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to release lock for %s",
+ volname);
+ }
+ }
return 0;
}
@@ -1079,7 +1307,8 @@ gd_get_brick_count (struct list_head *bricks)
}
int
-gd_brick_op_phase (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict, char **op_errstr)
+gd_brick_op_phase (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
+ char **op_errstr)
{
glusterd_pending_node_t *pending_node = NULL;
struct list_head selected = {0,};
@@ -1151,21 +1380,63 @@ out:
void
gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
{
- int ret = -1;
- int npeers = 0;
- dict_t *req_dict = NULL;
- glusterd_conf_t *conf = NULL;
- glusterd_op_t op = 0;
- int32_t tmp_op = 0;
- char *op_errstr = NULL;
- xlator_t *this = NULL;
- gf_boolean_t is_locked = _gf_false;
+ int ret = -1;
+ int npeers = 0;
+ dict_t *req_dict = NULL;
+ glusterd_conf_t *conf = NULL;
+ glusterd_op_t op = 0;
+ int32_t tmp_op = 0;
+ char *op_errstr = NULL;
+ char *tmp = NULL;
+ char *volname = NULL;
+ xlator_t *this = NULL;
+ gf_boolean_t is_acquired = _gf_false;
+ uuid_t *txn_id = NULL;
+ uuid_t *originator_uuid = NULL;
+ glusterd_op_info_t txn_opinfo = {{0},};
this = THIS;
GF_ASSERT (this);
conf = this->private;
GF_ASSERT (conf);
+ /* Generate a transaction-id for this operation and
+ * save it in the dict */
+ txn_id = GF_CALLOC (1, sizeof(uuid_t), gf_common_mt_uuid_t);
+ if (!txn_id) {
+ ret = -1;
+ goto out;
+ }
+
+ uuid_generate (*txn_id);
+
+ ret = dict_set_bin (op_ctx, "transaction_id",
+ txn_id, sizeof (uuid_t));
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set transaction id.");
+ goto out;
+ } else
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Transaction_id = %s", uuid_utoa (*txn_id));
+
+ /* Save MY_UUID as the originator_uuid */
+ originator_uuid = GF_CALLOC (1, sizeof(uuid_t),
+ gf_common_mt_uuid_t);
+ if (!originator_uuid) {
+ ret = -1;
+ goto out;
+ }
+
+ uuid_copy (*originator_uuid, MY_UUID);
+ ret = dict_set_bin (op_ctx, "originator_uuid",
+ originator_uuid, sizeof (uuid_t));
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set originator_uuid.");
+ goto out;
+ }
+
ret = dict_get_int32 (op_ctx, GD_SYNC_OPCODE_KEY, &tmp_op);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Failed to get volume "
@@ -1174,26 +1445,71 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
}
op = tmp_op;
- ret = glusterd_lock (MY_UUID);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to acquire lock");
- gf_asprintf (&op_errstr, "Another transaction is in progress. "
- "Please try again after sometime.");
- goto out;
+
+ /* Based on the op_version, acquire a cluster or volume lock */
+ if (conf->op_version < 3) {
+ ret = glusterd_lock (MY_UUID);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire lock");
+ gf_asprintf (&op_errstr,
+ "Another transaction is in progress. "
+ "Please try again after sometime.");
+ goto out;
+ }
+ } else {
+
+ /* If no volname is given as a part of the command, locks will
+ * not be held */
+ ret = dict_get_str (op_ctx, "volname", &tmp);
+ if (ret) {
+ gf_log ("", GF_LOG_DEBUG, "Failed to get volume "
+ "name");
+ goto local_locking_done;
+ } else {
+ /* Use a copy of volname, as cli response will be
+ * sent before the unlock, and the volname in the
+ * dict might be removed */
+ volname = gf_strdup (tmp);
+ if (!volname)
+ goto out;
+ }
+
+ ret = glusterd_volume_lock (volname, MY_UUID);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire lock for %s", volname);
+ gf_asprintf (&op_errstr,
+ "Another transaction is in progress "
+ "for %s. Please try again after sometime.",
+ volname);
+ goto out;
+ }
}
- is_locked = _gf_true;
+ is_acquired = _gf_true;
+
+local_locking_done:
+
+ /* Save opinfo for this transaction with the transaction id */
+ txn_opinfo.op = op;
+ ret = glusterd_set_txn_opinfo (txn_id, &txn_opinfo);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set transaction's opinfo");
+
+ opinfo = txn_opinfo;
- /* storing op globally to access in synctask code paths
- * This is still acceptable, as we are performing this under
- * the 'cluster' lock*/
- glusterd_op_set_op (op);
INIT_LIST_HEAD (&conf->xaction_peers);
npeers = gd_build_peers_list (&conf->peers, &conf->xaction_peers, op);
- ret = gd_lock_op_phase (&conf->xaction_peers, op, op_ctx, &op_errstr, npeers);
- if (ret)
- goto out;
+ /* If no volname is given as a part of the command, locks will
+ * not be held */
+ if (volname) {
+ ret = gd_lock_op_phase (conf, op, op_ctx, &op_errstr, npeers, *txn_id);
+ if (ret)
+ goto out;
+ }
ret = glusterd_op_build_payload (&req_dict, &op_errstr, op_ctx);
if (ret) {
@@ -1220,8 +1536,17 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
ret = 0;
out:
- (void) gd_unlock_op_phase (&conf->xaction_peers, op, ret, req,
- op_ctx, op_errstr, npeers, is_locked);
+ (void) gd_unlock_op_phase (conf, op, ret, req, op_ctx, op_errstr,
+ npeers, volname, is_acquired, *txn_id);
+
+ /* Clearing the transaction opinfo */
+ ret = glusterd_clear_txn_opinfo (txn_id);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to clear transaction's opinfo");
+
+ if (volname)
+ GF_FREE (volname);
if (req_dict)
dict_unref (req_dict);
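
Taken together, gd_sync_task_begin () now generates a transaction id, records it (together with the originator uuid) in op_ctx, stores the opinfo under that id, and takes a per-volume lock locally before fanning the lock out to peers; gd_unlock_op_phase () releases the same lock on the way out. A condensed sketch of that local lock lifecycle, assuming op-version 3 and a command that carries a volname (illustrative helper, not part of the patch):

/* Condensed sketch of the per-volume lock lifecycle used by
 * gd_sync_task_begin () at op-version >= 3. */
static int
volume_lock_lifecycle (dict_t *op_ctx)
{
        int            ret         = -1;
        char          *tmp         = NULL;
        char          *volname     = NULL;
        gf_boolean_t   is_acquired = _gf_false;

        /* commands without a volname hold no lock at all */
        ret = dict_get_str (op_ctx, "volname", &tmp);
        if (ret)
                return 0;

        /* copy the name: the cli response goes out before the unlock
         * and may drop the dict entry */
        volname = gf_strdup (tmp);
        if (!volname)
                return -1;

        ret = glusterd_volume_lock (volname, MY_UUID);
        if (ret)
                goto out;
        is_acquired = _gf_true;

        /* ... lock, stage and commit phases run against
         * conf->xaction_peers, each request carrying txn_id ... */

out:
        if (is_acquired)
                glusterd_volume_unlock (volname, MY_UUID);
        GF_FREE (volname);
        return ret;
}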
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 7ad6cc882..b0cba4dde 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -36,6 +36,7 @@
#include "glusterd-volgen.h"
#include "glusterd-pmap.h"
#include "glusterfs-acl.h"
+#include "glusterd-locks.h"
#include "xdr-generic.h"
#include <sys/resource.h>
@@ -6834,7 +6835,7 @@ glusterd_volume_status_copy_to_op_ctx_dict (dict_t *aggr, dict_t *rsp_dict)
if (ret)
goto out;
- if (cmd & GF_CLI_STATUS_ALL && is_origin_glusterd ()) {
+ if (cmd & GF_CLI_STATUS_ALL && is_origin_glusterd (ctx_dict)) {
ret = dict_get_int32 (rsp_dict, "vol_count", &vol_count);
if (ret == 0) {
ret = dict_set_int32 (ctx_dict, "vol_count",
@@ -7545,16 +7546,31 @@ glusterd_handle_node_rsp (dict_t *req_dict, void *pending_entry,
* time a lock_owner is set
*/
gf_boolean_t
-is_origin_glusterd ()
+is_origin_glusterd (dict_t *dict)
{
- int ret = 0;
- uuid_t lock_owner = {0,};
+ gf_boolean_t ret = _gf_false;
+ uuid_t lock_owner = {0,};
+ uuid_t *originator_uuid = NULL;
- ret = glusterd_get_lock_owner (&lock_owner);
- if (ret)
- return _gf_false;
+ GF_ASSERT (dict);
+
+ ret = dict_get_bin (dict, "originator_uuid",
+ (void **) &originator_uuid);
+ if (ret) {
+ /* If no originator_uuid has been set, then the command
+ * has originated from a glusterd running on an older
+ * version. Hence fetch the lock owner */
+ ret = glusterd_get_lock_owner (&lock_owner);
+ if (ret) {
+ ret = _gf_false;
+ goto out;
+ }
+ ret = !uuid_compare (MY_UUID, lock_owner);
+ } else
+ ret = !uuid_compare (MY_UUID, *originator_uuid);
- return (uuid_compare (MY_UUID, lock_owner) == 0);
+out:
+ return ret;
}
int
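
is_origin_glusterd () now decides based on the originator_uuid stamped into the op dict, and only falls back to the lock owner for requests coming from older peers. The counterpart, setting that key on the originating node, appears in gd_sync_task_begin (); a compact sketch of the two sides (the helper name is illustrative, the dict key is the patch's):

/* Sketch: the originator stamps its uuid into the dict; any node can
 * then ask is_origin_glusterd (dict) about the same dict later. */
static int
mark_originator (dict_t *op_ctx)
{
        int      ret             = -1;
        uuid_t  *originator_uuid = NULL;

        originator_uuid = GF_CALLOC (1, sizeof (uuid_t),
                                     gf_common_mt_uuid_t);
        if (!originator_uuid)
                goto out;

        uuid_copy (*originator_uuid, MY_UUID);
        ret = dict_set_bin (op_ctx, "originator_uuid",
                            originator_uuid, sizeof (uuid_t));
        if (ret)
                GF_FREE (originator_uuid);
out:
        return ret;
}

/* later, on any node handling the same dict */
if (is_origin_glusterd (op_ctx)) {
        /* originator-only work, e.g. generating task-ids */
}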
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index e85e0eb75..b43dd67e4 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -504,7 +504,7 @@ glusterd_are_vol_all_peers_up (glusterd_volinfo_t *volinfo,
* time a lock_owner is set
*/
gf_boolean_t
-is_origin_glusterd ();
+is_origin_glusterd (dict_t *dict);
gf_boolean_t
glusterd_is_quorum_changed (dict_t *options, char *option, char *value);
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
index bb03e5ba0..a870c0b13 100644
--- a/xlators/mgmt/glusterd/src/glusterd.c
+++ b/xlators/mgmt/glusterd/src/glusterd.c
@@ -36,6 +36,7 @@
#include "glusterd-store.h"
#include "glusterd-hooks.h"
#include "glusterd-utils.h"
+#include "glusterd-locks.h"
#include "common-utils.h"
#include "run.h"
@@ -1137,6 +1138,8 @@ init (xlator_t *this)
glusterd_friend_sm_init ();
glusterd_op_sm_init ();
glusterd_opinfo_init ();
+ glusterd_vol_lock_init ();
+ glusterd_txn_opinfo_init ();
ret = glusterd_sm_tr_log_init (&conf->op_sm_log,
glusterd_op_sm_state_name_get,
glusterd_op_sm_event_name_get,
@@ -1247,6 +1250,8 @@ fini (xlator_t *this)
if (conf->handle)
gf_store_handle_destroy (conf->handle);
glusterd_sm_tr_log_delete (&conf->op_sm_log);
+ glusterd_vol_lock_fini ();
+ glusterd_txn_opinfo_fini ();
GF_FREE (conf);
this->private = NULL;
out:
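
init () and fini () now bracket the lifetime of the volume-lock and transaction-opinfo tables. Their implementations live in the new glusterd-locks.c, which is outside this hunk; the sketch below only illustrates a plausible dict-backed shape for one of the pairs and is an assumption, not the patch's code:

/* Assumed shape of the init/fini pair; the real code is in glusterd-locks.c. */
static dict_t *vol_lock = NULL;

int32_t
glusterd_vol_lock_init ()
{
        vol_lock = dict_new ();
        if (!vol_lock)
                return -1;
        return 0;
}

void
glusterd_vol_lock_fini ()
{
        if (vol_lock)
                dict_unref (vol_lock);
        vol_lock = NULL;
}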
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index e804c9ce8..4111a1de0 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -528,6 +528,14 @@ int
glusterd_op_unlock_send_resp (rpcsvc_request_t *req, int32_t status);
int
+glusterd_op_volume_lock_send_resp (rpcsvc_request_t *req,
+ uuid_t *txn_id, int32_t status);
+
+int
+glusterd_op_volume_unlock_send_resp (rpcsvc_request_t *req,
+ uuid_t *txn_id, int32_t status);
+
+int
glusterd_op_stage_send_resp (rpcsvc_request_t *req,
int32_t op, int32_t status,
char *op_errstr, dict_t *rsp_dict);
@@ -802,6 +810,11 @@ int32_t glusterd_op_begin_synctask (rpcsvc_request_t *req, glusterd_op_t op,
int32_t
glusterd_defrag_event_notify_handle (dict_t *dict);
+int32_t
+glusterd_txn_opinfo_init ();
+
+void
+glusterd_txn_opinfo_fini ();
/* snapshot */
glusterd_snap_t*
glusterd_new_snap_object();