author     Avra Sengupta <asengupt@redhat.com>    2013-10-08 21:36:06 +0530
committer  Avra Sengupta <asengupt@redhat.com>    2013-10-10 14:35:44 +0530
commit     63ff355ccd2e5fa7402963203046c0aa2608d5a5
tree       83f4b8174860c20c757ce87b77827b88a93ff90c
parent     4b466674ebf6c192cb1ce17b367fb9845c68129d
glusterd/Jarvis: Initiating complete synctask on glusterd

Jarvis is a complete synctask-based approach to how glusterd functions. Commands that use it no longer inject events into the op-state machine; instead they use the synctask framework to perform operations across all nodes in the cluster. This patch defines the RPC program and the handlers it uses.

Change-Id: Ibff2c62b0187c40cdea7254c85786297bba60372
Signed-off-by: Avra Sengupta <asengupt@redhat.com>
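For illustration only (not part of this patch): a minimal sketch, assuming the glusterd headers this patch already includes, of how an originator glusterd might walk the Jarvis phases for a single operation once the synctask plumbing is wired up. glusterd_jarvis_run_phases() is a hypothetical helper invented for this sketch; the gd_jarvis_*_fn calls are the stubs introduced below. In the full design each phase would additionally be fanned out to every peer over GD_JARVIS_MGMT_PROGRAM, which is omitted here.

/* Hypothetical sketch -- drives the Jarvis phases locally using the
 * stub functions added by this patch.  Peer fan-out over
 * GD_JARVIS_MGMT_PROGRAM is intentionally left out. */
static int
glusterd_jarvis_run_phases (glusterd_op_t op, dict_t *dict)
{
        int     ret       = -1;
        char   *op_errstr = NULL;
        dict_t *rsp_dict  = NULL;

        rsp_dict = dict_new ();
        if (!rsp_dict)
                goto out;

        /* Lock -> pre-validate -> brick op -> commit -> post-validate
         * -> unlock is the intended order; only the op phases have
         * local stubs in this patch, so only those are shown. */
        ret = gd_jarvis_pre_validate_fn (op, dict, &op_errstr, rsp_dict);
        if (ret)
                goto out;

        ret = gd_jarvis_brick_op_fn (op, dict, &op_errstr, rsp_dict);
        if (ret)
                goto out;

        ret = gd_jarvis_commit_fn (op, dict, &op_errstr, rsp_dict);
        if (ret)
                goto out;

        ret = gd_jarvis_post_validate_fn (op, dict, &op_errstr, rsp_dict);

out:
        if (rsp_dict)
                dict_unref (rsp_dict);
        if (op_errstr)
                GF_FREE (op_errstr);
        return ret;
}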
-rw-r--r--  rpc/rpc-lib/src/protocol-common.h                    |  15
-rw-r--r--  rpc/xdr/src/glusterd1-xdr.c                          | 416
-rw-r--r--  rpc/xdr/src/glusterd1-xdr.h                          | 158
-rw-r--r--  rpc/xdr/src/glusterd1-xdr.x                          |  86
-rw-r--r--  xlators/mgmt/glusterd/src/Makefile.am                |   3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handshake.c       |   2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-jarvis-handler.c  | 771
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-jarvis.c          |  67
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-jarvis.h          |  34
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rpc-ops.c         |   8
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-sm.h              |   1
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.c                 |   2
12 files changed, 1561 insertions(+), 2 deletions(-)
diff --git a/rpc/rpc-lib/src/protocol-common.h b/rpc/rpc-lib/src/protocol-common.h
index de9b7422b..1c020ae92 100644
--- a/rpc/rpc-lib/src/protocol-common.h
+++ b/rpc/rpc-lib/src/protocol-common.h
@@ -223,6 +223,17 @@ typedef enum {
GF_BD_OP_SNAPSHOT_BD,
} gf_xl_bd_op_t ;
+enum glusterd_jarvis_mgmt_procnum {
+ GLUSTERD_JARVIS_NULL, /* 0 */
+ GLUSTERD_JARVIS_VOLUME_LOCK,
+ GLUSTERD_JARVIS_PRE_VALIDATE,
+ GLUSTERD_JARVIS_BRICK_OP,
+ GLUSTERD_JARVIS_COMMIT,
+ GLUSTERD_JARVIS_POST_VALIDATE,
+ GLUSTERD_JARVIS_VOLUME_UNLOCK,
+ GLUSTERD_JARVIS_MAXVALUE,
+};
+
#define GLUSTER_HNDSK_PROGRAM 14398633 /* Completely random */
#define GLUSTER_HNDSK_VERSION 2 /* 0.0.2 */
@@ -253,4 +264,8 @@ typedef enum {
#define GD_MGMT_HNDSK_PROGRAM 1239873 /* Completely random */
#define GD_MGMT_HNDSK_VERSION 1
+/* Jarvis Protocols */
+#define GD_JARVIS_MGMT_PROGRAM 8102013 /* Completely random */
+#define GD_JARVIS_MGMT_VERSION 1
+
#endif /* !_PROTOCOL_COMMON_H */
diff --git a/rpc/xdr/src/glusterd1-xdr.c b/rpc/xdr/src/glusterd1-xdr.c
index 6c6514c90..9bf17bf4b 100644
--- a/rpc/xdr/src/glusterd1-xdr.c
+++ b/rpc/xdr/src/glusterd1-xdr.c
@@ -569,3 +569,419 @@ xdr_gd1_mgmt_volume_unlock_rsp (XDR *xdrs, gd1_mgmt_volume_unlock_rsp *objp)
return FALSE;
return TRUE;
}
+
+bool_t
+xdr_gd1_jarvis_vol_lock_req (XDR *xdrs, gd1_jarvis_vol_lock_req *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_jarvis_vol_lock_rsp (XDR *xdrs, gd1_jarvis_vol_lock_rsp *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_jarvis_pre_val_req (XDR *xdrs, gd1_jarvis_pre_val_req *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_jarvis_pre_val_rsp (XDR *xdrs, gd1_jarvis_pre_val_rsp *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+
+ if (xdrs->x_op == XDR_ENCODE) {
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+
+ } else {
+ IXDR_PUT_LONG(buf, objp->op);
+ IXDR_PUT_LONG(buf, objp->op_ret);
+ IXDR_PUT_LONG(buf, objp->op_errno);
+ }
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+ } else if (xdrs->x_op == XDR_DECODE) {
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+
+ } else {
+ objp->op = IXDR_GET_LONG(buf);
+ objp->op_ret = IXDR_GET_LONG(buf);
+ objp->op_errno = IXDR_GET_LONG(buf);
+ }
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+ }
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_jarvis_brick_op_req (XDR *xdrs, gd1_jarvis_brick_op_req *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_jarvis_brick_op_rsp (XDR *xdrs, gd1_jarvis_brick_op_rsp *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+
+ if (xdrs->x_op == XDR_ENCODE) {
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+
+ } else {
+ IXDR_PUT_LONG(buf, objp->op);
+ IXDR_PUT_LONG(buf, objp->op_ret);
+ IXDR_PUT_LONG(buf, objp->op_errno);
+ }
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+ } else if (xdrs->x_op == XDR_DECODE) {
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+
+ } else {
+ objp->op = IXDR_GET_LONG(buf);
+ objp->op_ret = IXDR_GET_LONG(buf);
+ objp->op_errno = IXDR_GET_LONG(buf);
+ }
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+ }
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_jarvis_commit_req (XDR *xdrs, gd1_jarvis_commit_req *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_jarvis_commit_rsp (XDR *xdrs, gd1_jarvis_commit_rsp *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+
+ if (xdrs->x_op == XDR_ENCODE) {
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+
+ } else {
+ IXDR_PUT_LONG(buf, objp->op);
+ IXDR_PUT_LONG(buf, objp->op_ret);
+ IXDR_PUT_LONG(buf, objp->op_errno);
+ }
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ return TRUE;
+ } else if (xdrs->x_op == XDR_DECODE) {
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+
+ } else {
+ objp->op = IXDR_GET_LONG(buf);
+ objp->op_ret = IXDR_GET_LONG(buf);
+ objp->op_errno = IXDR_GET_LONG(buf);
+ }
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ return TRUE;
+ }
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_jarvis_post_val_req (XDR *xdrs, gd1_jarvis_post_val_req *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_jarvis_post_val_rsp (XDR *xdrs, gd1_jarvis_post_val_rsp *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+
+ if (xdrs->x_op == XDR_ENCODE) {
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+
+ } else {
+ IXDR_PUT_LONG(buf, objp->op);
+ IXDR_PUT_LONG(buf, objp->op_ret);
+ IXDR_PUT_LONG(buf, objp->op_errno);
+ }
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+ } else if (xdrs->x_op == XDR_DECODE) {
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ buf = XDR_INLINE (xdrs, 3 * BYTES_PER_XDR_UNIT);
+ if (buf == NULL) {
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+
+ } else {
+ objp->op = IXDR_GET_LONG(buf);
+ objp->op_ret = IXDR_GET_LONG(buf);
+ objp->op_errno = IXDR_GET_LONG(buf);
+ }
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+ }
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ if (!xdr_string (xdrs, &objp->op_errstr, ~0))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_jarvis_vol_unlock_req (XDR *xdrs, gd1_jarvis_vol_unlock_req *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ return TRUE;
+}
+
+bool_t
+xdr_gd1_jarvis_vol_unlock_rsp (XDR *xdrs, gd1_jarvis_vol_unlock_rsp *objp)
+{
+ register int32_t *buf;
+ buf = NULL;
+
+ if (!xdr_vector (xdrs, (char *)objp->uuid, 16,
+ sizeof (u_char), (xdrproc_t) xdr_u_char))
+ return FALSE;
+ if (!xdr_bytes (xdrs, (char **)&objp->dict.dict_val, (u_int *) &objp->dict.dict_len, ~0))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_ret))
+ return FALSE;
+ if (!xdr_int (xdrs, &objp->op_errno))
+ return FALSE;
+ return TRUE;
+}
diff --git a/rpc/xdr/src/glusterd1-xdr.h b/rpc/xdr/src/glusterd1-xdr.h
index 4115ff7a8..91965e081 100644
--- a/rpc/xdr/src/glusterd1-xdr.h
+++ b/rpc/xdr/src/glusterd1-xdr.h
@@ -247,6 +247,140 @@ struct gd1_mgmt_volume_unlock_rsp {
};
typedef struct gd1_mgmt_volume_unlock_rsp gd1_mgmt_volume_unlock_rsp;
+struct gd1_jarvis_vol_lock_req {
+ u_char uuid[16];
+ int op;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gd1_jarvis_vol_lock_req gd1_jarvis_vol_lock_req;
+
+struct gd1_jarvis_vol_lock_rsp {
+ u_char uuid[16];
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+ int op_ret;
+ int op_errno;
+};
+typedef struct gd1_jarvis_vol_lock_rsp gd1_jarvis_vol_lock_rsp;
+
+struct gd1_jarvis_pre_val_req {
+ u_char uuid[16];
+ int op;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gd1_jarvis_pre_val_req gd1_jarvis_pre_val_req;
+
+struct gd1_jarvis_pre_val_rsp {
+ u_char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+ char *op_errstr;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gd1_jarvis_pre_val_rsp gd1_jarvis_pre_val_rsp;
+
+struct gd1_jarvis_brick_op_req {
+ u_char uuid[16];
+ int op;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gd1_jarvis_brick_op_req gd1_jarvis_brick_op_req;
+
+struct gd1_jarvis_brick_op_rsp {
+ u_char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+ char *op_errstr;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gd1_jarvis_brick_op_rsp gd1_jarvis_brick_op_rsp;
+
+struct gd1_jarvis_commit_req {
+ u_char uuid[16];
+ int op;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gd1_jarvis_commit_req gd1_jarvis_commit_req;
+
+struct gd1_jarvis_commit_rsp {
+ u_char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+ char *op_errstr;
+};
+typedef struct gd1_jarvis_commit_rsp gd1_jarvis_commit_rsp;
+
+struct gd1_jarvis_post_val_req {
+ u_char uuid[16];
+ int op;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gd1_jarvis_post_val_req gd1_jarvis_post_val_req;
+
+struct gd1_jarvis_post_val_rsp {
+ u_char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+ char *op_errstr;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gd1_jarvis_post_val_rsp gd1_jarvis_post_val_rsp;
+
+struct gd1_jarvis_vol_unlock_req {
+ u_char uuid[16];
+ int op;
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+};
+typedef struct gd1_jarvis_vol_unlock_req gd1_jarvis_vol_unlock_req;
+
+struct gd1_jarvis_vol_unlock_rsp {
+ u_char uuid[16];
+ struct {
+ u_int dict_len;
+ char *dict_val;
+ } dict;
+ int op_ret;
+ int op_errno;
+};
+typedef struct gd1_jarvis_vol_unlock_rsp gd1_jarvis_vol_unlock_rsp;
+
/* the xdr functions */
#if defined(__STDC__) || defined(__cplusplus)
@@ -273,6 +407,18 @@ extern bool_t xdr_gd1_mgmt_volume_lock_req (XDR *, gd1_mgmt_volume_lock_req*);
extern bool_t xdr_gd1_mgmt_volume_lock_rsp (XDR *, gd1_mgmt_volume_lock_rsp*);
extern bool_t xdr_gd1_mgmt_volume_unlock_req (XDR *, gd1_mgmt_volume_unlock_req*);
extern bool_t xdr_gd1_mgmt_volume_unlock_rsp (XDR *, gd1_mgmt_volume_unlock_rsp*);
+extern bool_t xdr_gd1_jarvis_vol_lock_req (XDR *, gd1_jarvis_vol_lock_req*);
+extern bool_t xdr_gd1_jarvis_vol_lock_rsp (XDR *, gd1_jarvis_vol_lock_rsp*);
+extern bool_t xdr_gd1_jarvis_pre_val_req (XDR *, gd1_jarvis_pre_val_req*);
+extern bool_t xdr_gd1_jarvis_pre_val_rsp (XDR *, gd1_jarvis_pre_val_rsp*);
+extern bool_t xdr_gd1_jarvis_brick_op_req (XDR *, gd1_jarvis_brick_op_req*);
+extern bool_t xdr_gd1_jarvis_brick_op_rsp (XDR *, gd1_jarvis_brick_op_rsp*);
+extern bool_t xdr_gd1_jarvis_commit_req (XDR *, gd1_jarvis_commit_req*);
+extern bool_t xdr_gd1_jarvis_commit_rsp (XDR *, gd1_jarvis_commit_rsp*);
+extern bool_t xdr_gd1_jarvis_post_val_req (XDR *, gd1_jarvis_post_val_req*);
+extern bool_t xdr_gd1_jarvis_post_val_rsp (XDR *, gd1_jarvis_post_val_rsp*);
+extern bool_t xdr_gd1_jarvis_vol_unlock_req (XDR *, gd1_jarvis_vol_unlock_req*);
+extern bool_t xdr_gd1_jarvis_vol_unlock_rsp (XDR *, gd1_jarvis_vol_unlock_rsp*);
#else /* K&R C */
extern bool_t xdr_glusterd_volume_status ();
@@ -298,6 +444,18 @@ extern bool_t xdr_gd1_mgmt_volume_lock_req ();
extern bool_t xdr_gd1_mgmt_volume_lock_rsp ();
extern bool_t xdr_gd1_mgmt_volume_unlock_req ();
extern bool_t xdr_gd1_mgmt_volume_unlock_rsp ();
+extern bool_t xdr_gd1_jarvis_vol_lock_req ();
+extern bool_t xdr_gd1_jarvis_vol_lock_rsp ();
+extern bool_t xdr_gd1_jarvis_pre_val_req ();
+extern bool_t xdr_gd1_jarvis_pre_val_rsp ();
+extern bool_t xdr_gd1_jarvis_brick_op_req ();
+extern bool_t xdr_gd1_jarvis_brick_op_rsp ();
+extern bool_t xdr_gd1_jarvis_commit_req ();
+extern bool_t xdr_gd1_jarvis_commit_rsp ();
+extern bool_t xdr_gd1_jarvis_post_val_req ();
+extern bool_t xdr_gd1_jarvis_post_val_rsp ();
+extern bool_t xdr_gd1_jarvis_vol_unlock_req ();
+extern bool_t xdr_gd1_jarvis_vol_unlock_rsp ();
#endif /* K&R C */
diff --git a/rpc/xdr/src/glusterd1-xdr.x b/rpc/xdr/src/glusterd1-xdr.x
index f29a9d214..a0cbf548d 100644
--- a/rpc/xdr/src/glusterd1-xdr.x
+++ b/rpc/xdr/src/glusterd1-xdr.x
@@ -154,3 +154,89 @@ struct gd1_mgmt_volume_unlock_rsp {
int op_ret;
int op_errno;
} ;
+
+struct gd1_jarvis_vol_lock_req {
+ unsigned char uuid[16];
+ int op;
+ opaque dict<>;
+} ;
+
+struct gd1_jarvis_vol_lock_rsp {
+ unsigned char uuid[16];
+ opaque dict<>;
+ int op_ret;
+ int op_errno;
+} ;
+
+struct gd1_jarvis_pre_val_req {
+ unsigned char uuid[16];
+ int op;
+ opaque dict<>;
+} ;
+
+struct gd1_jarvis_pre_val_rsp {
+ unsigned char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+ string op_errstr<>;
+ opaque dict<>;
+} ;
+
+struct gd1_jarvis_brick_op_req {
+ unsigned char uuid[16];
+ int op;
+ opaque dict<>;
+} ;
+
+struct gd1_jarvis_brick_op_rsp {
+ unsigned char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+ string op_errstr<>;
+ opaque dict<>;
+} ;
+
+struct gd1_jarvis_commit_req {
+ unsigned char uuid[16];
+ int op;
+ opaque dict<>;
+} ;
+
+struct gd1_jarvis_commit_rsp {
+ unsigned char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+ opaque dict<>;
+ string op_errstr<>;
+} ;
+
+struct gd1_jarvis_post_val_req {
+ unsigned char uuid[16];
+ int op;
+ opaque dict<>;
+} ;
+
+struct gd1_jarvis_post_val_rsp {
+ unsigned char uuid[16];
+ int op;
+ int op_ret;
+ int op_errno;
+ string op_errstr<>;
+ opaque dict<>;
+} ;
+
+struct gd1_jarvis_vol_unlock_req {
+ unsigned char uuid[16];
+ int op;
+ opaque dict<>;
+} ;
+
+struct gd1_jarvis_vol_unlock_rsp {
+ unsigned char uuid[16];
+ opaque dict<>;
+ int op_ret;
+ int op_errno;
+} ;
diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am
index 4e3df096c..9ee56529a 100644
--- a/xlators/mgmt/glusterd/src/Makefile.am
+++ b/xlators/mgmt/glusterd/src/Makefile.am
@@ -12,7 +12,8 @@ glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c \
glusterd-geo-rep.c glusterd-replace-brick.c glusterd-log-ops.c \
glusterd-volume-ops.c glusterd-brick-ops.c glusterd-mountbroker.c \
glusterd-syncop.c glusterd-hooks.c glusterd-volume-set.c \
- glusterd-locks.c glusterd-snapshot.c
+ glusterd-locks.c glusterd-snapshot.c glusterd-jarvis-handler.c \
+ glusterd-jarvis.c
glusterd_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
$(top_builddir)/rpc/xdr/src/libgfxdr.la \
diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c
index 2aef32ba1..cc6744eba 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c
@@ -30,6 +30,7 @@
extern struct rpc_clnt_program gd_peer_prog;
extern struct rpc_clnt_program gd_mgmt_prog;
+extern struct rpc_clnt_program gd_jarvis_prog;
#define TRUSTED_PREFIX "trusted-"
@@ -793,6 +794,7 @@ __glusterd_mgmt_hndsk_version_ack_cbk (struct rpc_req *req, struct iovec *iov,
*/
peerinfo->mgmt = &gd_mgmt_prog;
peerinfo->peer = &gd_peer_prog;
+ peerinfo->jarvis = &gd_jarvis_prog;
ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);
diff --git a/xlators/mgmt/glusterd/src/glusterd-jarvis-handler.c b/xlators/mgmt/glusterd/src/glusterd-jarvis-handler.c
new file mode 100644
index 000000000..f07b54ac6
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-jarvis-handler.c
@@ -0,0 +1,771 @@
+/*
+ Copyright (c) 2013-2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+/* rpc related syncops */
+#include "rpc-clnt.h"
+#include "protocol-common.h"
+#include "xdr-generic.h"
+#include "glusterd1-xdr.h"
+#include "glusterd-syncop.h"
+
+#include "glusterd.h"
+#include "glusterd-utils.h"
+#include "glusterd-locks.h"
+#include "glusterd-jarvis.h"
+
+static int
+glusterd_jarvis_null (rpcsvc_request_t *req)
+{
+ return 0;
+}
+
+static int
+glusterd_jarvis_vol_lock_send_resp (rpcsvc_request_t *req, int32_t status)
+{
+
+ gd1_jarvis_vol_lock_rsp rsp = {{0},};
+ int ret = -1;
+
+ GF_ASSERT (req);
+
+ rsp.op_ret = status;
+ if (rsp.op_ret)
+ rsp.op_errno = errno;
+
+ glusterd_get_uuid (&rsp.uuid);
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_jarvis_vol_lock_rsp);
+
+ gf_log (THIS->name, GF_LOG_DEBUG,
+ "Responded to volume lock, ret: %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_handle_vol_lock_fn (rpcsvc_request_t *req)
+{
+ gd1_jarvis_vol_lock_req lock_req = {{0},};
+ int32_t ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ dict_t *dict = NULL;
+ char *volname = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ ret = xdr_to_generic (req->msg[0], &lock_req,
+ (xdrproc_t)xdr_gd1_jarvis_vol_lock_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode lock "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_log (this->name, GF_LOG_DEBUG, "Received volume lock req "
+ "from uuid: %s", uuid_utoa (lock_req.uuid));
+
+ if (glusterd_friend_find_by_uuid (lock_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (lock_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ dict = dict_new ();
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize (lock_req.dict.dict_val,
+ lock_req.dict.dict_len, &dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire volname");
+ else {
+ ret = glusterd_volume_lock (volname, lock_req.uuid);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire lock for %s",
+ volname);
+
+ glusterd_jarvis_vol_lock_send_resp (req, ret);
+ }
+
+out:
+ if (dict)
+ dict_unref (dict);
+
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+static int
+glusterd_jarvis_pre_validate_send_resp (rpcsvc_request_t *req,
+ int32_t op, int32_t status,
+ char *op_errstr, dict_t *rsp_dict)
+{
+ gd1_jarvis_pre_val_rsp rsp = {{0},};
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ rsp.op_ret = status;
+ glusterd_get_uuid (&rsp.uuid);
+ rsp.op = op;
+ if (op_errstr)
+ rsp.op_errstr = op_errstr;
+ else
+ rsp.op_errstr = "";
+
+ ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to get serialized length of dict");
+ goto out;
+ }
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_jarvis_pre_val_rsp);
+
+ GF_FREE (rsp.dict.dict_val);
+out:
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Responded to pre validation, ret: %d", ret);
+ return ret;
+}
+
+static int
+glusterd_handle_pre_validate_fn (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gd1_jarvis_pre_val_req op_req = {{0},};
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ char *op_errstr = NULL;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ ret = xdr_to_generic (req->msg[0], &op_req,
+ (xdrproc_t)xdr_gd1_jarvis_pre_val_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode pre validation "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (glusterd_friend_find_by_uuid (op_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (op_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ dict = dict_new ();
+ if (!dict)
+ goto out;
+
+ ret = dict_unserialize (op_req.dict.dict_val,
+ op_req.dict.dict_len, &dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ rsp_dict = dict_new ();
+ if (!rsp_dict) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get new dictionary");
+ return -1;
+ }
+
+ ret = gd_jarvis_pre_validate_fn (op_req.op, dict, &op_errstr,
+ rsp_dict);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Pre Validation failed on operation %s",
+ gd_op_list[op_req.op]);
+ goto out;
+ }
+
+ ret = glusterd_jarvis_pre_validate_send_resp (req, op_req.op,
+ ret, op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to send Pre Validation "
+ "response for operation %s",
+ gd_op_list[op_req.op]);
+ goto out;
+ }
+
+out:
+ if (op_errstr && (strcmp (op_errstr, "")))
+ GF_FREE (op_errstr);
+
+ free (op_req.dict.dict_val);
+
+ if (rsp_dict)
+ dict_unref (rsp_dict);
+
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+static int
+glusterd_jarvis_brick_op_send_resp (rpcsvc_request_t *req,
+ int32_t op, int32_t status,
+ char *op_errstr, dict_t *rsp_dict)
+{
+ gd1_jarvis_brick_op_rsp rsp = {{0},};
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ rsp.op_ret = status;
+ glusterd_get_uuid (&rsp.uuid);
+ rsp.op = op;
+ if (op_errstr)
+ rsp.op_errstr = op_errstr;
+ else
+ rsp.op_errstr = "";
+
+ ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to get serialized length of dict");
+ goto out;
+ }
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_jarvis_brick_op_rsp);
+
+ GF_FREE (rsp.dict.dict_val);
+out:
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Responded to brick op, ret: %d", ret);
+ return ret;
+}
+
+static int
+glusterd_handle_brick_op_fn (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gd1_jarvis_brick_op_req op_req = {{0},};
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ char *op_errstr = NULL;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ ret = xdr_to_generic (req->msg[0], &op_req,
+ (xdrproc_t)xdr_gd1_jarvis_brick_op_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode brick op "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (glusterd_friend_find_by_uuid (op_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (op_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ dict = dict_new ();
+ if (!dict)
+ goto out;
+
+ ret = dict_unserialize (op_req.dict.dict_val,
+ op_req.dict.dict_len, &dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ rsp_dict = dict_new ();
+ if (!rsp_dict) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get new dictionary");
+ return -1;
+ }
+
+ ret = gd_jarvis_brick_op_fn (op_req.op, dict, &op_errstr,
+ rsp_dict);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Brick Op failed on operation %s",
+ gd_op_list[op_req.op]);
+ goto out;
+ }
+
+ ret = glusterd_jarvis_brick_op_send_resp (req, op_req.op,
+ ret, op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to send brick op "
+ "response for operation %s",
+ gd_op_list[op_req.op]);
+ goto out;
+ }
+
+out:
+ if (op_errstr && (strcmp (op_errstr, "")))
+ GF_FREE (op_errstr);
+
+ free (op_req.dict.dict_val);
+
+ if (rsp_dict)
+ dict_unref (rsp_dict);
+
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+static int
+glusterd_jarvis_commit_send_resp (rpcsvc_request_t *req,
+ int32_t op, int32_t status,
+ char *op_errstr, dict_t *rsp_dict)
+{
+ gd1_jarvis_commit_rsp rsp = {{0},};
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ rsp.op_ret = status;
+ glusterd_get_uuid (&rsp.uuid);
+ rsp.op = op;
+ if (op_errstr)
+ rsp.op_errstr = op_errstr;
+ else
+ rsp.op_errstr = "";
+
+ ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to get serialized length of dict");
+ goto out;
+ }
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_jarvis_commit_rsp);
+
+ GF_FREE (rsp.dict.dict_val);
+out:
+ gf_log (this->name, GF_LOG_DEBUG, "Responded to commit, ret: %d", ret);
+ return ret;
+}
+
+static int
+glusterd_handle_commit_fn (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gd1_jarvis_commit_req op_req = {{0},};
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ char *op_errstr = NULL;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ ret = xdr_to_generic (req->msg[0], &op_req,
+ (xdrproc_t)xdr_gd1_jarvis_commit_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode commit "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (glusterd_friend_find_by_uuid (op_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (op_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ dict = dict_new ();
+ if (!dict)
+ goto out;
+
+ ret = dict_unserialize (op_req.dict.dict_val,
+ op_req.dict.dict_len, &dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ rsp_dict = dict_new ();
+ if (!rsp_dict) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get new dictionary");
+ return -1;
+ }
+
+ ret = gd_jarvis_commit_fn (op_req.op, dict, &op_errstr,
+ rsp_dict);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "commit failed on operation %s",
+ gd_op_list[op_req.op]);
+ goto out;
+ }
+
+ ret = glusterd_jarvis_commit_send_resp (req, op_req.op,
+ ret, op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to send commit "
+ "response for operation %s",
+ gd_op_list[op_req.op]);
+ goto out;
+ }
+
+out:
+ if (op_errstr && (strcmp (op_errstr, "")))
+ GF_FREE (op_errstr);
+
+ free (op_req.dict.dict_val);
+
+ if (rsp_dict)
+ dict_unref (rsp_dict);
+
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+static int
+glusterd_jarvis_post_validate_send_resp (rpcsvc_request_t *req,
+ int32_t op, int32_t status,
+ char *op_errstr, dict_t *rsp_dict)
+{
+ gd1_jarvis_post_val_rsp rsp = {{0},};
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ rsp.op_ret = status;
+ glusterd_get_uuid (&rsp.uuid);
+ rsp.op = op;
+ if (op_errstr)
+ rsp.op_errstr = op_errstr;
+ else
+ rsp.op_errstr = "";
+
+ ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to get serialized length of dict");
+ goto out;
+ }
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_jarvis_post_val_rsp);
+
+ GF_FREE (rsp.dict.dict_val);
+out:
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Responded to post validation, ret: %d", ret);
+ return ret;
+}
+
+static int
+glusterd_handle_post_validate_fn (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gd1_jarvis_post_val_req op_req = {{0},};
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ char *op_errstr = NULL;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ ret = xdr_to_generic (req->msg[0], &op_req,
+ (xdrproc_t)xdr_gd1_jarvis_post_val_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to decode post validation "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (glusterd_friend_find_by_uuid (op_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (op_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ dict = dict_new ();
+ if (!dict)
+ goto out;
+
+ ret = dict_unserialize (op_req.dict.dict_val,
+ op_req.dict.dict_len, &dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ rsp_dict = dict_new ();
+ if (!rsp_dict) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get new dictionary");
+ return -1;
+ }
+
+ ret = gd_jarvis_post_validate_fn (op_req.op, dict, &op_errstr,
+ rsp_dict);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Post Validation failed on operation %s",
+ gd_op_list[op_req.op]);
+ goto out;
+ }
+
+ ret = glusterd_jarvis_post_validate_send_resp (req, op_req.op,
+ ret, op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to send Post Validation "
+ "response for operation %s",
+ gd_op_list[op_req.op]);
+ goto out;
+ }
+
+out:
+ if (op_errstr && (strcmp (op_errstr, "")))
+ GF_FREE (op_errstr);
+
+ free (op_req.dict.dict_val);
+
+ if (rsp_dict)
+ dict_unref (rsp_dict);
+
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+static int
+glusterd_jarvis_vol_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
+{
+
+ gd1_jarvis_vol_unlock_rsp rsp = {{0},};
+ int ret = -1;
+
+ GF_ASSERT (req);
+
+ rsp.op_ret = status;
+ if (rsp.op_ret)
+ rsp.op_errno = errno;
+
+ glusterd_get_uuid (&rsp.uuid);
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_jarvis_vol_unlock_rsp);
+
+ gf_log (THIS->name, GF_LOG_DEBUG,
+ "Responded to volume unlock, ret: %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_handle_vol_unlock_fn (rpcsvc_request_t *req)
+{
+ gd1_jarvis_vol_unlock_req unlock_req = {{0},};
+ int32_t ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ dict_t *dict = NULL;
+ char *volname = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+
+ ret = xdr_to_generic (req->msg[0], &unlock_req,
+ (xdrproc_t)xdr_gd1_jarvis_vol_unlock_req);
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to decode unlock "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_log (this->name, GF_LOG_DEBUG, "Received volume unlock req "
+ "from uuid: %s", uuid_utoa (unlock_req.uuid));
+
+ if (glusterd_friend_find_by_uuid (unlock_req.uuid, &peerinfo)) {
+ gf_log (this->name, GF_LOG_WARNING, "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa (unlock_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ dict = dict_new ();
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize (unlock_req.dict.dict_val,
+ unlock_req.dict.dict_len, &dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to acquire volname");
+ else {
+ ret = glusterd_volume_unlock (volname, unlock_req.uuid);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to release lock for %s",
+ volname);
+
+ glusterd_jarvis_vol_unlock_send_resp (req, ret);
+ }
+
+out:
+ if (dict)
+ dict_unref (dict);
+
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+static int
+glusterd_handle_vol_lock (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ glusterd_handle_vol_lock_fn);
+}
+
+static int
+glusterd_handle_pre_validate (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ glusterd_handle_pre_validate_fn);
+}
+
+static int
+glusterd_handle_brick_op (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ glusterd_handle_brick_op_fn);
+}
+
+static int
+glusterd_handle_commit (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ glusterd_handle_commit_fn);
+}
+
+static int
+glusterd_handle_post_validate (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ glusterd_handle_post_validate_fn);
+}
+
+static int
+glusterd_handle_vol_unlock (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ glusterd_handle_vol_unlock_fn);
+}
+
+rpcsvc_actor_t gd_jarvis_mgmt_actors[] = {
+ [GLUSTERD_JARVIS_NULL] = { "NULL", GLUSTERD_JARVIS_NULL, glusterd_jarvis_null, NULL, 0, DRC_NA},
+ [GLUSTERD_JARVIS_VOLUME_LOCK] = { "VOL_LOCK", GLUSTERD_JARVIS_VOLUME_LOCK, glusterd_handle_vol_lock, NULL, 0, DRC_NA},
+ [GLUSTERD_JARVIS_PRE_VALIDATE] = { "PRE_VAL", GLUSTERD_JARVIS_PRE_VALIDATE, glusterd_handle_pre_validate, NULL, 0, DRC_NA},
+ [GLUSTERD_JARVIS_BRICK_OP] = { "BRCK_OP", GLUSTERD_JARVIS_BRICK_OP, glusterd_handle_brick_op, NULL, 0, DRC_NA},
+ [GLUSTERD_JARVIS_COMMIT] = { "COMMIT", GLUSTERD_JARVIS_COMMIT, glusterd_handle_commit, NULL, 0, DRC_NA},
+ [GLUSTERD_JARVIS_POST_VALIDATE] = { "POST_VAL", GLUSTERD_JARVIS_POST_VALIDATE, glusterd_handle_post_validate, NULL, 0, DRC_NA},
+ [GLUSTERD_JARVIS_VOLUME_UNLOCK] = { "VOL_UNLOCK", GLUSTERD_JARVIS_VOLUME_UNLOCK, glusterd_handle_vol_unlock, NULL, 0, DRC_NA},
+};
+
+struct rpcsvc_program gd_jarvis_mgmt_prog = {
+ .progname = "Glusterd Jarvis Mgmt",
+ .prognum = GD_JARVIS_MGMT_PROGRAM,
+ .progver = GD_JARVIS_MGMT_VERSION,
+ .numactors = GLUSTERD_JARVIS_MAXVALUE,
+ .actors = gd_jarvis_mgmt_actors,
+ .synctask = _gf_true,
+};
diff --git a/xlators/mgmt/glusterd/src/glusterd-jarvis.c b/xlators/mgmt/glusterd/src/glusterd-jarvis.c
new file mode 100644
index 000000000..bd8d48836
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-jarvis.c
@@ -0,0 +1,67 @@
+/*
+ Copyright (c) 2013-2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+/* rpc related syncops */
+#include "rpc-clnt.h"
+#include "protocol-common.h"
+#include "xdr-generic.h"
+#include "glusterd1-xdr.h"
+#include "glusterd-syncop.h"
+
+#include "glusterd.h"
+#include "glusterd-utils.h"
+#include "glusterd-jarvis.h"
+
+int32_t
+gd_jarvis_pre_validate_fn (glusterd_op_t op, dict_t *dict,
+ char **op_errstr, dict_t *rsp_dict)
+{
+ int ret = -1;
+ xlator_t *this = THIS;
+
+ ret = 0;
+ gf_log (this->name, GF_LOG_DEBUG, "OP = %d. Returning %d", op, ret);
+ return ret;
+}
+
+int32_t
+gd_jarvis_brick_op_fn (glusterd_op_t op, dict_t *dict,
+ char **op_errstr, dict_t *rsp_dict)
+{
+ int ret = -1;
+ xlator_t *this = THIS;
+
+ ret = 0;
+ gf_log (this->name, GF_LOG_DEBUG, "OP = %d. Returning %d", op, ret);
+ return ret;
+}
+
+int32_t
+gd_jarvis_commit_fn (glusterd_op_t op, dict_t *dict,
+ char **op_errstr, dict_t *rsp_dict)
+{
+ int ret = -1;
+ xlator_t *this = THIS;
+
+ ret = 0;
+ gf_log (this->name, GF_LOG_DEBUG, "OP = %d. Returning %d", op, ret);
+ return ret;
+}
+
+int32_t
+gd_jarvis_post_validate_fn (glusterd_op_t op, dict_t *dict,
+ char **op_errstr, dict_t *rsp_dict)
+{
+ int ret = -1;
+ xlator_t *this = THIS;
+
+ ret = 0;
+ gf_log (this->name, GF_LOG_DEBUG, "OP = %d. Returning %d", op, ret);
+ return ret;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-jarvis.h b/xlators/mgmt/glusterd/src/glusterd-jarvis.h
new file mode 100644
index 000000000..032632e86
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-jarvis.h
@@ -0,0 +1,34 @@
+/*
+ Copyright (c) 2013-2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#ifndef _GLUSTERD_JARVIS_H_
+#define _GLUSTERD_JARVIS_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+int32_t
+gd_jarvis_pre_validate_fn (glusterd_op_t op, dict_t *dict,
+ char **op_errstr, dict_t *rsp_dict);
+
+int32_t
+gd_jarvis_brick_op_fn (glusterd_op_t op, dict_t *dict,
+ char **op_errstr, dict_t *rsp_dict);
+
+int32_t
+gd_jarvis_commit_fn (glusterd_op_t op, dict_t *dict,
+ char **op_errstr, dict_t *rsp_dict);
+
+int32_t
+gd_jarvis_post_validate_fn (glusterd_op_t op, dict_t *dict,
+ char **op_errstr, dict_t *rsp_dict);
+
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 3537404a8..75b5d1945 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -1934,4 +1934,10 @@ struct rpc_clnt_program gd_peer_prog = {
.numproc = GLUSTERD_FRIEND_MAXVALUE,
};
-
+struct rpc_clnt_program gd_jarvis_prog = {
+ .progname = "Glusterd Jarvis RPC Client",
+ .prognum = GD_JARVIS_MGMT_PROGRAM,
+ .progver = GD_JARVIS_MGMT_VERSION,
+ .proctable = NULL,
+ .numproc = GLUSTERD_JARVIS_MAXVALUE,
+};
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.h b/xlators/mgmt/glusterd/src/glusterd-sm.h
index e29bb7277..5eee09adb 100644
--- a/xlators/mgmt/glusterd/src/glusterd-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.h
@@ -97,6 +97,7 @@ struct glusterd_peerinfo_ {
struct rpc_clnt *rpc;
rpc_clnt_prog_t *mgmt;
rpc_clnt_prog_t *peer;
+ rpc_clnt_prog_t *jarvis;
int connected;
gf_store_handle_t *shandle;
glusterd_sm_tr_log_t sm_log;
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
index a870c0b13..20f6956ec 100644
--- a/xlators/mgmt/glusterd/src/glusterd.c
+++ b/xlators/mgmt/glusterd/src/glusterd.c
@@ -47,6 +47,7 @@
extern struct rpcsvc_program gluster_handshake_prog;
extern struct rpcsvc_program gluster_pmap_prog;
extern glusterd_op_info_t opinfo;
+extern struct rpcsvc_program gd_jarvis_mgmt_prog;
extern struct rpcsvc_program gd_svc_mgmt_prog;
extern struct rpcsvc_program gd_svc_peer_prog;
extern struct rpcsvc_program gd_svc_cli_prog;
@@ -63,6 +64,7 @@ struct rpcsvc_program *all_programs[] = {
&gd_svc_peer_prog,
&gd_svc_cli_prog,
&gd_svc_mgmt_prog,
+ &gd_jarvis_mgmt_prog,
&gluster_pmap_prog,
&gluster_handshake_prog,
&glusterd_mgmt_hndsk_prog,