Diffstat (limited to 'xlators/mgmt/glusterd')
-rw-r--r--   xlators/mgmt/glusterd/Makefile.am                    3
-rw-r--r--   xlators/mgmt/glusterd/src/Makefile.am               17
-rw-r--r--   xlators/mgmt/glusterd/src/gd-xdr.c                 161
-rw-r--r--   xlators/mgmt/glusterd/src/gd-xdr.h                  83
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-ha.c            138
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-ha.h             47
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-handler.c      1388
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-mem-types.h      51
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-op-sm.c        1041
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-op-sm.h         167
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-sm.c            384
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-sm.h            102
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-utils.c         436
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-utils.h          80
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd.c               478
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd.h               218
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd3_1-mops.c       938
17 files changed, 5732 insertions, 0 deletions
diff --git a/xlators/mgmt/glusterd/Makefile.am b/xlators/mgmt/glusterd/Makefile.am
new file mode 100644
index 00000000000..d471a3f9243
--- /dev/null
+++ b/xlators/mgmt/glusterd/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = src
+
+CLEANFILES =
diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am
new file mode 100644
index 00000000000..defeca712b6
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/Makefile.am
@@ -0,0 +1,17 @@
+xlator_LTLIBRARIES = glusterd.la
+xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/mgmt
+glusterd_la_LDFLAGS = -module -avoidversion
+glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c glusterd-op-sm.c \
+ glusterd-utils.c glusterd3_1-mops.c gd-xdr.c glusterd-ha.c
+glusterd_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la -luuid \
+ $(top_builddir)/xlators/protocol/lib/src/libgfproto1.la\
+ $(top_builddir)/rpc/rpc-lib/src/libgfrpc.la
+
+noinst_HEADERS = glusterd.h gd-xdr.h glusterd-utils.h glusterd-op-sm.h glusterd-sm.h glusterd-ha.h glusterd-mem-types.h
+
+AM_CFLAGS = -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -D$(GF_HOST_OS)\
+ -I$(top_srcdir)/libglusterfs/src -shared -nostartfiles $(GF_CFLAGS)\
+ -I$(rpclibdir) -L$(xlatordir)/ -I$(CONTRIBDIR)/rbtree -I$(top_srcdir)/xlators/protocol/lib/src\
+ -I$(top_srcdir)/rpc/rpc-lib/src
+
+CLEANFILES =
diff --git a/xlators/mgmt/glusterd/src/gd-xdr.c b/xlators/mgmt/glusterd/src/gd-xdr.c
new file mode 100644
index 00000000000..4e1bfd07e6f
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/gd-xdr.c
@@ -0,0 +1,161 @@
+/*
+ Copyright (c) 2007-2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#include "gd-xdr.h"
+
+
+ssize_t
+gd_xdr_serialize_mgmt_probe_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gd1_mgmt_probe_rsp);
+
+}
+
+ssize_t
+gd_xdr_serialize_mgmt_friend_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
+
+}
+
+ssize_t
+gd_xdr_serialize_mgmt_cluster_lock_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_lock_rsp);
+
+}
+
+ssize_t
+gd_xdr_serialize_mgmt_cluster_unlock_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_rsp);
+
+}
+
+ssize_t
+gd_xdr_serialize_mgmt_stage_op_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gd1_mgmt_stage_op_rsp);
+
+}
+
+ssize_t
+gd_xdr_serialize_mgmt_commit_op_rsp (struct iovec outmsg, void *rsp)
+{
+ return xdr_serialize_generic (outmsg, (void *)rsp,
+ (xdrproc_t)xdr_gd1_mgmt_commit_op_rsp);
+
+}
+/* Decode */
+
+
+ssize_t
+gd_xdr_to_mgmt_probe_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gd1_mgmt_probe_req);
+}
+
+ssize_t
+gd_xdr_to_mgmt_friend_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gd1_mgmt_friend_req);
+}
+
+ssize_t
+gd_xdr_to_mgmt_cluster_lock_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_lock_req);
+}
+
+ssize_t
+gd_xdr_to_mgmt_cluster_unlock_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_req);
+}
+
+ssize_t
+gd_xdr_to_mgmt_stage_op_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gd1_mgmt_stage_op_req);
+}
+
+
+ssize_t
+gd_xdr_to_mgmt_commit_op_req (struct iovec inmsg, void *args)
+{
+ return xdr_to_generic (inmsg, (void *)args,
+ (xdrproc_t)xdr_gd1_mgmt_commit_op_req);
+}
+ssize_t
+gd_xdr_from_mgmt_probe_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gd1_mgmt_probe_req);
+
+}
+
+ssize_t
+gd_xdr_from_mgmt_friend_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gd1_mgmt_friend_req);
+
+}
+
+ssize_t
+gd_xdr_from_mgmt_cluster_lock_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_lock_req);
+
+}
+
+ssize_t
+gd_xdr_from_mgmt_cluster_unlock_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_req);
+
+}
+
+ssize_t
+gd_xdr_from_mgmt_stage_op_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gd1_mgmt_stage_op_req);
+}
+
+
+ssize_t
+gd_xdr_from_mgmt_commit_op_req (struct iovec outmsg, void *req)
+{
+ return xdr_serialize_generic (outmsg, (void *)req,
+ (xdrproc_t)xdr_gd1_mgmt_commit_op_req);
+}
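
The wrappers above are thin typed shims over xdr_serialize_generic()/xdr_to_generic(): each one pairs a gd1_mgmt_* message with its xdrproc_t. A minimal decode sketch, mirroring how the handlers later in this change consume them (req is assumed to be the rpcsvc_request_t whose first fragment, req->msg[0], carries the message):

    gd1_mgmt_probe_req  probe_req = {{0},};
    char                hostname[1024] = {0,};

    probe_req.hostname = hostname;          /* XDR decodes into a caller-owned buffer */
    if (!gd_xdr_to_mgmt_probe_req (req->msg[0], &probe_req)) {
            req->rpc_err = GARBAGE_ARGS;    /* decode failed */
            return -1;
    }
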
diff --git a/xlators/mgmt/glusterd/src/gd-xdr.h b/xlators/mgmt/glusterd/src/gd-xdr.h
new file mode 100644
index 00000000000..55e2f8e6dc3
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/gd-xdr.h
@@ -0,0 +1,83 @@
+/*
+ Copyright (c) 2007-2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _MSG_GD_XDR_H
+#define _MSG_GD_XDR_H
+
+#include <sys/uio.h>
+
+#include "msg-xdr.h"
+#include "glusterd1.h"
+
+ssize_t
+gd_xdr_to_mgmt_probe_req (struct iovec inmsg, void *args);
+
+ssize_t
+gd_xdr_serialize_mgmt_probe_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gd_xdr_from_mgmt_probe_req (struct iovec outmsg, void *req);
+
+ssize_t
+gd_xdr_to_mgmt_friend_req (struct iovec inmsg, void *args);
+
+ssize_t
+gd_xdr_serialize_mgmt_friend_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gd_xdr_from_mgmt_friend_req (struct iovec outmsg, void *req);
+
+ssize_t
+gd_xdr_to_mgmt_cluster_lock_req (struct iovec inmsg, void *args);
+
+ssize_t
+gd_xdr_serialize_mgmt_cluster_lock_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gd_xdr_from_mgmt_cluster_lock_req (struct iovec outmsg, void *req);
+
+ssize_t
+gd_xdr_to_mgmt_cluster_unlock_req (struct iovec inmsg, void *args);
+
+ssize_t
+gd_xdr_serialize_mgmt_cluster_unlock_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gd_xdr_from_mgmt_cluster_unlock_req (struct iovec outmsg, void *req);
+
+ssize_t
+gd_xdr_to_mgmt_stage_op_req (struct iovec inmsg, void *args);
+
+ssize_t
+gd_xdr_serialize_mgmt_stage_op_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gd_xdr_from_mgmt_stage_op_req (struct iovec outmsg, void *req);
+
+ssize_t
+gd_xdr_to_mgmt_commit_op_req (struct iovec inmsg, void *args);
+
+ssize_t
+gd_xdr_serialize_mgmt_commit_op_rsp (struct iovec outmsg, void *rsp);
+
+ssize_t
+gd_xdr_from_mgmt_commit_op_req (struct iovec outmsg, void *req);
+
+#endif /* !_MSG_GD_XDR_H */
diff --git a/xlators/mgmt/glusterd/src/glusterd-ha.c b/xlators/mgmt/glusterd/src/glusterd-ha.c
new file mode 100644
index 00000000000..1c049e5f720
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-ha.c
@@ -0,0 +1,138 @@
+/*
+ Copyright (c) 2007-2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+#include <inttypes.h>
+
+
+#include "globals.h"
+#include "glusterfs.h"
+#include "compat.h"
+#include "dict.h"
+#include "protocol-common.h"
+//#include "transport.h"
+#include "xlator.h"
+#include "logging.h"
+#include "timer.h"
+#include "defaults.h"
+#include "compat.h"
+#include "compat-errno.h"
+#include "statedump.h"
+#include "glusterd-mem-types.h"
+#include "glusterd.h"
+#include "glusterd-sm.h"
+#include "glusterd-op-sm.h"
+#include "glusterd-utils.h"
+#include "glusterd-ha.h"
+#include "gd-xdr.h"
+#include "cli-xdr.h"
+#include "rpc-clnt.h"
+
+#include <sys/resource.h>
+#include <inttypes.h>
+
+int32_t
+glusterd_ha_create_volume (glusterd_volinfo_t *volinfo)
+{
+ char pathname[PATH_MAX] = {0,};
+ int32_t ret = -1;
+ char filepath[PATH_MAX] = {0,};
+ char buf[4096] = {0,};
+ int fd = -1;
+
+ GF_ASSERT (volinfo);
+ snprintf (pathname, 1024, "%s/vols/%s", GLUSTERD_DEFAULT_WORKDIR,
+ volinfo->volname);
+
+        ret = mkdir (pathname, 0777);
+
+ if (-1 == ret) {
+ gf_log ("", GF_LOG_ERROR, "mkdir() failed on path %s,"
+ "errno: %d", pathname, errno);
+ goto out;
+ }
+
+ snprintf (filepath, 1024, "%s/info", pathname);
+
+ fd = open (filepath, O_RDWR | O_CREAT | O_APPEND, 0644);
+
+ if (-1 == fd) {
+ gf_log ("", GF_LOG_ERROR, "open() failed on path %s,"
+ "errno: %d", filepath, errno);
+ ret = -1;
+ goto out;
+ }
+
+ snprintf (buf, 4096, "type=%d\n", volinfo->type);
+ ret = write (fd, buf, strlen (buf));
+ snprintf (buf, 4096, "count=%d\n", volinfo->brick_count);
+ ret = write (fd, buf, strlen (buf));
+ close (fd);
+
+ ret = 0;
+
+out:
+ if (ret) {
+ glusterd_ha_delete_volume (volinfo);
+ }
+
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+
+int32_t
+glusterd_ha_delete_volume (glusterd_volinfo_t *volinfo)
+{
+ char pathname[PATH_MAX] = {0,};
+ int32_t ret = -1;
+ char filepath[PATH_MAX] = {0,};
+
+ GF_ASSERT (volinfo);
+ snprintf (pathname, 1024, "%s/vols/%s", GLUSTERD_DEFAULT_WORKDIR,
+ volinfo->volname);
+
+ snprintf (filepath, 1024, "%s/info", pathname);
+ ret = unlink (filepath);
+
+ if (-1 == ret) {
+ gf_log ("", GF_LOG_ERROR, "unlink() failed on path %s,"
+ "errno: %d", filepath, errno);
+ goto out;
+ }
+
+ ret = rmdir (pathname);
+
+ if (-1 == ret) {
+ gf_log ("", GF_LOG_ERROR, "rmdir() failed on path %s,"
+ "errno: %d", pathname, errno);
+ goto out;
+ }
+
+out:
+
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+
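
glusterd_ha_create_volume() persists each volume as a flat key=value file at GLUSTERD_DEFAULT_WORKDIR/vols/<volname>/info. Given the two fields written above, the info file for a hypothetical two-brick plain-distribute volume would look like this (values illustrative; type is the numeric gf_cluster_type value):

    type=0
    count=2
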
diff --git a/xlators/mgmt/glusterd/src/glusterd-ha.h b/xlators/mgmt/glusterd/src/glusterd-ha.h
new file mode 100644
index 00000000000..6b01f90600c
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-ha.h
@@ -0,0 +1,47 @@
+/*
+ Copyright (c) 2006-2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _GLUSTERD_HA_H_
+#define _GLUSTERD_HA_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <pthread.h>
+#include <uuid/uuid.h>
+
+#include "glusterfs.h"
+#include "xlator.h"
+#include "logging.h"
+#include "call-stub.h"
+#include "authenticate.h"
+#include "fd.h"
+#include "byte-order.h"
+#include "glusterd.h"
+#include "rpcsvc.h"
+
+int32_t
+glusterd_ha_create_volume (glusterd_volinfo_t *volinfo);
+
+int32_t
+glusterd_ha_delete_volume (glusterd_volinfo_t *volinfo);
+
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
new file mode 100644
index 00000000000..00067a566d4
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -0,0 +1,1388 @@
+/*
+ Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+#include <inttypes.h>
+
+
+#include "globals.h"
+#include "glusterfs.h"
+#include "compat.h"
+#include "dict.h"
+#include "protocol-common.h"
+#include "xlator.h"
+#include "logging.h"
+#include "timer.h"
+#include "defaults.h"
+#include "compat.h"
+#include "compat-errno.h"
+#include "statedump.h"
+#include "glusterd-mem-types.h"
+#include "glusterd.h"
+#include "glusterd-sm.h"
+#include "glusterd-op-sm.h"
+#include "glusterd-utils.h"
+#include "gd-xdr.h"
+#include "cli-xdr.h"
+#include "rpc-clnt.h"
+
+#include <sys/resource.h>
+#include <inttypes.h>
+
+/* for default_*_cbk functions */
+#include "defaults.c"
+#include "common-utils.h"
+
+
+/*typedef int32_t (*glusterd_mop_t) (call_frame_t *frame,
+ gf_hdr_common_t *hdr, size_t hdrlen);*/
+
+//static glusterd_mop_t glusterd_ops[GF_MOP_MAXVALUE];
+
+
+
+static int
+glusterd_friend_find_by_hostname (const char *hoststr,
+ glusterd_peerinfo_t **peerinfo)
+{
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ glusterd_peerinfo_t *entry = NULL;
+
+ GF_ASSERT (hoststr);
+ GF_ASSERT (peerinfo);
+
+ *peerinfo = NULL;
+ priv = THIS->private;
+
+ GF_ASSERT (priv);
+
+ list_for_each_entry (entry, &priv->peers, uuid_list) {
+                if (entry->hostname &&
+                    (!strcmp (entry->hostname, hoststr))) {
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Friend %s found.. state: %d", hoststr,
+ entry->state.state);
+ *peerinfo = entry;
+ return 0;
+ }
+ }
+
+ return ret;
+}
+
+static int
+glusterd_friend_find_by_uuid (uuid_t uuid,
+ glusterd_peerinfo_t **peerinfo)
+{
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ glusterd_peerinfo_t *entry = NULL;
+
+ GF_ASSERT (peerinfo);
+
+ *peerinfo = NULL;
+ priv = THIS->private;
+
+ GF_ASSERT (priv);
+
+ list_for_each_entry (entry, &priv->peers, uuid_list) {
+ if (!uuid_compare (entry->uuid, uuid)) {
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Friend found.. state: %d",
+ entry->state.state);
+ *peerinfo = entry;
+ return 0;
+ }
+ }
+
+ return ret;
+}
+
+static int
+glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid, char *hostname)
+{
+ int ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_friend_sm_event_t *event = NULL;
+ glusterd_friend_req_ctx_t *ctx = NULL;
+
+
+ ret = glusterd_friend_find (uuid, hostname, &peerinfo);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Unable to find peer");
+
+ }
+
+ ret = glusterd_friend_sm_new_event
+ (GD_FRIEND_EVENT_RCVD_FRIEND_REQ, &event);
+
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "event generation failed: %d", ret);
+ return ret;
+ }
+
+ event->peerinfo = peerinfo;
+
+ ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_friend_req_ctx_t);
+
+ if (!ctx) {
+ gf_log ("", GF_LOG_ERROR, "Unable to allocate memory");
+ ret = -1;
+ goto out;
+ }
+
+ uuid_copy (ctx->uuid, uuid);
+ if (hostname)
+ ctx->hostname = gf_strdup (hostname);
+ ctx->req = req;
+
+ event->ctx = ctx;
+
+ ret = glusterd_friend_sm_inject_event (event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR, "Unable to inject event %d, "
+ "ret = %d", event->event, ret);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ if (0 != ret) {
+ if (ctx && ctx->hostname)
+ GF_FREE (ctx->hostname);
+ if (ctx)
+ GF_FREE (ctx);
+ }
+
+ return ret;
+}
+
+
+
+
+
+int
+glusterd_friend_find (uuid_t uuid, char *hostname,
+ glusterd_peerinfo_t **peerinfo)
+{
+ int ret = -1;
+
+ if (uuid) {
+ ret = glusterd_friend_find_by_uuid (uuid, peerinfo);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Unable to find peer by uuid");
+ } else {
+ goto out;
+ }
+
+ }
+
+ if (hostname) {
+ ret = glusterd_friend_find_by_hostname (hostname, peerinfo);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Unable to find hostname: %s", hostname);
+ } else {
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
+int
+glusterd_handle_cluster_lock (rpcsvc_request_t *req)
+{
+ gd1_mgmt_cluster_lock_req lock_req = {{0},};
+ int32_t ret = -1;
+ char str[50] = {0,};
+ glusterd_op_sm_event_t *event = NULL;
+ glusterd_op_lock_ctx_t *ctx = NULL;
+
+ GF_ASSERT (req);
+
+        if (!gd_xdr_to_mgmt_cluster_lock_req (req->msg[0], &lock_req)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+ uuid_unparse (lock_req.uuid, str);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received LOCK from uuid: %s", str);
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_LOCK, &event);
+
+ if (ret) {
+ //respond back here
+ return ret;
+ }
+
+        ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
+
+ if (!ctx) {
+ //respond here
+ return -1;
+ }
+
+ uuid_copy (ctx->uuid, lock_req.uuid);
+ ctx->req = req;
+ event->ctx = ctx;
+
+ ret = glusterd_op_sm_inject_event (event);
+
+out:
+ gf_log ("", GF_LOG_NORMAL, "Returning %d", ret);
+
+ return ret;
+}
+
+int
+glusterd_handle_stage_op (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ char str[50];
+ gd1_mgmt_stage_op_req *stage_req = NULL;
+ glusterd_op_sm_event_t *event = NULL;
+ glusterd_op_stage_ctx_t *ctx = NULL;
+
+ GF_ASSERT (req);
+
+ stage_req = GF_CALLOC (1, sizeof (*stage_req),
+ gf_gld_mt_mop_stage_req_t);
+
+ GF_ASSERT (stage_req);
+
+ if (!gd_xdr_to_mgmt_stage_op_req (req->msg[0], stage_req)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ uuid_unparse (stage_req->uuid, str);
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received stage op from uuid: %s", str);
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_STAGE_OP, &event);
+
+ if (ret) {
+ //respond back here
+ return ret;
+ }
+
+ ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_stage_ctx_t);
+
+ if (!ctx) {
+ //respond here
+ return -1;
+ }
+
+        ctx->stage_req = stage_req;
+ ctx->req = req;
+ event->ctx = ctx;
+
+ ret = glusterd_op_sm_inject_event (event);
+
+out:
+ return ret;
+}
+
+int
+glusterd_handle_commit_op (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ char str[50];
+ glusterd_op_sm_event_t *event = NULL;
+ gd1_mgmt_commit_op_req commit_req = {{0},};
+ glusterd_op_commit_ctx_t *ctx = NULL;
+
+ GF_ASSERT (req);
+
+ if (!gd_xdr_to_mgmt_commit_op_req (req->msg[0], &commit_req)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ uuid_unparse (commit_req.uuid, str);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received commit op from uuid: %s", str);
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_COMMIT_OP, &event);
+
+ if (ret) {
+ //respond back here
+ return ret;
+ }
+
+ ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_commit_ctx_t);
+
+ if (!ctx) {
+ //respond here
+ return -1;
+ }
+
+ ctx->req = req;
+ //CHANGE THIS
+ ctx->stage_req = &commit_req;
+ event->ctx = ctx;
+
+ ret = glusterd_op_sm_inject_event (event);
+
+out:
+ return ret;
+}
+
+int
+glusterd_handle_cli_probe (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gf1_cli_probe_req cli_req = {0,};
+
+ GF_ASSERT (req);
+
+ if (!gf_xdr_to_cli_probe_req (req->msg[0], &cli_req)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Received CLI probe req");
+
+
+ ret = glusterd_probe_begin (req, cli_req.hostname);
+
+out:
+ return ret;
+}
+
+int
+glusterd_handle_create_volume (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gf1_cli_create_vol_req cli_req = {0,};
+ dict_t *dict = NULL;
+
+ GF_ASSERT (req);
+
+ if (!gf_xdr_to_cli_create_vol_req (req->msg[0], &cli_req)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Received create volume req");
+
+ if (cli_req.bricks.bricks_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new ();
+
+ ret = dict_unserialize (cli_req.bricks.bricks_val,
+ cli_req.bricks.bricks_len,
+ &dict);
+ if (ret < 0) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ goto out;
+ }
+ }
+
+ ret = glusterd_create_volume (req, dict);
+
+out:
+ return ret;
+}
+
+int
+glusterd_op_lock_send_resp (rpcsvc_request_t *req, int32_t status)
+{
+
+ gd1_mgmt_cluster_lock_rsp rsp = {{0},};
+ int ret = -1;
+
+ GF_ASSERT (req);
+ glusterd_get_uuid (&rsp.uuid);
+ rsp.op_ret = status;
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ gd_xdr_serialize_mgmt_cluster_lock_rsp);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Responded, ret: %d", ret);
+
+ return 0;
+}
+
+int
+glusterd_op_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
+{
+
+ gd1_mgmt_cluster_unlock_rsp rsp = {{0},};
+ int ret = -1;
+
+ GF_ASSERT (req);
+ rsp.op_ret = status;
+ glusterd_get_uuid (&rsp.uuid);
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ gd_xdr_serialize_mgmt_cluster_unlock_rsp);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Responded to unlock, ret: %d", ret);
+
+ return ret;
+}
+
+int
+glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
+{
+ gd1_mgmt_cluster_unlock_req unlock_req = {{0}, };
+ int32_t ret = -1;
+ char str[50] = {0, };
+ glusterd_op_lock_ctx_t *ctx = NULL;
+ glusterd_op_sm_event_t *event = NULL;
+
+ GF_ASSERT (req);
+
+        if (!gd_xdr_to_mgmt_cluster_unlock_req (req->msg[0], &unlock_req)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ uuid_unparse (unlock_req.uuid, str);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received UNLOCK from uuid: %s", str);
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_UNLOCK, &event);
+
+ if (ret) {
+ //respond back here
+ return ret;
+ }
+
+ ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
+
+ if (!ctx) {
+ //respond here
+ return -1;
+ }
+ event->ctx = ctx;
+ uuid_copy (ctx->uuid, unlock_req.uuid);
+ ctx->req = req;
+
+ ret = glusterd_op_sm_inject_event (event);
+
+out:
+ return ret;
+}
+
+int
+glusterd_op_stage_send_resp (rpcsvc_request_t *req,
+ int32_t op, int32_t status)
+{
+
+ gd1_mgmt_stage_op_rsp rsp = {{0},};
+ int ret = -1;
+
+ GF_ASSERT (req);
+ rsp.op_ret = status;
+ glusterd_get_uuid (&rsp.uuid);
+ rsp.op = op;
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ gd_xdr_serialize_mgmt_stage_op_rsp);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Responded to stage, ret: %d", ret);
+
+ return ret;
+}
+
+int
+glusterd_op_commit_send_resp (rpcsvc_request_t *req,
+ int32_t op, int32_t status)
+{
+ gd1_mgmt_commit_op_rsp rsp = {{0}, };
+ int ret = -1;
+
+ GF_ASSERT (req);
+ rsp.op_ret = status;
+ glusterd_get_uuid (&rsp.uuid);
+ rsp.op = op;
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ gd_xdr_serialize_mgmt_commit_op_rsp);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Responded to commit, ret: %d", ret);
+
+ return ret;
+}
+
+int
+glusterd_handle_incoming_friend_req (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gd1_mgmt_friend_req friend_req = {{0},};
+ char str[50];
+
+ GF_ASSERT (req);
+ if (!gd_xdr_to_mgmt_friend_req (req->msg[0], &friend_req)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+ uuid_unparse (friend_req.uuid, str);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received probe from uuid: %s", str);
+
+ ret = glusterd_handle_friend_req (req, friend_req.uuid,
+ friend_req.hostname);
+
+out:
+
+ return ret;
+}
+
+int
+glusterd_handle_probe_query (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ char str[50];
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ gd1_mgmt_probe_req probe_req = {{0},};
+ gd1_mgmt_probe_rsp rsp = {{0},};
+ char hostname[1024] = {0};
+
+ GF_ASSERT (req);
+
+ probe_req.hostname = hostname;
+
+ if (!gd_xdr_to_mgmt_probe_req (req->msg[0], &probe_req)) {
+ //failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+
+ uuid_unparse (probe_req.uuid, str);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received probe from uuid: %s", str);
+
+
+ this = THIS;
+
+ conf = this->private;
+
+ uuid_copy (rsp.uuid, conf->uuid);
+ rsp.hostname = gf_strdup (probe_req.hostname);
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ gd_xdr_serialize_mgmt_probe_rsp);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Responded to %s, ret: %d", probe_req.hostname, ret);
+
+out:
+ return ret;
+}
+
+/*int
+glusterd_handle_friend_req_resp (call_frame_t *frame,
+ gf_hdr_common_t *rsp_hdr, size_t hdrlen)
+{
+ gf_mop_probe_rsp_t *rsp = NULL;
+ int32_t ret = -1;
+ char str[50];
+ glusterd_peerinfo_t *peerinfo = NULL;
+ int32_t op_ret = -1;
+ glusterd_friend_sm_event_type_t event_type = GD_FRIEND_EVENT_NONE;
+ glusterd_friend_sm_event_t *event = NULL;
+
+ GF_ASSERT (rsp_hdr);
+
+ rsp = gf_param (rsp_hdr);
+ uuid_unparse (rsp->uuid, str);
+
+ op_ret = rsp_hdr->rsp.op_ret;
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received %s from uuid: %s, host: %s",
+ (op_ret)?"RJT":"ACC", str, rsp->hostname);
+
+ ret = glusterd_friend_find (rsp->uuid, rsp->hostname, &peerinfo);
+
+ if (ret) {
+ GF_ASSERT (0);
+ }
+
+ if (op_ret)
+ event_type = GD_FRIEND_EVENT_RCVD_ACC;
+ else
+ event_type = GD_FRIEND_EVENT_RCVD_RJT;
+
+ ret = glusterd_friend_sm_new_event (event_type, &event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to get event");
+ return ret;
+ }
+
+ event->peerinfo = peerinfo;
+ ret = glusterd_friend_sm_inject_event (event);
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Received resp to friend req");
+
+ return 0;
+}*/
+
+/*int
+glusterd_handle_probe_resp (call_frame_t *frame, gf_hdr_common_t *rsp_hdr,
+ size_t hdrlen)
+{
+ gf_mop_probe_rsp_t *rsp = NULL;
+ int32_t ret = -1;
+ char str[50];
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_friend_sm_event_t *event = NULL;
+ glusterd_peerinfo_t *dup_peerinfo = NULL;
+
+ GF_ASSERT (rsp_hdr);
+
+ rsp = gf_param (rsp_hdr);
+ uuid_unparse (rsp->uuid, str);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received probe resp from uuid: %s, host: %s",
+ str, rsp->hostname);
+
+ ret = glusterd_friend_find (rsp->uuid, rsp->hostname, &peerinfo);
+
+ if (ret) {
+ GF_ASSERT (0);
+ }
+
+ if (!peerinfo->hostname) {
+ glusterd_friend_find_by_hostname (rsp->hostname, &dup_peerinfo);
+ GF_ASSERT (dup_peerinfo);
+ GF_ASSERT (dup_peerinfo->hostname);
+ peerinfo->hostname = gf_strdup (rsp->hostname);
+ peerinfo->trans = dup_peerinfo->trans;
+ list_del_init (&dup_peerinfo->uuid_list);
+ GF_FREE (dup_peerinfo->hostname);
+ GF_FREE (dup_peerinfo);
+ }
+ GF_ASSERT (peerinfo->hostname);
+ uuid_copy (peerinfo->uuid, rsp->uuid);
+
+ ret = glusterd_friend_sm_new_event
+ (GD_FRIEND_EVENT_INIT_FRIEND_REQ, &event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to get event");
+ return ret;
+ }
+
+ event->peerinfo = peerinfo;
+ ret = glusterd_friend_sm_inject_event (event);
+
+ return 0;
+}*/
+
+/*
+static glusterd_mop_t glusterd_ops[GF_MOP_MAXVALUE] = {
+ [GF_MOP_PROBE_QUERY] = glusterd_handle_probe_query,
+ [GF_MOP_FRIEND_REQ] = glusterd_handle_incoming_friend_req,
+ [GF_MOP_STAGE_OP] = glusterd_handle_stage_op,
+ [GF_MOP_COMMIT_OP] = glusterd_handle_commit_op,
+ [GF_MOP_CLUSTER_LOCK] = glusterd_handle_cluster_lock,
+ [GF_MOP_CLUSTER_UNLOCK] = glusterd_handle_cluster_unlock,
+};
+
+static glusterd_mop_t glusterd_resp_ops [GF_MOP_MAXVALUE] = {
+ [GF_MOP_PROBE_QUERY] = glusterd_handle_probe_resp,
+ [GF_MOP_FRIEND_REQ] = glusterd_handle_friend_req_resp,
+};
+*/
+
+/*int
+glusterd_xfer_probe_msg (glusterd_peerinfo_t *peerinfo, xlator_t *this)
+{
+ gf_hdr_common_t *hdr = NULL;
+ gf_mop_probe_req_t *req = NULL;
+ size_t hdrlen = -1;
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ call_frame_t *dummy_frame = NULL;
+ int len = 0;
+
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (this);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ len = STRLEN_0 (peerinfo->hostname);
+ hdrlen = gf_hdr_len (req, len);
+ hdr = gf_hdr_new (req, len);
+
+ GF_VALIDATE_OR_GOTO (this->name, hdr, unwind);
+
+ req = gf_param (hdr);
+ memcpy (&req->uuid, &priv->uuid, sizeof(uuid_t));
+ strncpy (req->hostname, peerinfo->hostname, len);
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+
+ if (!dummy_frame)
+ goto unwind;
+
+ dummy_frame->local = peerinfo->trans;
+
+ ret = glusterd_xfer (dummy_frame, this,
+ peerinfo->trans,
+ GF_OP_TYPE_MOP_REQUEST, GF_MOP_PROBE_QUERY,
+ hdr, hdrlen, NULL, 0, NULL);
+
+ return ret;
+
+unwind:
+ if (hdr)
+ GF_FREE (hdr);
+
+ return 0;
+}*/
+
+/*int
+glusterd_xfer_friend_req_msg (glusterd_peerinfo_t *peerinfo, xlator_t *this)
+{
+ gf_hdr_common_t *hdr = NULL;
+ gf_mop_probe_req_t *req = NULL;
+ size_t hdrlen = -1;
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ call_frame_t *dummy_frame = NULL;
+ int len = 0;
+
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (this);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ len = STRLEN_0 (peerinfo->hostname);
+ hdrlen = gf_hdr_len (req, len);
+ hdr = gf_hdr_new (req, len);
+
+ GF_VALIDATE_OR_GOTO (this->name, hdr, unwind);
+
+ req = gf_param (hdr);
+ memcpy (&req->uuid, &priv->uuid, sizeof(uuid_t));
+ strncpy (req->hostname, peerinfo->hostname, len);
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+
+ if (!dummy_frame)
+ goto unwind;
+
+ dummy_frame->local = peerinfo->trans;
+
+ ret = glusterd_xfer (dummy_frame, this,
+ peerinfo->trans,
+ GF_OP_TYPE_MOP_REQUEST, GF_MOP_FRIEND_REQ,
+ hdr, hdrlen, NULL, 0, NULL);
+
+ return ret;
+
+unwind:
+ if (hdr)
+ GF_FREE (hdr);
+
+ //STACK_UNWIND (frame, -1, EINVAL, NULL);
+ return 0;
+}*/
+
+/*int
+glusterd_xfer_cluster_lock_req (xlator_t *this, int32_t *lock_count)
+{
+ gd1_mgmt_cluster_lock_req req = {{0},};
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ call_frame_t *dummy_frame = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ int pending_lock = 0;
+ rpc_clnt_procedure_t *proc = NULL;
+
+ GF_ASSERT (this);
+ GF_ASSERT (lock_count);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ uuid_copy (req.uuid, priv->uuid);
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+
+ if (!dummy_frame)
+ goto unwind;
+
+
+ list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
+ GF_ASSERT (peerinfo);
+
+ if (peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
+
+ ret = glusterd_submit_request (peerinfo, &req, dummy_frame,
+ prog, GD_MGMT_PROBE_QUERY,
+ NULL, gd_xdr_from_mgmt_probe_req,
+ this);
+ if (!ret)
+ pending_lock++;
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Sent lock req to %d peers",
+ pending_lock);
+ *lock_count = pending_lock;
+
+unwind:
+
+ return ret;
+}*/
+
+/*int
+glusterd_xfer_cluster_unlock_req (xlator_t *this, int32_t *pending_count)
+{
+ gf_hdr_common_t *hdr = NULL;
+ gf_mop_cluster_unlock_req_t *req = NULL;
+ size_t hdrlen = -1;
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ call_frame_t *dummy_frame = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ int pending_unlock = 0;
+
+ GF_ASSERT (this);
+ GF_ASSERT (pending_count);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ hdrlen = gf_hdr_len (req, 0);
+ hdr = gf_hdr_new (req, 0);
+
+ GF_VALIDATE_OR_GOTO (this->name, hdr, unwind);
+
+ req = gf_param (hdr);
+ uuid_copy (req->uuid, priv->uuid);
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+
+ if (!dummy_frame)
+ goto unwind;
+
+
+ list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
+ GF_ASSERT (peerinfo);
+
+ if (peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
+
+ ret = glusterd_xfer (dummy_frame, this,
+ peerinfo->trans,
+ GF_OP_TYPE_MOP_REQUEST,
+ GF_MOP_CLUSTER_UNLOCK,
+ hdr, hdrlen, NULL, 0, NULL);
+ if (!ret)
+ pending_unlock++;
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Sent unlock req to %d peers",
+ pending_unlock);
+ *pending_count = pending_unlock;
+
+unwind:
+ if (hdr)
+ GF_FREE (hdr);
+
+ return ret;
+}*/
+
+
+int
+glusterd_friend_add (const char *hoststr,
+ glusterd_peer_state_t state,
+ uuid_t *uuid,
+ struct rpc_clnt *rpc,
+ glusterd_peerinfo_t **friend)
+{
+ int ret = 0;
+ glusterd_conf_t *priv = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ dict_t *options = NULL;
+ char *port_str = NULL;
+ int port_num = 0;
+ struct rpc_clnt_config rpc_cfg = {0,};
+
+ priv = THIS->private;
+
+ peerinfo = GF_CALLOC (1, sizeof(*peerinfo), gf_gld_mt_peerinfo_t);
+
+ if (!peerinfo)
+ return -1;
+
+ if (friend)
+ *friend = peerinfo;
+
+ peerinfo->state.state = state;
+ if (hoststr) {
+ peerinfo->hostname = gf_strdup (hoststr);
+ rpc_cfg.remote_host = gf_strdup (hoststr);
+ }
+ INIT_LIST_HEAD (&peerinfo->uuid_list);
+
+ list_add_tail (&peerinfo->uuid_list, &priv->peers);
+
+ if (uuid) {
+ uuid_copy (peerinfo->uuid, *uuid);
+ }
+
+
+ if (hoststr) {
+ options = dict_new ();
+ if (!options)
+ return -1;
+
+ ret = dict_set_str (options, "remote-host", (char *)hoststr);
+ if (ret)
+ goto out;
+
+
+                port_str = getenv ("GLUSTERD_REMOTE_PORT");
+                if (port_str)
+                        port_num = atoi (port_str);
+                else
+                        port_num = GLUSTERD_DEFAULT_PORT;
+                rpc_cfg.remote_port = port_num;
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "remote-port: %d", port_num);
+
+ //ret = dict_set_int32 (options, "remote-port", GLUSTERD_DEFAULT_PORT);
+ ret = dict_set_int32 (options, "remote-port", port_num);
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (options, "transport.address-family", "inet");
+ if (ret)
+ goto out;
+
+ rpc = rpc_clnt_init (&rpc_cfg, options, THIS->ctx, THIS->name);
+
+ if (!rpc) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "rpc init failed for peer: %s!", hoststr);
+ ret = -1;
+ goto out;
+ }
+
+ ret = rpc_clnt_register_notify (rpc, glusterd_rpc_notify,
+ peerinfo);
+
+ peerinfo->rpc = rpc;
+
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "connect returned %d", ret);
+
+out:
+ return ret;
+
+}
+
+/*int
+glusterd_friend_probe (const char *hoststr)
+{
+ int ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+
+
+ ret = glusterd_friend_find (NULL, (char *)hoststr, &peerinfo);
+
+ if (ret) {
+ //We should not reach this state ideally
+ GF_ASSERT (0);
+ goto out;
+ }
+
+ ret = glusterd_xfer_probe_msg (peerinfo, THIS);
+
+out:
+ return ret;
+}*/
+
+int
+glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr)
+{
+ int ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_friend_sm_event_t *event = NULL;
+ glusterd_probe_ctx_t *ctx = NULL;
+
+ GF_ASSERT (hoststr);
+ GF_ASSERT (req);
+
+ ret = glusterd_friend_find (NULL, (char *)hoststr, &peerinfo);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_NORMAL, "Unable to find peerinfo"
+ " for host: %s", hoststr);
+ ret = glusterd_friend_add ((char *)hoststr, GD_PEER_STATE_NONE,
+ NULL, NULL, &peerinfo);
+ }
+
+ ret = glusterd_friend_sm_new_event
+ (GD_FRIEND_EVENT_PROBE, &event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR, "Unable to get new event");
+ return ret;
+ }
+
+ ctx = GF_CALLOC (1, sizeof(*ctx), gf_gld_mt_probe_ctx_t);
+
+ if (!ctx) {
+ return ret;
+ }
+
+ ctx->hostname = gf_strdup (hoststr);
+ ctx->req = req;
+
+ event->peerinfo = peerinfo;
+ event->ctx = ctx;
+
+ ret = glusterd_friend_sm_inject_event (event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR, "Unable to inject event %d, "
+ "ret = %d", event->event, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+/*int
+glusterd_interpret (xlator_t *this, transport_t *trans,
+ char *hdr_p, size_t hdrlen, struct iobuf *iobuf)
+{
+ glusterd_connection_t *conn = NULL;
+ gf_hdr_common_t *hdr = NULL;
+ xlator_t *bound_xl = NULL;
+ call_frame_t *frame = NULL;
+ peer_info_t *peerinfo = NULL;
+ int32_t type = -1;
+ int32_t op = -1;
+ int32_t ret = 0;
+
+ hdr = (gf_hdr_common_t *)hdr_p;
+ type = ntoh32 (hdr->type);
+ op = ntoh32 (hdr->op);
+
+ conn = trans->xl_private;
+ if (conn)
+ bound_xl = conn->bound_xl;
+
+ if (GF_MOP_PROBE_QUERY != op) {
+ //ret = glusterd_validate_sender (hdr, hdrlen);
+ }
+
+ peerinfo = &trans->peerinfo;
+ switch (type) {
+
+ case GF_OP_TYPE_MOP_REQUEST:
+ if ((op < 0) || (op >= GF_MOP_MAXVALUE)) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "invalid mop %"PRId32" from %s",
+ op, peerinfo->identifier);
+ break;
+ }
+ frame = glusterd_get_frame_for_call (trans, hdr);
+ frame->op = op;
+ ret = glusterd_ops[op] (frame, hdr, hdrlen);
+ break;
+
+ case GF_OP_TYPE_MOP_REPLY:
+ if ((op < 0) || (op >= GF_MOP_MAXVALUE)) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "invalid mop %"PRId32" from %s",
+ op, peerinfo->identifier);
+ break;
+ }
+ ret = glusterd_resp_ops[op] (frame, hdr, hdrlen);
+ gf_log ("glusterd", GF_LOG_NORMAL, "Obtained MOP response");
+ break;
+
+
+ default:
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unknown type: %d", type);
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+*/
+
+int
+glusterd_xfer_friend_add_resp (rpcsvc_request_t *req, char *hostname)
+{
+ gd1_mgmt_friend_rsp rsp = {{0}, };
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ GF_ASSERT (hostname);
+
+ rsp.op_ret = 0;
+ this = THIS;
+ GF_ASSERT (this);
+
+ conf = this->private;
+
+ uuid_copy (rsp.uuid, conf->uuid);
+ rsp.hostname = gf_strdup (hostname);
+
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ gd_xdr_serialize_mgmt_friend_rsp);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Responded to %s, ret: %d", hostname, ret);
+ return ret;
+}
+
+int
+glusterd_xfer_cli_probe_resp (rpcsvc_request_t *req, int32_t op_ret,
+ int32_t op_errno, char *hostname)
+{
+ gf1_cli_probe_rsp rsp = {0, };
+ int32_t ret = -1;
+
+ GF_ASSERT (req);
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = op_errno;
+ rsp.hostname = hostname;
+
+ ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ gf_xdr_serialize_cli_probe_rsp);
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Responded to CLI, ret: %d",ret);
+
+ return ret;
+}
+
+int32_t
+glusterd_op_txn_begin ()
+{
+ int32_t ret = -1;
+ glusterd_conf_t *priv = NULL;
+ glusterd_op_sm_event_t *event = NULL;
+ int32_t locked = 0;
+
+ priv = THIS->private;
+ GF_ASSERT (priv);
+
+ ret = glusterd_lock (priv->uuid);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to acquire local lock, ret: %d", ret);
+ goto out;
+ }
+
+ locked = 1;
+ gf_log ("glusterd", GF_LOG_NORMAL, "Acquired local lock");
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_START_LOCK, &event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to get event, ret: %d", ret);
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event (event);
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Returning %d", ret);
+
+out:
+ if (locked && ret)
+ glusterd_unlock (priv->uuid);
+ return ret;
+}
+
+int32_t
+glusterd_create_volume (rpcsvc_request_t *req, dict_t *dict)
+{
+ int32_t ret = -1;
+ char *volname = NULL;
+ char *bricks = NULL;
+ int type = 0;
+ int sub_count = 2;
+ int count = 0;
+ char cmd_str[8192] = {0,};
+
+ GF_ASSERT (req);
+ GF_ASSERT (dict);
+
+ glusterd_op_set_op (GD_OP_CREATE_VOLUME);
+
+ glusterd_op_set_ctx (GD_OP_CREATE_VOLUME, dict);
+
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret)
+ goto out;
+
+ ret = dict_get_int32 (dict, "type", &type);
+ if (ret)
+ goto out;
+
+ ret = dict_get_int32 (dict, "count", &count);
+ if (ret)
+ goto out;
+
+ ret = dict_get_str (dict, "bricks", &bricks);
+ if (ret)
+ goto out;
+
+ switch (type) {
+ case GF_CLUSTER_TYPE_REPLICATE:
+ {
+ ret = dict_get_int32 (dict, "replica-count", &sub_count);
+ if (ret)
+ goto out;
+ snprintf (cmd_str, 8192,
+ "glusterfs-volgen -n %s -c /etc/glusterd -r 1 %s",
+ volname, bricks);
+ system (cmd_str);
+ break;
+ }
+ case GF_CLUSTER_TYPE_STRIPE:
+ {
+ ret = dict_get_int32 (dict, "stripe-count", &sub_count);
+ if (ret)
+ goto out;
+ snprintf (cmd_str, 8192,
+ "glusterfs-volgen -n %s -c /etc/glusterd -r 0 %s",
+ volname, bricks);
+ system (cmd_str);
+ break;
+ }
+ case GF_CLUSTER_TYPE_NONE:
+ {
+ snprintf (cmd_str, 8192,
+ "glusterfs-volgen -n %s -c /etc/glusterd %s",
+ volname, bricks);
+ system (cmd_str);
+ break;
+ }
+ }
+
+ ret = glusterd_op_txn_begin ();
+
+out:
+ return ret;
+}
+
+
+int
+glusterd_rpc_notify (struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
+ void *data)
+{
+ xlator_t *this = NULL;
+ char *handshake = NULL;
+ glusterd_conf_t *conf = NULL;
+ int ret = 0;
+
+        /* mydata carries the peerinfo registered in glusterd_friend_add;
+         * the xlator itself comes from THIS */
+        this = THIS;
+        conf = this->private;
+
+
+ switch (event) {
+ case RPC_CLNT_CONNECT:
+ {
+ // connect happened, send 'get_supported_versions' mop
+ ret = dict_get_str (this->options, "disable-handshake",
+ &handshake);
+
+ gf_log (this->name, GF_LOG_TRACE, "got RPC_CLNT_CONNECT");
+
+ if ((ret < 0) || (strcasecmp (handshake, "on"))) {
+ //ret = client_handshake (this, conf->rpc);
+
+ } else {
+ //conf->rpc->connected = 1;
+ ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);
+ }
+ break;
+ }
+
+ case RPC_CLNT_DISCONNECT:
+
+ //Inject friend disconnected here
+
+ gf_log (this->name, GF_LOG_TRACE, "got RPC_CLNT_DISCONNECT");
+
+ default_notify (this, GF_EVENT_CHILD_DOWN, NULL);
+ break;
+
+ default:
+ gf_log (this->name, GF_LOG_TRACE,
+ "got some other RPC event %d", event);
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
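
Every RPC handler in this file follows the same shape: decode the XDR request, allocate a state-machine event plus a context that carries the rpcsvc_request_t, and inject the event for deferred processing (the reply is sent later from a state-machine action). A condensed sketch of that pattern, using only names defined in this change and eliding cleanup paths:

    glusterd_op_sm_event_t  *event = NULL;
    glusterd_op_lock_ctx_t  *ctx   = NULL;
    int                      ret   = -1;

    ret = glusterd_op_sm_new_event (GD_OP_EVENT_LOCK, &event);
    if (ret)
            return ret;

    ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
    if (!ctx)
            return -1;

    uuid_copy (ctx->uuid, lock_req.uuid);
    ctx->req   = req;                 /* kept so the SM action can respond */
    event->ctx = ctx;

    ret = glusterd_op_sm_inject_event (event);
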
diff --git a/xlators/mgmt/glusterd/src/glusterd-mem-types.h b/xlators/mgmt/glusterd/src/glusterd-mem-types.h
new file mode 100644
index 00000000000..c72a91d5a09
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-mem-types.h
@@ -0,0 +1,51 @@
+/*
+ Copyright (c) 2008-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef __GLUSTERD_MEM_TYPES_H__
+#define __GLUSTERD_MEM_TYPES_H__
+
+#include "mem-types.h"
+
+enum gf_gld_mem_types_ {
+ gf_gld_mt_dir_entry_t = gf_common_mt_end + 1,
+ gf_gld_mt_volfile_ctx,
+ gf_gld_mt_glusterd_state_t,
+ gf_gld_mt_glusterd_conf_t,
+ gf_gld_mt_locker,
+ gf_gld_mt_lock_table,
+ gf_gld_mt_char,
+ gf_gld_mt_glusterd_connection_t,
+ gf_gld_mt_resolve_comp,
+ gf_gld_mt_peerinfo_t,
+ gf_gld_mt_friend_sm_event_t,
+ gf_gld_mt_friend_req_ctx_t,
+ gf_gld_mt_op_sm_event_t,
+ gf_gld_mt_op_lock_ctx_t,
+ gf_gld_mt_op_stage_ctx_t,
+ gf_gld_mt_op_commit_ctx_t,
+ gf_gld_mt_mop_stage_req_t,
+ gf_gld_mt_probe_ctx_t,
+ gf_gld_mt_create_volume_ctx_t,
+ gf_gld_mt_glusterd_volinfo_t,
+ gf_gld_mt_glusterd_brickinfo_t,
+ gf_gld_mt_end
+};
+#endif
+
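
These enum values are the per-type accounting tags passed as the third argument to GF_CALLOC()/GF_MALLOC() throughout glusterd, so allocations can be attributed in a statedump. For example, as used in glusterd-handler.c above:

    ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_friend_req_ctx_t);
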
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
new file mode 100644
index 00000000000..41203606c63
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -0,0 +1,1041 @@
+/*
+ Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+  GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+#include <time.h>
+#include <sys/uio.h>
+#include <sys/resource.h>
+
+#include <libgen.h>
+#include <uuid/uuid.h>
+
+//#include "transport.h"
+#include "fnmatch.h"
+#include "xlator.h"
+#include "protocol-common.h"
+#include "glusterd.h"
+#include "call-stub.h"
+#include "defaults.h"
+#include "list.h"
+#include "dict.h"
+#include "compat.h"
+#include "compat-errno.h"
+#include "statedump.h"
+//#include "md5.h"
+#include "glusterd-sm.h"
+#include "glusterd-op-sm.h"
+#include "glusterd-utils.h"
+#include "glusterd-ha.h"
+
+static struct list_head gd_op_sm_queue;
+glusterd_op_info_t opinfo;
+
+static int
+glusterd_op_get_len (glusterd_op_t op)
+{
+        int ret = -1;
+
+        GF_ASSERT (op < GD_OP_MAX);
+        GF_ASSERT (op > GD_OP_NONE);
+
+ switch (op) {
+ case GD_OP_CREATE_VOLUME:
+ {
+ dict_t *dict = glusterd_op_get_ctx (op);
+ ret = dict_serialized_length (dict);
+ return ret;
+ }
+ break;
+
+ case GD_OP_START_BRICK:
+ break;
+
+ default:
+ GF_ASSERT (op);
+
+ }
+
+ return 0;
+}
+
+int
+glusterd_op_build_payload (glusterd_op_t op, gd1_mgmt_stage_op_req **req)
+{
+ int len = 0;
+ int ret = -1;
+ gd1_mgmt_stage_op_req *stage_req = NULL;
+
+ GF_ASSERT (op < GD_OP_MAX);
+ GF_ASSERT (op > GD_OP_NONE);
+ GF_ASSERT (req);
+
+ len = glusterd_op_get_len (op);
+
+ stage_req = GF_CALLOC (1, sizeof (*stage_req),
+ gf_gld_mt_mop_stage_req_t);
+
+ if (!stage_req) {
+ gf_log ("", GF_LOG_ERROR, "Out of Memory");
+ goto out;
+ }
+
+ stage_req->buf.buf_val = GF_CALLOC (1, len,
+ gf_gld_mt_mop_stage_req_t);
+
+ if (!stage_req->buf.buf_val) {
+ gf_log ("", GF_LOG_ERROR, "Out of Memory");
+ goto out;
+ }
+
+ glusterd_get_uuid (&stage_req->uuid);
+ stage_req->op = op;
+ stage_req->buf.buf_len = len;
+
+ switch (op) {
+ case GD_OP_CREATE_VOLUME:
+ {
+ dict_t *dict = NULL;
+ dict = glusterd_op_get_ctx (op);
+ GF_ASSERT (dict);
+ ret = dict_serialize (dict,
+ stage_req->buf.buf_val);
+ if (ret) {
+ goto out;
+ }
+        }
+        break;
+
+        default:
+ break;
+ }
+
+ *req = stage_req;
+ ret = 0;
+
+out:
+ return ret;
+}
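
For GD_OP_CREATE_VOLUME the payload is simply the op's context dict flattened with dict_serialize(); the receiving side reverses it, as glusterd_op_stage_create_volume() does below. A round-trip sketch (hypothetical caller, error checks elided; gf_gld_mt_char is the generic byte-buffer tag from glusterd-mem-types.h):

    int     ret  = -1;
    int     len  = 0;
    char   *buf  = NULL;
    dict_t *rcvd = NULL;

    len = dict_serialized_length (dict);        /* size the payload  */
    buf = GF_CALLOC (1, len, gf_gld_mt_char);
    ret = dict_serialize (dict, buf);           /* sender side       */

    ret = dict_unserialize (buf, len, &rcvd);   /* receiver side     */
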
+
+
+
+/*static int
+glusterd_xfer_stage_req (xlator_t *this, int32_t *lock_count)
+{
+ gf_hdr_common_t *hdr = NULL;
+ size_t hdrlen = -1;
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ call_frame_t *dummy_frame = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ int pending_lock = 0;
+ int i = 0;
+
+ GF_ASSERT (this);
+ GF_ASSERT (lock_count);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+
+ for ( i = GD_OP_NONE; i < GD_OP_MAX; i++) {
+ if (opinfo.pending_op[i])
+ break;
+ }
+
+ if (GD_OP_MAX == i) {
+
+ //No pending ops, inject stage_acc
+
+ glusterd_op_sm_event_t *event = NULL;
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_STAGE_ACC,
+ &event);
+
+ if (ret)
+ goto out;
+
+ ret = glusterd_op_sm_inject_event (event);
+
+ return ret;
+ }
+
+
+ ret = glusterd_op_build_payload (i, &hdr, &hdrlen);
+
+ if (ret)
+ goto out;
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+
+ if (!dummy_frame)
+ goto out;
+
+ list_for_each_entry (peerinfo, &opinfo.op_peers, op_peers_list) {
+ GF_ASSERT (peerinfo);
+
+ GF_ASSERT (peerinfo->state.state == GD_FRIEND_STATE_BEFRIENDED);
+
+
+ ret = glusterd_xfer (dummy_frame, this,
+ peerinfo->trans,
+ GF_OP_TYPE_MOP_REQUEST,
+ GF_MOP_STAGE_OP,
+ hdr, hdrlen, NULL, 0, NULL);
+ if (!ret)
+ pending_lock++;
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Sent op req to %d peers",
+ pending_lock);
+ if (i < GD_OP_MAX)
+ opinfo.pending_op[i] = 0;
+
+ *lock_count = pending_lock;
+
+out:
+ if (hdr)
+ GF_FREE (hdr);
+
+ return ret;
+} */
+
+/*static int
+glusterd_xfer_commit_req (xlator_t *this, int32_t *lock_count)
+{
+ gf_hdr_common_t *hdr = NULL;
+ size_t hdrlen = -1;
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ call_frame_t *dummy_frame = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ int pending_lock = 0;
+ int i = 0;
+
+ GF_ASSERT (this);
+ GF_ASSERT (lock_count);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+
+ for ( i = GD_OP_NONE; i < GD_OP_MAX; i++) {
+ if (opinfo.commit_op[i])
+ break;
+ }
+
+ if (GD_OP_MAX == i) {
+
+ //No pending ops, inject stage_acc
+
+ glusterd_op_sm_event_t *event = NULL;
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_COMMIT_ACC,
+ &event);
+
+ if (ret)
+ goto out;
+
+ ret = glusterd_op_sm_inject_event (event);
+
+ return ret;
+ }
+
+
+ ret = glusterd_op_build_payload (i, &hdr, &hdrlen);
+
+ if (ret)
+ goto out;
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+
+ if (!dummy_frame)
+ goto out;
+
+ list_for_each_entry (peerinfo, &opinfo.op_peers, op_peers_list) {
+ GF_ASSERT (peerinfo);
+
+ GF_ASSERT (peerinfo->state.state == GD_FRIEND_STATE_BEFRIENDED);
+
+
+ ret = glusterd_xfer (dummy_frame, this,
+ peerinfo->trans,
+ GF_OP_TYPE_MOP_REQUEST,
+ GF_MOP_STAGE_OP,
+ hdr, hdrlen, NULL, 0, NULL);
+ if (!ret)
+ pending_lock++;
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Sent op req to %d peers",
+ pending_lock);
+ if (i < GD_OP_MAX)
+ opinfo.pending_op[i] = 0;
+
+ *lock_count = pending_lock;
+
+out:
+ if (hdr)
+ GF_FREE (hdr);
+
+ return ret;
+}*/
+
+static int
+glusterd_op_stage_create_volume (gd1_mgmt_stage_op_req *req)
+{
+ int ret = 0;
+ dict_t *dict = NULL;
+ char *volname = NULL;
+ gf_boolean_t exists = _gf_false;
+
+ GF_ASSERT (req);
+
+ ret = dict_unserialize (req->buf.buf_val, req->buf.buf_len, &dict);
+
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to unserialize dict");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "volname", &volname);
+
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to get volume name");
+ goto out;
+ }
+
+ exists = glusterd_check_volume_exists (volname);
+
+ if (exists) {
+ gf_log ("", GF_LOG_ERROR, "Volume with name: %s exists",
+ volname);
+ ret = -1;
+ } else {
+ ret = 0;
+ }
+
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_op_create_volume (gd1_mgmt_stage_op_req *req)
+{
+ int ret = 0;
+ dict_t *dict = NULL;
+ char *volname = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ xlator_t *this = NULL;
+ char *brick = NULL;
+ int32_t count = 0;
+        int32_t                                 i = 1;
+ char key[50];
+
+ GF_ASSERT (req);
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ ret = dict_unserialize (req->buf.buf_val, req->buf.buf_len, &dict);
+
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to unserialize dict");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_new (&volinfo);
+
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to allocate memory");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "volname", &volname);
+
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to get volume name");
+ goto out;
+ }
+
+ strncpy (volinfo->volname, volname, 1024);
+
+ GF_ASSERT (volinfo->volname);
+
+ ret = dict_get_int32 (dict, "type", &volinfo->type);
+
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to get type");
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "count", &volinfo->brick_count);
+
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to get count");
+ goto out;
+ }
+
+ count = volinfo->brick_count;
+
+        while (i <= count) {
+ snprintf (key, 50, "brick%d", i);
+ ret = dict_get_str (dict, key, &brick);
+ if (ret)
+ goto out;
+
+ ret = glusterd_brickinfo_from_brick (brick, &brickinfo);
+ if (ret)
+ goto out;
+
+ list_add_tail (&brickinfo->brick_list, &volinfo->bricks);
+ i++;
+ }
+
+ ret = glusterd_ha_create_volume (volinfo);
+
+out:
+ return ret;
+}
+
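glusterd_op_create_volume() expects the unserialized dict to carry volname, type, count, and one brick key per brick. A sketch of the dict the CLI side would build under that assumed convention (keys brick1 .. brickN; hostnames and paths illustrative):

    dict_t *dict = dict_new ();
    int     ret  = -1;

    ret = dict_set_str   (dict, "volname", "testvol");
    ret = dict_set_int32 (dict, "type",    GF_CLUSTER_TYPE_NONE);
    ret = dict_set_int32 (dict, "count",   2);
    ret = dict_set_str   (dict, "brick1",  "host1:/export/brick1");
    ret = dict_set_str   (dict, "brick2",  "host2:/export/brick2");
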
+static int
+glusterd_op_ac_none (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ rpc_clnt_procedure_t *proc = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ priv = this->private;
+
+ proc = &priv->mgmt->proctable[GD_MGMT_CLUSTER_LOCK];
+ if (proc->fn) {
+ ret = proc->fn (NULL, this, NULL);
+ }
+ // TODO: if pending_count = 0, inject ALL_ACC here
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ rpc_clnt_procedure_t *proc = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ priv = this->private;
+
+ proc = &priv->mgmt->proctable[GD_MGMT_CLUSTER_UNLOCK];
+ if (proc->fn) {
+ ret = proc->fn (NULL, this, NULL);
+ }
+ // TODO: if pending_count = 0, inject ALL_ACC here
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+
+}
+
+static int
+glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ glusterd_op_lock_ctx_t *lock_ctx = NULL;
+ int32_t status = 0;
+
+
+ GF_ASSERT (event);
+ GF_ASSERT (ctx);
+
+ lock_ctx = (glusterd_op_lock_ctx_t *)ctx;
+
+ status = glusterd_lock (lock_ctx->uuid);
+
+ gf_log ("", GF_LOG_DEBUG, "Lock Returned %d", status);
+
+ ret = glusterd_op_lock_send_resp (lock_ctx->req, status);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_op_ac_unlock (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ glusterd_op_lock_ctx_t *lock_ctx = NULL;
+
+ GF_ASSERT (event);
+ GF_ASSERT (ctx);
+
+ lock_ctx = (glusterd_op_lock_ctx_t *)ctx;
+
+ ret = glusterd_unlock (lock_ctx->uuid);
+
+ gf_log ("", GF_LOG_DEBUG, "Unlock Returned %d", ret);
+
+ ret = glusterd_op_unlock_send_resp (lock_ctx->req, ret);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_op_ac_rcvd_lock_acc (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ glusterd_op_sm_event_t *new_event = NULL;
+
+ GF_ASSERT (event);
+
+ opinfo.pending_count--;
+
+ if (opinfo.pending_count)
+ goto out;
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_ALL_ACC, &new_event);
+
+ if (ret)
+ goto out;
+
+ ret = glusterd_op_sm_inject_event (new_event);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+
+out:
+ return ret;
+}
+
+static int
+glusterd_op_ac_send_stage_op (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ rpc_clnt_procedure_t *proc = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+ GF_ASSERT (priv->mgmt);
+
+ proc = &priv->mgmt->proctable[GD_MGMT_STAGE_OP];
+ GF_ASSERT (proc);
+ if (proc->fn) {
+ ret = proc->fn (NULL, this, NULL);
+ }
+ // TODO: if pending_count = 0, inject ALL_ACC here
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+
+}
+
+static int
+glusterd_op_ac_send_commit_op (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ rpc_clnt_procedure_t *proc = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+ GF_ASSERT (priv->mgmt);
+
+ proc = &priv->mgmt->proctable[GD_MGMT_COMMIT_OP];
+ GF_ASSERT (proc);
+ if (proc->fn) {
+ ret = proc->fn (NULL, this, NULL);
+ }
+ // TODO: if pending_count = 0, inject ALL_ACC here
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+
+}
+
+static int
+glusterd_op_ac_rcvd_stage_op_acc (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ glusterd_op_sm_event_t *new_event = NULL;
+
+ GF_ASSERT (event);
+
+ opinfo.pending_count--;
+
+ if (opinfo.pending_count)
+ goto out;
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_STAGE_ACC, &new_event);
+
+ if (ret)
+ goto out;
+
+ ret = glusterd_op_sm_inject_event (new_event);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+
+out:
+ return ret;
+}
+
+static int
+glusterd_op_ac_rcvd_commit_op_acc (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ glusterd_op_sm_event_t *new_event = NULL;
+
+ GF_ASSERT (event);
+
+ opinfo.pending_count--;
+
+ if (opinfo.pending_count)
+ goto out;
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_COMMIT_ACC, &new_event);
+
+ if (ret)
+ goto out;
+
+ ret = glusterd_op_sm_inject_event (new_event);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+
+out:
+ return ret;
+}
+
+static int
+glusterd_op_ac_rcvd_unlock_acc (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ glusterd_op_sm_event_t *new_event = NULL;
+
+ GF_ASSERT (event);
+
+ opinfo.pending_count--;
+
+ if (opinfo.pending_count)
+ goto out;
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_ALL_ACC, &new_event);
+
+ if (ret)
+ goto out;
+
+ ret = glusterd_op_sm_inject_event (new_event);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+
+out:
+ return ret;
+}
+
+static int
+glusterd_op_ac_commit_error (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+
+ //Log here with who failed the commit
+ //
+ return ret;
+}
+
+static int
+glusterd_op_ac_stage_op (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = -1;
+ gd1_mgmt_stage_op_req *req = NULL;
+ glusterd_op_stage_ctx_t *stage_ctx = NULL;
+ int32_t status = 0;
+
+ GF_ASSERT (ctx);
+
+ stage_ctx = ctx;
+
+ req = stage_ctx->stage_req;
+
+ switch (req->op) {
+ case GD_OP_CREATE_VOLUME:
+ status = glusterd_op_stage_create_volume (req);
+ break;
+
+                default:
+                        gf_log ("", GF_LOG_ERROR, "Unknown op %d",
+                                req->op);
+                        status = -1;
+                }
+
+ ret = glusterd_op_stage_send_resp (stage_ctx->req, req->op, status);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_op_ac_commit_op (glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ gd1_mgmt_stage_op_req *req = NULL;
+ glusterd_op_commit_ctx_t *commit_ctx = NULL;
+ int32_t status = 0;
+
+ GF_ASSERT (ctx);
+
+ commit_ctx = ctx;
+
+ req = commit_ctx->stage_req;
+
+ switch (req->op) {
+ case GD_OP_CREATE_VOLUME:
+                        status = glusterd_op_create_volume (req);
+                        break;
+
+                default:
+                        gf_log ("", GF_LOG_ERROR, "Unknown op %d",
+                                req->op);
+                        status = -1;
+                }
+
+ ret = glusterd_op_commit_send_resp (commit_ctx->req, req->op, status);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+
+static int
+glusterd_op_sm_transition_state (glusterd_op_info_t *opinfo,
+ glusterd_op_sm_t *state,
+ glusterd_op_sm_event_type_t event_type)
+{
+
+ GF_ASSERT (state);
+ GF_ASSERT (opinfo);
+
+ gf_log ("", GF_LOG_NORMAL, "Transitioning from %d to %d",
+ opinfo->state.state, state[event_type].next_state);
+ opinfo->state.state =
+ state[event_type].next_state;
+ return 0;
+}
+
+
+
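+/* State tables for the op state machine. Each glusterd_op_state_* array
+ * below is indexed by glusterd_op_sm_event_type_t (see the //EVENT_*
+ * markers), and every entry names the state to move to and the action to
+ * run when that event arrives in that state. glusterd_op_state_table[]
+ * further maps each glusterd_op_sm_state_t to its row, so dispatch in
+ * glusterd_op_sm () is two table lookups.
+ */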
+glusterd_op_sm_t glusterd_op_state_default [] = {
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_NONE
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_send_lock},//EVENT_START_LOCK
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_lock}, //EVENT_LOCK
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_RCVD_ACC
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_ALL_ACC
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_STAGE_ACC
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_RCVD_RJT
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_STAGE_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_MAX
+};
+
+glusterd_op_sm_t glusterd_op_state_lock_sent [] = {
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_NONE
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},//EVENT_START_LOCK
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_LOCK
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_rcvd_lock_acc}, //EVENT_RCVD_ACC
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_send_stage_op}, //EVENT_ALL_ACC
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_STAGE_ACC
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock}, //EVENT_RCVD_RJT
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_STAGE_OP
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_OP
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_MAX
+};
+
+glusterd_op_sm_t glusterd_op_state_locked [] = {
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_NONE
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none},//EVENT_START_LOCK
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_LOCK
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_RCVD_ACC
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_ALL_ACC
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_STAGE_ACC
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_RCVD_RJT
+ {GD_OP_STATE_STAGED, glusterd_op_ac_stage_op}, //EVENT_STAGE_OP
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_MAX
+};
+
+glusterd_op_sm_t glusterd_op_state_stage_op_sent [] = {
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, //EVENT_NONE
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none},//EVENT_START_LOCK
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, //EVENT_LOCK
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_rcvd_stage_op_acc}, //EVENT_RCVD_ACC
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_send_stage_op}, //EVENT_ALL_ACC
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_send_commit_op}, //EVENT_STAGE_ACC
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock}, //EVENT_RCVD_RJT
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, //EVENT_STAGE_OP
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_OP
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, //EVENT_MAX
+};
+
+glusterd_op_sm_t glusterd_op_state_staged [] = {
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_NONE
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none},//EVENT_START_LOCK
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_LOCK
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_RCVD_ACC
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_ALL_ACC
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_STAGE_ACC
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_RCVD_RJT
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_STAGE_OP
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_commit_op}, //EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_MAX
+};
+
+glusterd_op_sm_t glusterd_op_state_commit_op_sent [] = {
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, //EVENT_NONE
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none},//EVENT_START_LOCK
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, //EVENT_LOCK
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_rcvd_commit_op_acc}, //EVENT_RCVD_ACC
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_commit_op}, //EVENT_ALL_ACC
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, //EVENT_STAGE_ACC
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock}, //EVENT_COMMIT_ACC
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_commit_error}, //EVENT_RCVD_RJT
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, //EVENT_STAGE_OP
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_OP
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, //EVENT_MAX
+};
+
+glusterd_op_sm_t glusterd_op_state_commited [] = {
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_NONE
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none},//EVENT_START_LOCK
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_LOCK
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_RCVD_ACC
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_ALL_ACC
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_STAGE_ACC
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_RCVD_RJT
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_STAGE_OP
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_MAX
+};
+
+glusterd_op_sm_t glusterd_op_state_unlock_sent [] = {
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_NONE
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none},//EVENT_START_LOCK
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_LOCK
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_rcvd_unlock_acc}, //EVENT_RCVD_ACC
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_ALL_ACC
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_STAGE_ACC
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_RCVD_RJT
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_STAGE_OP
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_OP
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_MAX
+};
+
+
+glusterd_op_sm_t *glusterd_op_state_table [] = {
+ glusterd_op_state_default,
+ glusterd_op_state_lock_sent,
+ glusterd_op_state_locked,
+ glusterd_op_state_stage_op_sent,
+ glusterd_op_state_staged,
+ glusterd_op_state_commit_op_sent,
+ glusterd_op_state_commited,
+ glusterd_op_state_unlock_sent
+};
+
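+/* A minimal sketch of how the helpers below are meant to be driven (the
+ * probe callback in glusterd3_1-mops.c follows the same pattern for the
+ * friend SM); the event type and ctx here are illustrative:
+ *
+ *      glusterd_op_sm_event_t *ev = NULL;
+ *
+ *      if (!glusterd_op_sm_new_event (GD_OP_EVENT_START_LOCK, &ev)) {
+ *              ev->ctx = ctx;
+ *              glusterd_op_sm_inject_event (ev);
+ *              glusterd_op_sm ();
+ *      }
+ */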
+int
+glusterd_op_sm_new_event (glusterd_op_sm_event_type_t event_type,
+ glusterd_op_sm_event_t **new_event)
+{
+ glusterd_op_sm_event_t *event = NULL;
+
+ GF_ASSERT (new_event);
+ GF_ASSERT (GD_OP_EVENT_NONE <= event_type &&
+ GD_OP_EVENT_MAX > event_type);
+
+ event = GF_CALLOC (1, sizeof (*event), gf_gld_mt_op_sm_event_t);
+
+ if (!event)
+ return -1;
+
+ *new_event = event;
+ event->event = event_type;
+ INIT_LIST_HEAD (&event->list);
+
+ return 0;
+}
+
+int
+glusterd_op_sm_inject_event (glusterd_op_sm_event_t *event)
+{
+ GF_ASSERT (event);
+ gf_log ("glusterd", GF_LOG_NORMAL, "Enqueuing event: %d",
+ event->event);
+ list_add_tail (&event->list, &gd_op_sm_queue);
+
+ return 0;
+}
+
+
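+/* Drain gd_op_sm_queue: for each queued event, look up the row for the
+ * current state, run the handler, transition opinfo.state, and free the
+ * event. A handler or transition failure aborts the drain and is returned
+ * to the caller.
+ */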
+int
+glusterd_op_sm ()
+{
+ glusterd_op_sm_event_t *event = NULL;
+ glusterd_op_sm_event_t *tmp = NULL;
+ int ret = -1;
+ glusterd_op_sm_ac_fn handler = NULL;
+ glusterd_op_sm_t *state = NULL;
+ glusterd_op_sm_event_type_t event_type = 0;
+
+
+ while (!list_empty (&gd_op_sm_queue)) {
+
+ list_for_each_entry_safe (event, tmp, &gd_op_sm_queue, list) {
+
+ list_del_init (&event->list);
+ event_type = event->event;
+
+ state = glusterd_op_state_table[opinfo.state.state];
+
+ GF_ASSERT (state);
+
+ handler = state[event_type].handler;
+ GF_ASSERT (handler);
+
+ ret = handler (event, event->ctx);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "handler returned: %d", ret);
+ return ret;
+ }
+
+ ret = glusterd_op_sm_transition_state (&opinfo, state,
+ event_type);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to transition"
+ "state from %d to %d",
+ opinfo.state.state,
+ state[event_type].next_state);
+ return ret;
+ }
+
+ GF_FREE (event);
+ }
+ }
+
+
+ ret = 0;
+
+ return ret;
+}
+
+int32_t
+glusterd_op_set_op (glusterd_op_t op)
+{
+
+ GF_ASSERT (op < GD_OP_MAX);
+ GF_ASSERT (op > GD_OP_NONE);
+
+ opinfo.op[op] = 1;
+ opinfo.pending_op[op] = 1;
+ opinfo.commit_op[op] = 1;
+
+ return 0;
+
+}
+
+int32_t
+glusterd_op_set_ctx (glusterd_op_t op, void *ctx)
+{
+
+ GF_ASSERT (op < GD_OP_MAX);
+ GF_ASSERT (op > GD_OP_NONE);
+
+ opinfo.op_ctx[op] = ctx;
+
+ return 0;
+
+}
+
+
+void *
+glusterd_op_get_ctx (glusterd_op_t op)
+{
+ GF_ASSERT (op < GD_OP_MAX);
+ GF_ASSERT (op > GD_OP_NONE);
+
+ return opinfo.op_ctx[op];
+
+}
+
+int
+glusterd_op_sm_init ()
+{
+ INIT_LIST_HEAD (&gd_op_sm_queue);
+ return 0;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
new file mode 100644
index 00000000000..61bdc8885ce
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
@@ -0,0 +1,167 @@
+/*
+ Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _GLUSTERD_OP_SM_H_
+#define _GLUSTERD_OP_SM_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <pthread.h>
+#include <uuid/uuid.h>
+
+#include "glusterfs.h"
+#include "xlator.h"
+#include "logging.h"
+#include "call-stub.h"
+#include "authenticate.h"
+#include "fd.h"
+#include "byte-order.h"
+#include "glusterd.h"
+
+#define GD_VOLUME_NAME_MAX 256
+
+typedef enum glusterd_op_sm_state_ {
+ GD_OP_STATE_DEFAULT = 0,
+ GD_OP_STATE_LOCK_SENT,
+ GD_OP_STATE_LOCKED,
+ GD_OP_STATE_STAGE_OP_SENT,
+ GD_OP_STATE_STAGED,
+ GD_OP_STATE_COMMIT_OP_SENT,
+ GD_OP_STATE_COMMITED,
+ GD_OP_STATE_UNLOCK_SENT,
+ GD_OP_STATE_MAX,
+} glusterd_op_sm_state_t;
+
+typedef enum glusterd_op_sm_event_type_ {
+ GD_OP_EVENT_NONE = 0,
+ GD_OP_EVENT_START_LOCK,
+ GD_OP_EVENT_LOCK,
+ GD_OP_EVENT_RCVD_ACC,
+ GD_OP_EVENT_ALL_ACC,
+ GD_OP_EVENT_STAGE_ACC,
+ GD_OP_EVENT_COMMIT_ACC,
+ GD_OP_EVENT_RCVD_RJT,
+ GD_OP_EVENT_STAGE_OP,
+ GD_OP_EVENT_COMMIT_OP,
+ GD_OP_EVENT_UNLOCK,
+ GD_OP_EVENT_MAX
+} glusterd_op_sm_event_type_t;
+
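+/* Read together with the state tables in glusterd-op-sm.c, these events
+ * drive a cluster-wide transaction. On the node initiating the operation:
+ * DEFAULT -> LOCK_SENT -> STAGE_OP_SENT -> COMMIT_OP_SENT -> UNLOCK_SENT
+ * -> DEFAULT; on the nodes receiving it: DEFAULT -> LOCKED -> STAGED ->
+ * COMMITED -> DEFAULT.
+ */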
+
+struct glusterd_op_sm_event_ {
+ struct list_head list;
+ void *ctx;
+ glusterd_op_sm_event_type_t event;
+};
+
+typedef struct glusterd_op_sm_event_ glusterd_op_sm_event_t;
+
+typedef int (*glusterd_op_sm_ac_fn) (glusterd_op_sm_event_t *, void *);
+
+typedef struct glusterd_op_sm_ {
+ glusterd_op_sm_state_t next_state;
+ glusterd_op_sm_ac_fn handler;
+} glusterd_op_sm_t;
+
+typedef enum glusterd_op_ {
+ GD_OP_NONE = 0,
+ GD_OP_CREATE_VOLUME,
+ GD_OP_START_BRICK,
+ GD_OP_STOP_BRICK,
+ GD_OP_DELETE_VOLUME,
+ GD_OP_START_VOLUME,
+ GD_OP_STOP_VOLUME,
+ GD_OP_RENAME_VOLUME,
+ GD_OP_DEFRAG_VOLUME,
+ GD_OP_ADD_BRICK,
+ GD_OP_REMOVE_BRICK,
+ GD_OP_REPLACE_BRICK,
+ GD_OP_SYNC_VOLUME,
+ GD_OP_MAX,
+} glusterd_op_t;
+
+typedef struct glusterd_op_sm_state_info_ {
+ glusterd_op_sm_state_t state;
+ struct timeval time;
+} glusterd_op_sm_state_info_t;
+
+struct glusterd_op_info_ {
+ glusterd_op_sm_state_info_t state;
+ int32_t pending_count;
+ int32_t op_count;
+ glusterd_op_t op[GD_OP_MAX];
+ glusterd_op_t pending_op[GD_OP_MAX];
+ glusterd_op_t commit_op[GD_OP_MAX];
+ struct list_head op_peers;
+ void *op_ctx[GD_OP_MAX];
+};
+
+typedef struct glusterd_op_info_ glusterd_op_info_t;
+
+struct glusterd_op_create_volume_ctx_ {
+ char volume_name[GD_VOLUME_NAME_MAX];
+};
+
+typedef struct glusterd_op_create_volume_ctx_ glusterd_op_create_volume_ctx_t;
+
+
+struct glusterd_op_lock_ctx_ {
+ uuid_t uuid;
+ rpcsvc_request_t *req;
+};
+
+typedef struct glusterd_op_lock_ctx_ glusterd_op_lock_ctx_t;
+
+struct glusterd_op_stage_ctx_ {
+ rpcsvc_request_t *req;
+ void *stage_req;
+};
+
+typedef struct glusterd_op_stage_ctx_ glusterd_op_stage_ctx_t;
+
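+/* Commit reuses the stage context: both handlers need only the incoming
+ * rpcsvc request and the decoded gd1_mgmt_stage_op_req kept in stage_req
+ * (see glusterd_op_ac_stage_op/_commit_op in glusterd-op-sm.c). */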
+typedef glusterd_op_stage_ctx_t glusterd_op_commit_ctx_t;
+
+int
+glusterd_op_sm_new_event (glusterd_op_sm_event_type_t event_type,
+ glusterd_op_sm_event_t **new_event);
+int
+glusterd_op_sm_inject_event (glusterd_op_sm_event_t *event);
+
+int
+glusterd_op_sm_init ();
+
+int
+glusterd_op_sm ();
+
+int32_t
+glusterd_op_set_ctx (glusterd_op_t op, void *ctx);
+
+int32_t
+glusterd_op_set_op (glusterd_op_t op);
+
+int
+glusterd_op_build_payload (glusterd_op_t op, gd1_mgmt_stage_op_req **req);
+
+
+void *
+glusterd_op_get_ctx (glusterd_op_t op);
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c
new file mode 100644
index 00000000000..53a0f5af8f8
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.c
@@ -0,0 +1,384 @@
+/*
+ Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+  GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+#include <time.h>
+#include <sys/uio.h>
+#include <sys/resource.h>
+
+#include <libgen.h>
+#include <uuid/uuid.h>
+
+//#include "transport.h"
+#include "fnmatch.h"
+#include "xlator.h"
+#include "protocol-common.h"
+#include "glusterd.h"
+#include "call-stub.h"
+#include "defaults.h"
+#include "list.h"
+#include "dict.h"
+#include "compat.h"
+#include "compat-errno.h"
+#include "statedump.h"
+#include "glusterd-sm.h"
+
+static struct list_head gd_friend_sm_queue;
+
+static int
+glusterd_ac_none (glusterd_friend_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_ac_friend_add (glusterd_friend_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ call_frame_t *frame = NULL;
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+
+
+ GF_ASSERT (event);
+ peerinfo = event->peerinfo;
+
+ this = THIS;
+ conf = this->private;
+
+ GF_ASSERT (conf);
+ GF_ASSERT (conf->mgmt);
+
+ proc = &conf->mgmt->proctable[GD_MGMT_FRIEND_ADD];
+ if (proc->fn) {
+ frame = create_frame (this, this->ctx->pool);
+ if (!frame) {
+ goto out;
+ }
+ frame->local = ctx;
+ ret = proc->fn (frame, this, event);
+ }
+
+/* ret = glusterd_xfer_friend_req_msg (peerinfo, THIS);
+
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to probe: %s", hostname);
+ }
+*/
+
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_ac_friend_probe (glusterd_friend_sm_event_t *event, void *ctx)
+{
+ int ret = -1;
+ char *hostname = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+ call_frame_t *frame = NULL;
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+ glusterd_probe_ctx_t *probe_ctx = NULL;
+
+ GF_ASSERT (ctx);
+
+ probe_ctx = ctx;
+ hostname = probe_ctx->hostname;
+
+ this = THIS;
+
+ GF_ASSERT (this);
+
+ conf = this->private;
+
+ GF_ASSERT (conf);
+ if (!conf->mgmt)
+ goto out;
+
+
+ proc = &conf->mgmt->proctable[GD_MGMT_PROBE_QUERY];
+ if (proc->fn) {
+ frame = create_frame (this, this->ctx->pool);
+ if (!frame) {
+ goto out;
+ }
+ frame->local = ctx;
+ ret = proc->fn (frame, this, hostname);
+ }
+
+
+/* ret = glusterd_friend_probe (hostname);
+
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Unable to probe: %s", hostname);
+ }
+*/
+
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+/*static int
+glusterd_ac_none (void *ctx)
+{
+ int ret = 0;
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}*/
+
+static int
+glusterd_ac_handle_friend_add_req (glusterd_friend_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ uuid_t uuid;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_friend_req_ctx_t *ev_ctx = NULL;
+
+ GF_ASSERT (ctx);
+ ev_ctx = ctx;
+ uuid_copy (uuid, ev_ctx->uuid);
+ peerinfo = event->peerinfo;
+ GF_ASSERT (peerinfo);
+ uuid_copy (peerinfo->uuid, ev_ctx->uuid);
+
+ ret = glusterd_xfer_friend_add_resp (ev_ctx->req, ev_ctx->hostname);
+
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+static int
+glusterd_friend_sm_transition_state (glusterd_peerinfo_t *peerinfo,
+ glusterd_sm_t *state,
+ glusterd_friend_sm_event_type_t event_type)
+{
+
+ GF_ASSERT (state);
+ GF_ASSERT (peerinfo);
+
+ //peerinfo->state.state = state;
+
+ gf_log ("", GF_LOG_NORMAL, "Transitioning from %d to %d",
+ peerinfo->state.state, state[event_type].next_state);
+ peerinfo->state.state = state[event_type].next_state;
+ return 0;
+}
+
+
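+/* Friend SM state tables: one array per glusterd_friend_sm_state_t, each
+ * indexed by glusterd_friend_sm_event_type_t, mirroring the layout of the
+ * op SM tables in glusterd-op-sm.c.
+ */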
+glusterd_sm_t glusterd_state_default [] = {
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_none},
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_friend_probe},//EV_PROBE
+ {GD_FRIEND_STATE_REQ_SENT, glusterd_ac_friend_add}, //EV_INIT_FRIEND_REQ
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_none}, //EVENT_RCVD_ACC
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_none}, //EVENT_RCVD_RJT
+ {GD_FRIEND_STATE_REQ_RCVD, glusterd_ac_handle_friend_add_req}, //EVENT_RCV_FRIEND_REQ
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_none}, //EVENT_REMOVE_FRIEND
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_none}, //EVENT_MAX
+};
+
+
+glusterd_sm_t glusterd_state_req_sent [] = {
+ {GD_FRIEND_STATE_REQ_SENT, glusterd_ac_none}, //EVENT_NONE,
+ {GD_FRIEND_STATE_REQ_SENT, glusterd_ac_none}, //EVENT_PROBE,
+ {GD_FRIEND_STATE_REQ_SENT, glusterd_ac_none}, //EVENT_INIT_FRIEND_REQ,
+ {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none}, //EVENT_RCVD_ACC
+ {GD_FRIEND_STATE_REJECTED, glusterd_ac_none}, //EVENT_RCVD_RJT
+ {GD_FRIEND_STATE_REQ_SENT_RCVD, glusterd_ac_handle_friend_add_req}, //EVENT_RCV_FRIEND_REQ
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_none}, //EVENT_REMOVE_FRIEND
+ {GD_FRIEND_STATE_REQ_SENT, glusterd_ac_none},//EVENT_MAX
+};
+
+glusterd_sm_t glusterd_state_req_rcvd [] = {
+ {GD_FRIEND_STATE_REQ_RCVD, glusterd_ac_none}, //EVENT_NONE,
+ {GD_FRIEND_STATE_REQ_RCVD, glusterd_ac_friend_probe}, //EVENT_PROBE,
+ {GD_FRIEND_STATE_REQ_SENT_RCVD, glusterd_ac_friend_add}, //EVENT_INIT_FRIEND_REQ,
+ {GD_FRIEND_STATE_REQ_RCVD, glusterd_ac_none}, //EVENT_RCVD_ACC
+ {GD_FRIEND_STATE_REQ_RCVD, glusterd_ac_none}, //EVENT_RCVD_RJT
+ {GD_FRIEND_STATE_REQ_RCVD, glusterd_ac_none}, //EVENT_RCV_FRIEND_REQ
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_none}, //EVENT_REMOVE_FRIEND
+ {GD_FRIEND_STATE_REQ_RCVD, glusterd_ac_none},//EVENT_MAX
+};
+
+glusterd_sm_t glusterd_state_befriended [] = {
+ {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none}, //EVENT_NONE,
+ {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none}, //EVENT_PROBE,
+ {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none}, //EVENT_INIT_FRIEND_REQ,
+ {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none}, //EVENT_RCVD_ACC
+ {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none}, //EVENT_RCVD_RJT
+ {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none}, //EVENT_RCV_FRIEND_REQ
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_none}, //EVENT_REMOVE_FRIEND
+ {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none},//EVENT_MAX
+};
+
+glusterd_sm_t glusterd_state_req_sent_rcvd [] = {
+ {GD_FRIEND_STATE_REQ_SENT_RCVD, glusterd_ac_none}, //EVENT_NONE,
+ {GD_FRIEND_STATE_REQ_SENT_RCVD, glusterd_ac_none}, //EVENT_PROBE,
+ {GD_FRIEND_STATE_REQ_SENT_RCVD, glusterd_ac_none}, //EVENT_INIT_FRIEND_REQ,
+ {GD_FRIEND_STATE_BEFRIENDED, glusterd_ac_none}, //EVENT_RCVD_ACC
+ {GD_FRIEND_STATE_REJECTED, glusterd_ac_none}, //EVENT_RCVD_RJT
+ {GD_FRIEND_STATE_REQ_SENT_RCVD, glusterd_ac_none}, //EVENT_RCV_FRIEND_REQ
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_none}, //EVENT_REMOVE_FRIEND
+ {GD_FRIEND_STATE_REQ_SENT_RCVD, glusterd_ac_none},//EVENT_MAX
+};
+
+glusterd_sm_t glusterd_state_rejected [] = {
+ {GD_FRIEND_STATE_REJECTED, glusterd_ac_none}, //EVENT_NONE,
+ {GD_FRIEND_STATE_REJECTED, glusterd_ac_friend_probe}, //EVENT_PROBE,
+ {GD_FRIEND_STATE_REQ_SENT, glusterd_ac_friend_add}, //EVENT_INIT_FRIEND_REQ,
+ {GD_FRIEND_STATE_REJECTED, glusterd_ac_none}, //EVENT_RCVD_ACC
+ {GD_FRIEND_STATE_REJECTED, glusterd_ac_none}, //EVENT_RCVD_RJT
+ {GD_FRIEND_STATE_REQ_RCVD, glusterd_ac_handle_friend_add_req}, //EVENT_RCV_FRIEND_REQ
+ {GD_FRIEND_STATE_DEFAULT, glusterd_ac_none}, //EVENT_REMOVE_FRIEND
+ {GD_FRIEND_STATE_REQ_RCVD, glusterd_ac_none},//EVENT_MAX
+};
+
+glusterd_sm_t *glusterd_friend_state_table [] = {
+ glusterd_state_default,
+ glusterd_state_req_sent,
+ glusterd_state_req_rcvd,
+ glusterd_state_befriended,
+ glusterd_state_req_sent_rcvd,
+ glusterd_state_rejected,
+};
+
+int
+glusterd_friend_sm_new_event (glusterd_friend_sm_event_type_t event_type,
+ glusterd_friend_sm_event_t **new_event)
+{
+ glusterd_friend_sm_event_t *event = NULL;
+
+ GF_ASSERT (new_event);
+ GF_ASSERT (GD_FRIEND_EVENT_NONE <= event_type &&
+ GD_FRIEND_EVENT_MAX > event_type);
+
+ event = GF_CALLOC (1, sizeof (*event), gf_gld_mt_friend_sm_event_t);
+
+ if (!event)
+ return -1;
+
+ *new_event = event;
+ event->event = event_type;
+ INIT_LIST_HEAD (&event->list);
+
+ return 0;
+}
+
+int
+glusterd_friend_sm_inject_event (glusterd_friend_sm_event_t *event)
+{
+ GF_ASSERT (event);
+ gf_log ("glusterd", GF_LOG_NORMAL, "Enqueuing event: %d",
+ event->event);
+ list_add_tail (&event->list, &gd_friend_sm_queue);
+
+ return 0;
+}
+
+
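+/* Drain gd_friend_sm_queue. Probe and incoming friend-req events can
+ * arrive before any peerinfo exists for the sender, so one is created on
+ * the fly for those two event types; every event then goes through the
+ * usual lookup-handler-transition-free cycle.
+ */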
+int
+glusterd_friend_sm ()
+{
+ glusterd_friend_sm_event_t *event = NULL;
+ glusterd_friend_sm_event_t *tmp = NULL;
+ int ret = -1;
+ glusterd_friend_sm_ac_fn handler = NULL;
+ glusterd_sm_t *state = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_friend_sm_event_type_t event_type = 0;
+
+ list_for_each_entry_safe (event, tmp, &gd_friend_sm_queue, list) {
+
+ list_del_init (&event->list);
+ peerinfo = event->peerinfo;
+ event_type = event->event;
+
+ if (!peerinfo &&
+ (GD_FRIEND_EVENT_PROBE == event_type ||
+ GD_FRIEND_EVENT_RCVD_FRIEND_REQ == event_type)) {
+ ret = glusterd_friend_add (NULL, GD_PEER_STATE_NONE, NULL, NULL,
+ &peerinfo);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR, "Unable to add peer, "
+ "ret = %d", ret);
+ continue;
+ }
+ GF_ASSERT (peerinfo);
+ event->peerinfo = peerinfo;
+ }
+
+
+ state = glusterd_friend_state_table[peerinfo->state.state];
+
+ GF_ASSERT (state);
+
+ handler = state[event_type].handler;
+ GF_ASSERT (handler);
+
+ ret = handler (event, event->ctx);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR, "handler returned: "
+ "%d", ret);
+ return ret;
+ }
+
+ ret = glusterd_friend_sm_transition_state (peerinfo, state, event_type);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR, "Unable to transition"
+ "state from %d to %d", peerinfo->state.state,
+ state[event_type].next_state);
+ return ret;
+ }
+
+ GF_FREE (event);
+ }
+
+
+ ret = 0;
+
+ return ret;
+}
+
+
+int
+glusterd_friend_sm_init ()
+{
+ INIT_LIST_HEAD (&gd_friend_sm_queue);
+ return 0;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.h b/xlators/mgmt/glusterd/src/glusterd-sm.h
new file mode 100644
index 00000000000..087a4c301bc
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.h
@@ -0,0 +1,102 @@
+/*
+ Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _GLUSTERD_SM_H_
+#define _GLUSTERD_SM_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <pthread.h>
+#include <uuid/uuid.h>
+
+#include "glusterfs.h"
+#include "xlator.h"
+#include "logging.h"
+#include "call-stub.h"
+#include "authenticate.h"
+#include "fd.h"
+#include "byte-order.h"
+#include "glusterd.h"
+#include "rpcsvc.h"
+
+
+typedef enum glusterd_friend_sm_state_ {
+ GD_FRIEND_STATE_DEFAULT = 0,
+ GD_FRIEND_STATE_REQ_SENT,
+ GD_FRIEND_STATE_REQ_RCVD,
+ GD_FRIEND_STATE_BEFRIENDED,
+ GD_FRIEND_STATE_REQ_SENT_RCVD,
+ GD_FRIEND_STATE_REJECTED,
+ GD_FRIEND_STATE_MAX
+} glusterd_friend_sm_state_t;
+
+typedef enum glusterd_friend_sm_event_type_ {
+ GD_FRIEND_EVENT_NONE = 0,
+ GD_FRIEND_EVENT_PROBE,
+ GD_FRIEND_EVENT_INIT_FRIEND_REQ,
+ GD_FRIEND_EVENT_RCVD_ACC,
+ GD_FRIEND_EVENT_RCVD_RJT,
+ GD_FRIEND_EVENT_RCVD_FRIEND_REQ,
+ GD_FRIEND_EVENT_REMOVE_FRIEND,
+ GD_FRIEND_EVENT_MAX
+} glusterd_friend_sm_event_type_t;
+
+
+struct glusterd_friend_sm_event_ {
+ struct list_head list;
+ glusterd_peerinfo_t *peerinfo;
+ void *ctx;
+ glusterd_friend_sm_event_type_t event;
+};
+
+typedef struct glusterd_friend_sm_event_ glusterd_friend_sm_event_t;
+
+typedef int (*glusterd_friend_sm_ac_fn) (glusterd_friend_sm_event_t *, void *);
+
+typedef struct glusterd_sm_ {
+ glusterd_friend_sm_state_t next_state;
+ glusterd_friend_sm_ac_fn handler;
+} glusterd_sm_t;
+
+typedef struct glusterd_friend_req_ctx_ {
+ uuid_t uuid;
+ char *hostname;
+ rpcsvc_request_t *req;
+} glusterd_friend_req_ctx_t;
+
+typedef struct glusterd_probe_ctx_ {
+ char *hostname;
+ rpcsvc_request_t *req;
+} glusterd_probe_ctx_t;
+int
+glusterd_friend_sm_new_event (glusterd_friend_sm_event_type_t event_type,
+ glusterd_friend_sm_event_t **new_event);
+int
+glusterd_friend_sm_inject_event (glusterd_friend_sm_event_t *event);
+
+int
+glusterd_friend_sm_init ();
+
+int
+glusterd_friend_sm ();
+
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
new file mode 100644
index 00000000000..0e3168036cb
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -0,0 +1,436 @@
+/*
+ Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+#include <inttypes.h>
+
+
+#include "globals.h"
+#include "glusterfs.h"
+#include "compat.h"
+#include "dict.h"
+//#include "protocol.h"
+//#include "transport.h"
+#include "xlator.h"
+#include "logging.h"
+#include "timer.h"
+#include "defaults.h"
+#include "compat.h"
+#include "compat-errno.h"
+#include "statedump.h"
+#include "glusterd-mem-types.h"
+#include "glusterd.h"
+#include "glusterd-sm.h"
+#include "glusterd-utils.h"
+
+#include <sys/resource.h>
+#include <inttypes.h>
+
+static glusterd_lock_t lock;
+
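+/* A single cluster-wide lock per glusterd: lock.owner holds the uuid of
+ * the peer currently holding it, and a cleared uuid means unheld. The
+ * timestamp field is not maintained yet (see the TODOs below).
+ */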
+static int32_t
+glusterd_get_lock_owner (uuid_t *uuid)
+{
+        uuid_copy (*uuid, lock.owner);
+ return 0;
+}
+
+static int32_t
+glusterd_set_lock_owner (uuid_t owner)
+{
+ uuid_copy (lock.owner, owner);
+ //TODO: set timestamp
+ return 0;
+}
+
+static int32_t
+glusterd_unset_lock_owner (uuid_t owner)
+{
+ uuid_clear (lock.owner);
+ //TODO: set timestamp
+ return 0;
+}
+
+int32_t
+glusterd_lock (uuid_t uuid)
+{
+
+ uuid_t owner;
+ char new_owner_str[50];
+ char owner_str[50];
+ int ret = -1;
+
+ GF_ASSERT (uuid);
+ uuid_unparse (uuid, new_owner_str);
+
+ glusterd_get_lock_owner (&owner);
+
+ if (!uuid_is_null (owner)) {
+ uuid_unparse (owner, owner_str);
+ gf_log ("glusterd", GF_LOG_ERROR, "Unable to get lock"
+ " for uuid: %s, lock held by: %s", new_owner_str,
+ owner_str);
+ goto out;
+ }
+
+ ret = glusterd_set_lock_owner (uuid);
+
+ if (!ret) {
+ gf_log ("glusterd", GF_LOG_NORMAL, "Cluster lock held by"
+ " %s", new_owner_str);
+ }
+
+out:
+ return ret;
+}
+
+
+int32_t
+glusterd_unlock (uuid_t uuid)
+{
+ uuid_t owner;
+ char new_owner_str[50];
+ char owner_str[50];
+ int32_t ret = -1;
+
+ GF_ASSERT (uuid);
+ uuid_unparse (uuid, new_owner_str);
+
+ glusterd_get_lock_owner (&owner);
+
+        if (uuid_is_null (owner)) {
+ gf_log ("glusterd", GF_LOG_ERROR, "Cluster lock not held!");
+ goto out;
+ }
+
+ ret = uuid_compare (uuid, owner);
+
+ if (ret) {
+ uuid_unparse (owner, owner_str);
+ gf_log ("glusterd", GF_LOG_ERROR, "Cluster lock held by %s"
+ " ,unlock req from %s!", owner_str, new_owner_str);
+ goto out;
+ }
+
+ ret = glusterd_unset_lock_owner (uuid);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR, "Unable to clear cluster "
+ "lock");
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+
+int
+glusterd_get_uuid (uuid_t *uuid)
+{
+ glusterd_conf_t *priv = NULL;
+
+ priv = THIS->private;
+
+ GF_ASSERT (priv);
+
+ uuid_copy (*uuid, priv->uuid);
+
+ return 0;
+}
+
+int
+glusterd_submit_request (glusterd_peerinfo_t *peerinfo, void *req,
+ call_frame_t *frame, rpc_clnt_prog_t *prog,
+ int procnum, struct iobref *iobref,
+ gd_serialize_t sfunc, xlator_t *this,
+ fop_cbk_fn_t cbkfn)
+{
+ int ret = -1;
+ struct iobuf *iobuf = NULL;
+ int count = 0;
+ char new_iobref = 0, start_ping = 0;
+ struct iovec iov = {0, };
+
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (this);
+
+ iobuf = iobuf_get (this->ctx->iobuf_pool);
+ if (!iobuf) {
+ goto out;
+ };
+
+ if (!iobref) {
+ iobref = iobref_new ();
+ if (!iobref) {
+ goto out;
+ }
+
+ new_iobref = 1;
+ }
+
+ iobref_add (iobref, iobuf);
+
+ iov.iov_base = iobuf->ptr;
+ iov.iov_len = 128 * GF_UNIT_KB;
+
+ /* Create the xdr payload */
+ if (req && sfunc) {
+ ret = sfunc (iov, req);
+ if (ret == -1) {
+ goto out;
+ }
+ iov.iov_len = ret;
+ count = 1;
+ }
+ /* Send the msg */
+ ret = rpc_clnt_submit (peerinfo->rpc, prog, procnum, cbkfn,
+ &iov, count,
+ NULL, 0, iobref, frame);
+
+ if (ret == 0) {
+ pthread_mutex_lock (&peerinfo->rpc->conn.lock);
+ {
+ if (!peerinfo->rpc->conn.ping_started) {
+ start_ping = 1;
+ }
+ }
+ pthread_mutex_unlock (&peerinfo->rpc->conn.lock);
+ }
+
+        if (start_ping) {
+                /* ping on the mgmt connection is not wired up yet */
+                //client_start_ping ((void *) this);
+        }
+
+        ret = 0;
+out:
+ if (new_iobref) {
+ iobref_unref (iobref);
+ }
+
+ iobuf_unref (iobuf);
+
+ return ret;
+}
+
+
+struct iobuf *
+glusterd_serialize_reply (rpcsvc_request_t *req, void *arg,
+ gd_serialize_t sfunc, struct iovec *outmsg)
+{
+ struct iobuf *iob = NULL;
+ ssize_t retlen = -1;
+
+ /* First, get the io buffer into which the reply in arg will
+ * be serialized.
+ */
+ iob = iobuf_get (req->conn->svc->ctx->iobuf_pool);
+ if (!iob) {
+ gf_log ("", GF_LOG_ERROR, "Failed to get iobuf");
+ goto ret;
+ }
+
+ iobuf_to_iovec (iob, outmsg);
+        /* Use the given serializer to translate the given C structure in arg
+ * to XDR format which will be written into the buffer in outmsg.
+ */
+        /* retlen is used to receive the error since size_t is unsigned and we
+ * need -1 for error notification during encoding.
+ */
+ retlen = sfunc (*outmsg, arg);
+ if (retlen == -1) {
+ gf_log ("", GF_LOG_ERROR, "Failed to encode message");
+ goto ret;
+ }
+
+ outmsg->iov_len = retlen;
+ret:
+ if (retlen == -1) {
+ iobuf_unref (iob);
+ iob = NULL;
+ }
+
+ return iob;
+}
+
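+/* A sketch of the expected call shape from an op handler; the rsp type and
+ * serializer names are illustrative, not confirmed by this file:
+ *
+ *      gd1_mgmt_cluster_lock_rsp rsp = {{0},};
+ *      rsp.op_ret = status;
+ *      glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
+ *                             (gd_serialize_t)gd_xdr_serialize_mgmt_cluster_lock_rsp);
+ */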
+int
+glusterd_submit_reply (rpcsvc_request_t *req, void *arg,
+ struct iovec *payload, int payloadcount,
+ struct iobref *iobref, gd_serialize_t sfunc)
+{
+ struct iobuf *iob = NULL;
+ int ret = -1;
+ struct iovec rsp = {0,};
+ char new_iobref = 0;
+
+ if (!req) {
+ GF_ASSERT (req);
+ goto out;
+ }
+
+
+ if (!iobref) {
+ iobref = iobref_new ();
+ if (!iobref) {
+ gf_log ("", GF_LOG_ERROR, "out of memory");
+ goto out;
+ }
+
+ new_iobref = 1;
+ }
+
+ iob = glusterd_serialize_reply (req, arg, sfunc, &rsp);
+ if (!iob) {
+ gf_log ("", GF_LOG_ERROR, "Failed to serialize reply");
+ goto out;
+ }
+
+ iobref_add (iobref, iob);
+
+ ret = rpcsvc_submit_generic (req, &rsp, 1, payload, payloadcount,
+ iobref);
+
+        /* Now that we've done our job of handing the message to the RPC layer
+         * we can safely unref the iob: the RPC layer is expected to have
+         * taken its own ref on the iob while adding it to its txlist.
+         */
+ iobuf_unref (iob);
+ if (ret == -1) {
+ gf_log ("", GF_LOG_ERROR, "Reply submission failed");
+ goto out;
+ }
+
+ ret = 0;
+out:
+
+ if (new_iobref) {
+ iobref_unref (iobref);
+ }
+
+ return ret;
+}
+
+gf_boolean_t
+glusterd_check_volume_exists (char *volname)
+{
+ char pathname[1024] = {0,};
+ struct stat stbuf = {0,};
+ int32_t ret = -1;
+
+ snprintf (pathname, 1024, "%s/vols/%s", GLUSTERD_DEFAULT_WORKDIR,
+ volname);
+
+ ret = stat (pathname, &stbuf);
+
+ if (ret) {
+ gf_log ("", GF_LOG_DEBUG, "Volume %s does not exist."
+ "stat failed with errno: %d", volname, errno);
+ return _gf_false;
+ }
+
+ return _gf_true;
+}
+
+int32_t
+glusterd_volinfo_new (glusterd_volinfo_t **volinfo)
+{
+ glusterd_volinfo_t *new_volinfo = NULL;
+ int32_t ret = -1;
+
+ GF_ASSERT (volinfo);
+
+ new_volinfo = GF_CALLOC (1, sizeof(*new_volinfo),
+ gf_gld_mt_glusterd_volinfo_t);
+
+ if (!new_volinfo)
+ goto out;
+
+ INIT_LIST_HEAD (&new_volinfo->vol_list);
+ INIT_LIST_HEAD (&new_volinfo->bricks);
+
+ *volinfo = new_volinfo;
+
+ ret = 0;
+
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd_brickinfo_new (glusterd_brickinfo_t **brickinfo)
+{
+ glusterd_brickinfo_t *new_brickinfo = NULL;
+ int32_t ret = -1;
+
+ GF_ASSERT (brickinfo);
+
+ new_brickinfo = GF_CALLOC (1, sizeof(*new_brickinfo),
+ gf_gld_mt_glusterd_brickinfo_t);
+
+ if (!new_brickinfo)
+ goto out;
+
+ INIT_LIST_HEAD (&new_brickinfo->brick_list);
+
+ *brickinfo = new_brickinfo;
+
+ ret = 0;
+
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
+glusterd_brickinfo_from_brick (char *brick, glusterd_brickinfo_t **brickinfo)
+{
+ int32_t ret = -1;
+ glusterd_brickinfo_t *new_brickinfo = NULL;
+ char *hostname = NULL;
+ char *path = NULL;
+
+ GF_ASSERT (brick);
+ GF_ASSERT (brickinfo);
+
+ hostname = strtok (brick, ":");
+ path = strtok (NULL, ":");
+
+ GF_ASSERT (hostname);
+ GF_ASSERT (path);
+
+ ret = glusterd_brickinfo_new (&new_brickinfo);
+
+ if (ret)
+ goto out;
+
+        strncpy (new_brickinfo->hostname, hostname,
+                 sizeof (new_brickinfo->hostname) - 1);
+        strncpy (new_brickinfo->path, path,
+                 sizeof (new_brickinfo->path) - 1);
+
+ *brickinfo = new_brickinfo;
+
+ ret = 0;
+out:
+ gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
new file mode 100644
index 00000000000..01c485b081b
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -0,0 +1,80 @@
+/*
+ Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _GLUSTERD_UTILS_H_
+#define _GLUSTERD_UTILS_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <pthread.h>
+#include <uuid/uuid.h>
+
+#include "glusterfs.h"
+#include "xlator.h"
+#include "logging.h"
+#include "call-stub.h"
+#include "authenticate.h"
+#include "fd.h"
+#include "byte-order.h"
+#include "glusterd.h"
+#include "rpc-clnt.h"
+
+struct glusterd_lock_ {
+ uuid_t owner;
+ time_t timestamp;
+};
+
+typedef struct glusterd_lock_ glusterd_lock_t;
+
+int32_t
+glusterd_lock (uuid_t new_owner);
+
+int32_t
+glusterd_unlock (uuid_t owner);
+
+int32_t
+glusterd_get_uuid (uuid_t *uuid);
+
+int
+glusterd_submit_reply (rpcsvc_request_t *req, void *arg,
+ struct iovec *payload, int payloadcount,
+ struct iobref *iobref, gd_serialize_t sfunc);
+
+int
+glusterd_submit_request (glusterd_peerinfo_t *peerinfo, void *req,
+ call_frame_t *frame, struct rpc_clnt_program *prog,
+ int procnum, struct iobref *iobref,
+ gd_serialize_t sfunc, xlator_t *this,
+ fop_cbk_fn_t cbkfn);
+
+int32_t
+glusterd_volinfo_new (glusterd_volinfo_t **volinfo);
+
+gf_boolean_t
+glusterd_check_volume_exists (char *volname);
+
+int32_t
+glusterd_brickinfo_new (glusterd_brickinfo_t **brickinfo);
+
+int32_t
+glusterd_brickinfo_from_brick (char *brick, glusterd_brickinfo_t **brickinfo);
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
new file mode 100644
index 00000000000..9c688c136b9
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd.c
@@ -0,0 +1,478 @@
+/*
+ Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+  GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+#include <time.h>
+#include <sys/uio.h>
+#include <sys/resource.h>
+
+#include <libgen.h>
+#include <uuid/uuid.h>
+
+#include "glusterd.h"
+#include "rpcsvc.h"
+#include "fnmatch.h"
+#include "xlator.h"
+//#include "protocol.h"
+#include "call-stub.h"
+#include "defaults.h"
+#include "list.h"
+#include "dict.h"
+#include "compat.h"
+#include "compat-errno.h"
+#include "statedump.h"
+//#include "md5.h"
+#include "glusterd-sm.h"
+#include "glusterd-op-sm.h"
+
+
+static uuid_t glusterd_uuid;
+extern struct rpcsvc_program glusterd1_mop_prog;
+extern struct rpc_clnt_program glusterd3_1_mgmt_prog;
+
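+/* Identity of this glusterd instance. glusterd_retrieve_uuid () is still a
+ * stub returning -1, so a fresh uuid is generated on every start and
+ * glusterd_store_uuid () persists nothing yet.
+ */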
+static int
+glusterd_retrieve_uuid ()
+{
+ return -1;
+}
+
+static int
+glusterd_store_uuid ()
+{
+ return 0;
+}
+
+static int
+glusterd_uuid_init ()
+{
+ int ret = -1;
+ char str[50];
+
+ ret = glusterd_retrieve_uuid ();
+
+        if (!ret) {
+                uuid_unparse (glusterd_uuid, str);
+                gf_log ("glusterd", GF_LOG_NORMAL,
+                        "retrieved UUID: %s", str);
+                return 0;
+        }
+
+ uuid_generate (glusterd_uuid);
+ uuid_unparse (glusterd_uuid, str);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "generated UUID: %s",str);
+
+ ret = glusterd_store_uuid ();
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to store generated UUID");
+ return ret;
+ }
+
+ return 0;
+}
+
+
+/* xxx_MOPS */
+
+#if 0
+
+#endif
+
+
+
+
+
+
+
+
+
+
+/*
+ * glusterd_nop_cbk - nop callback for server protocol
+ * @frame: call frame
+ * @cookie:
+ * @this:
+ * @op_ret: return value
+ * @op_errno: errno
+ *
+ * not for external reference
+ */
+/*int
+glusterd_nop_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno)
+{
+ glusterd_state_t *state = NULL;
+
+ state = GLUSTERD_CALL_STATE(frame);
+
+ if (state)
+ free_state (state);
+ STACK_DESTROY (frame->root);
+ return 0;
+}
+*/
+
+
+int
+glusterd_priv (xlator_t *this)
+{
+ return 0;
+}
+
+
+
+int32_t
+mem_acct_init (xlator_t *this)
+{
+ int ret = -1;
+
+ if (!this)
+ return ret;
+
+ ret = xlator_mem_acct_init (this, gf_gld_mt_end + 1);
+
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Memory accounting init"
+ " failed");
+ return ret;
+ }
+
+ return ret;
+}
+
+int
+glusterd_rpcsvc_notify (rpcsvc_t *rpc, void *xl, rpcsvc_event_t event,
+ void *data)
+{
+ xlator_t *this = NULL;
+ rpc_transport_t *xprt = NULL;
+
+ if (!xl || !data) {
+ gf_log ("glusterd", GF_LOG_WARNING,
+ "Calling rpc_notify without initializing");
+ goto out;
+ }
+
+ this = xl;
+ xprt = data;
+
+ switch (event) {
+ case RPCSVC_EVENT_ACCEPT:
+ {
+ /* Have a structure per new connection */
+ /* TODO: Should we create anything here at all ? * /
+ conn = create_server_conn_state (this, xprt);
+ if (!conn)
+ goto out;
+
+ xprt->protocol_private = conn;
+ */
+ xprt->mydata = this;
+ break;
+ }
+ case RPCSVC_EVENT_DISCONNECT:
+ /* conn = get_server_conn_state (this, xprt);
+ if (conn)
+ destroy_server_conn_state (conn);
+ */
+ break;
+ default:
+ break;
+ }
+
+out:
+ return 0;
+}
+
+/*
+ * init - called during glusterd initialization
+ *
+ * @this:
+ *
+ */
+int
+init (xlator_t *this)
+{
+ int32_t ret = -1;
+ rpcsvc_t *rpc = NULL;
+ glusterd_conf_t *conf = NULL;
+ data_t *dir_data = NULL;
+ char dirname [PATH_MAX];
+ struct stat buf = {0,};
+ char *port_str = NULL;
+ int port_num = 0;
+
+
+ dir_data = dict_get (this->options, "working-directory");
+
+ if (!dir_data) {
+ //Use default working dir
+ strncpy (dirname, GLUSTERD_DEFAULT_WORKDIR, PATH_MAX);
+ } else {
+ strncpy (dirname, dir_data->data, PATH_MAX);
+ }
+
+ ret = stat (dirname, &buf);
+ if ((ret != 0) && (ENOENT != errno)) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "stat fails on %s, exiting. (errno = %d)",
+ dirname, errno);
+ exit (1);
+ }
+
+ if ((!ret) && (!S_ISDIR(buf.st_mode))) {
+ gf_log (this->name, GF_LOG_CRITICAL,
+ "Provided working area %s is not a directory,"
+ "exiting", dirname);
+ exit (1);
+ }
+
+
+ if ((-1 == ret) && (ENOENT == errno)) {
+                ret = mkdir (dirname, 0777); /* needs search bit; umask applies */
+
+ if (-1 == ret) {
+ gf_log (this->name, GF_LOG_CRITICAL,
+ "Unable to create directory %s"
+ " ,errno = %d", dirname, errno);
+ }
+ }
+
+ gf_log (this->name, GF_LOG_NORMAL, "Using %s as working directory",
+ dirname);
+
+
+ rpc = rpcsvc_init (this->ctx, this->options);
+ if (rpc == NULL) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "failed to init rpc");
+ goto out;
+ }
+
+ ret = rpcsvc_register_notify (rpc, glusterd_rpcsvc_notify, this);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "rpcsvc_register_notify returned %d", ret);
+ goto out;
+ }
+
+ glusterd1_mop_prog.options = this->options;
+ port_str = getenv ("GLUSTERD_LOCAL_PORT");
+ if (port_str) {
+ port_num = atoi (port_str);
+ glusterd1_mop_prog.progport = port_num;
+ }
+
+ ret = rpcsvc_program_register (rpc, glusterd1_mop_prog);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "rpcsvc_program_register returned %d", ret);
+ goto out;
+ }
+
+//TODO: Waiting on handshake code
+/* gluster_handshake_prog.options = this->options;
+ ret = rpcsvc_program_register (conf->rpc, gluster_handshake_prog);
+ if (ret)
+ goto out;
+*/
+ conf = GF_CALLOC (1, sizeof (glusterd_conf_t),
+ gf_gld_mt_glusterd_conf_t);
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+ INIT_LIST_HEAD (&conf->peers);
+ INIT_LIST_HEAD (&conf->volumes);
+ pthread_mutex_init (&conf->mutex, NULL);
+ conf->rpc = rpc;
+ conf->mgmt = &glusterd3_1_mgmt_prog;
+ strncpy (conf->workdir, dirname, PATH_MAX);
+
+ this->private = conf;
+ //this->ctx->top = this;
+
+ ret = glusterd_uuid_init ();
+
+ if (ret < 0)
+ goto out;
+
+ glusterd_friend_sm_init ();
+ glusterd_op_sm_init ();
+
+ memcpy(conf->uuid, glusterd_uuid, sizeof (uuid_t));
+
+ ret = 0;
+out:
+ return ret;
+}
+
+
+
+/*int
+glusterd_pollin (xlator_t *this, transport_t *trans)
+{
+ char *hdr = NULL;
+ size_t hdrlen = 0;
+ int ret = -1;
+ struct iobuf *iobuf = NULL;
+
+
+ ret = transport_receive (trans, &hdr, &hdrlen, &iobuf);
+
+ if (ret == 0)
+ ret = glusterd_interpret (this, trans, hdr,
+ hdrlen, iobuf);
+
+ ret = glusterd_friend_sm ();
+
+ glusterd_op_sm ();
+
+ GF_FREE (hdr);
+
+ return ret;
+}
+*/
+
+/*
+ * fini - finish function for server protocol, called before
+ * unloading server protocol.
+ *
+ * @this:
+ *
+ */
+void
+fini (xlator_t *this)
+{
+ glusterd_conf_t *conf = this->private;
+
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+ GF_FREE (conf);
+ this->private = NULL;
+out:
+ return;
+}
+
+/*
+ * server_protocol_notify - notify function for server protocol
+ * @this:
+ * @trans:
+ * @event:
+ *
+ */
+int
+notify (xlator_t *this, int32_t event, void *data, ...)
+{
+ int ret = 0;
+ //transport_t *trans = data;
+ //peer_info_t *peerinfo = NULL;
+ //peer_info_t *myinfo = NULL;
+
+/* if (trans != NULL) {
+ peerinfo = &(trans->peerinfo);
+ myinfo = &(trans->myinfo);
+ }
+*/
+ switch (event) {
+
+ case GF_EVENT_POLLIN:
+ // ret = glusterd_pollin (this, trans);
+ break;
+
+
+ case GF_EVENT_POLLERR:
+ break;
+
+ case GF_EVENT_TRANSPORT_CLEANUP:
+ break;
+
+ default:
+ default_notify (this, event, data);
+ break;
+
+ }
+
+ return ret;
+}
+
+
+void
+glusterd_init (int signum)
+{
+ int ret = -1;
+
+ glusterfs_this_set ((xlator_t *)CTX->active);
+
+ ret = glusterd_probe_begin (NULL, "localhost");
+
+ if (!ret) {
+ ret = glusterd_friend_sm ();
+
+ glusterd_op_sm ();
+ }
+
+ gf_log ("glusterd", GF_LOG_WARNING, "ret = %d", ret);
+
+ //return 0;
+}
+
+void
+glusterd_op_init (int signum)
+{
+ int ret = -1;
+
+ glusterfs_this_set ((xlator_t *)CTX->active);
+
+ //ret = glusterd_create_volume ("vol1");
+
+/* if (!ret) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
+*/
+
+ gf_log ("glusterd", GF_LOG_WARNING, "ret = %d", ret);
+}
+
+
+
+//struct xlator_mops mops = {
+//};
+
+struct xlator_fops fops = {
+};
+
+struct xlator_cbks cbks = {
+};
+
+struct xlator_dumpops dumpops = {
+ .priv = glusterd_priv,
+};
+
+
+struct volume_options options[] = {
+        { .key = {"working-directory"},
+ .type = GF_OPTION_TYPE_PATH,
+ },
+
+ { .key = {NULL} },
+};
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
new file mode 100644
index 00000000000..b3d53244eff
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -0,0 +1,218 @@
+/*
+ Copyright (c) 2006-2009 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _GLUSTERD_H_
+#define _GLUSTERD_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <pthread.h>
+#include <uuid/uuid.h>
+
+#include "rpc-clnt.h"
+#include "glusterfs.h"
+#include "xlator.h"
+#include "logging.h"
+#include "call-stub.h"
+#include "authenticate.h"
+#include "fd.h"
+#include "byte-order.h"
+//#include "protocol.h"
+#include "glusterd-mem-types.h"
+#include "rpcsvc.h"
+#include "glusterd1.h"
+
+
+
+
+/*struct _glusterd_connection {
+ struct list_head list;
+ char *id;
+ int ref;
+ int active_transports;
+ pthread_mutex_t lock;
+ char disconnected;
+ xlator_t *bound_xl;
+};
+
+typedef struct _glusterd_connection glusterd_connection_t;
+*/
+
+
+typedef enum glusterd_peer_state_ {
+ GD_PEER_STATE_NONE = 0,
+ GD_PEER_STATE_INBOUND,
+ GD_PEER_STATE_OUTBOUND,
+ GD_PEER_STATE_FRIEND
+} glusterd_peer_state_t;
+
+
+typedef struct glusterd_peer_state_info_ {
+ glusterd_peer_state_t state;
+ struct timeval transition_time;
+} glusterd_peer_state_info_t;
+
+
+struct glusterd_peerinfo_ {
+ uuid_t uuid;
+ glusterd_peer_state_info_t state;
+ char *hostname;
+ int port;
+ struct list_head uuid_list;
+ struct list_head op_peers_list;
+ // struct list_head pending_uuid;
+ struct rpc_clnt *rpc;
+};
+
+typedef struct glusterd_peerinfo_ glusterd_peerinfo_t;
+
+typedef struct {
+ struct _volfile_ctx *volfile;
+ pthread_mutex_t mutex;
+ struct list_head peers;
+// struct list_head pending_peers;
+ gf_boolean_t verify_volfile_checksum;
+ gf_boolean_t trace;
+ uuid_t uuid;
+ char workdir[PATH_MAX];
+ rpcsvc_t *rpc;
+ rpc_clnt_prog_t *mgmt;
+ struct list_head volumes;
+} glusterd_conf_t;
+
+struct glusterd_brickinfo {
+ char hostname[1024];
+ char path[PATH_MAX];
+ struct list_head brick_list;
+};
+
+typedef struct glusterd_brickinfo glusterd_brickinfo_t;
+
+struct glusterd_volinfo_ {
+ char volname[1024];
+ int type;
+ int brick_count;
+ struct list_head vol_list;
+ struct list_head bricks;
+};
+
+typedef struct glusterd_volinfo_ glusterd_volinfo_t;
+
+
+#define GLUSTERD_DEFAULT_WORKDIR "/etc/glusterd"
+#define GLUSTERD_DEFAULT_PORT 4284
+
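+/* Encoders of this shape (the gd_xdr_serialize_* helpers) write 'args' in
+ * XDR form into 'outmsg' and return the encoded length, or -1 on failure;
+ * both glusterd_submit_request and glusterd_submit_reply take one. */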
+typedef ssize_t (*gd_serialize_t) (struct iovec outmsg, void *args);
+
+//void glusterd_init (int);
+
+int
+glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr);
+
+/*int
+glusterd_interpret (xlator_t *this, transport_t *trans,
+ char *hdr_p, size_t hdrlen, struct iobuf *iobuf);
+
+
+int
+glusterd_friend_probe (const char *hoststr);
+*/
+
+
+int
+glusterd_xfer_friend_add_resp (rpcsvc_request_t *req, char *hostname);
+
+int
+glusterd_friend_find (uuid_t uuid, char *hostname,
+ glusterd_peerinfo_t **peerinfo);
+
+int
+glusterd_friend_add (const char *hoststr,
+ glusterd_peer_state_t state,
+ uuid_t *uuid, struct rpc_clnt *rpc,
+ glusterd_peerinfo_t **friend);
+/*
+int
+glusterd_xfer_friend_req_msg (glusterd_peerinfo_t *peerinfo, xlator_t *this);
+
+int
+glusterd_xfer_cluster_lock_req (xlator_t *this, int32_t *lock_count);
+*/
+
+/*int
+glusterd_xfer_cluster_unlock_req (xlator_t *this, int32_t *unlock_count);
+*/
+
+int
+glusterd_op_lock_send_resp (rpcsvc_request_t *req, int32_t status);
+
+int
+glusterd_op_unlock_send_resp (rpcsvc_request_t *req, int32_t status);
+
+int
+glusterd_op_stage_send_resp (rpcsvc_request_t *req,
+ int32_t op, int32_t status);
+
+int
+glusterd_op_commit_send_resp (rpcsvc_request_t *req,
+                              int32_t op, int32_t status);
+
+int32_t
+glusterd_create_volume (rpcsvc_request_t *req, dict_t *dict);
+
+int
+glusterd_rpc_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event,
+ void *data);
+int
+glusterd_handle_incoming_friend_req (rpcsvc_request_t *req);
+
+int
+glusterd_handle_probe_query (rpcsvc_request_t *req);
+
+int
+glusterd_handle_cluster_lock (rpcsvc_request_t *req);
+
+int
+glusterd_handle_cluster_unlock (rpcsvc_request_t *req);
+
+int
+glusterd_handle_stage_op (rpcsvc_request_t *req);
+
+int
+glusterd_handle_commit_op (rpcsvc_request_t *req);
+
+int
+glusterd_handle_cli_probe (rpcsvc_request_t *req);
+
+int
+glusterd_handle_create_volume (rpcsvc_request_t *req);
+
+int
+glusterd_xfer_cli_probe_resp (rpcsvc_request_t *req, int32_t op_ret,
+ int32_t op_errno, char *hostname);
+
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd3_1-mops.c b/xlators/mgmt/glusterd/src/glusterd3_1-mops.c
new file mode 100644
index 00000000000..d377f09ac80
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd3_1-mops.c
@@ -0,0 +1,938 @@
+/*
+ Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ This file is part of GlusterFS.
+
+ GlusterFS is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ GlusterFS is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see
+ <http://www.gnu.org/licenses/>.
+*/
+
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "rpc-clnt.h"
+#include "glusterd1.h"
+#include "gd-xdr.h"
+#include "compat-errno.h"
+#include "glusterd-op-sm.h"
+#include "glusterd-sm.h"
+#include "glusterd.h"
+#include "protocol-common.h"
+#include "glusterd-utils.h"
+#include <sys/uio.h>
+
+
+#define SERVER_PATH_MAX (16 * 1024)
+
+
+extern glusterd_op_info_t opinfo;
+
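+/* NULL procedure: a no-op placeholder for now; the commented body
+   sketches the eventual common-rsp reply. */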
+int
+glusterd_null (rpcsvc_request_t *req)
+{
+/* gf_common_rsp rsp = {0,};
+
+ rsp.gfs_id = req->gfs_id;
+ //Accepted
+ rsp.op_ret = 0;
+
+ server_submit_reply (NULL, req, &rsp, NULL, 0, NULL,
+ (gfs_serialize_t)xdr_serialize_common_rsp);
+*/
+ return 0;
+}
+
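+/* Callback for an outgoing probe query: decodes the peer's response,
+   fills in any placeholder peerinfo for the host, and injects
+   GD_FRIEND_EVENT_INIT_FRIEND_REQ into the friend state machine. */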
+int
+glusterd3_1_probe_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gd1_mgmt_probe_rsp rsp = {{0},};
+ glusterd_conf_t *conf = NULL;
+ int ret = 0;
+        char str[50] = {0,};
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_peerinfo_t *dup_peerinfo = NULL;
+ glusterd_friend_sm_event_t *event = NULL;
+
+ conf = THIS->private;
+
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
+        /* NB: the response is decoded with the _req helper; this presumably
+           relies on probe req and rsp sharing one XDR layout */
+        ret = gd_xdr_to_mgmt_probe_req (*iov, &rsp);
+        if (ret < 0) {
+                gf_log ("glusterd", GF_LOG_ERROR,
+                        "Failed to decode probe response");
+                goto out;
+        }
+ uuid_unparse (rsp.uuid, str);
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received probe resp from uuid: %s, host: %s",
+ str, rsp.hostname);
+
+ ret = glusterd_friend_find (rsp.uuid, rsp.hostname, &peerinfo);
+
+ if (ret) {
+ GF_ASSERT (0);
+ }
+
+ if (!peerinfo->hostname) {
+ glusterd_friend_find (NULL, rsp.hostname, &dup_peerinfo);
+ GF_ASSERT (dup_peerinfo);
+ GF_ASSERT (dup_peerinfo->hostname);
+ peerinfo->hostname = gf_strdup (rsp.hostname);
+ peerinfo->rpc = dup_peerinfo->rpc;
+ list_del_init (&dup_peerinfo->uuid_list);
+ GF_FREE (dup_peerinfo->hostname);
+ GF_FREE (dup_peerinfo);
+ }
+ GF_ASSERT (peerinfo->hostname);
+ uuid_copy (peerinfo->uuid, rsp.uuid);
+
+ ret = glusterd_friend_sm_new_event
+ (GD_FRIEND_EVENT_INIT_FRIEND_REQ, &event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to get event");
+ goto out;
+ }
+
+ event->peerinfo = peerinfo;
+ event->ctx = ((call_frame_t *)myframe)->local;
+ ret = glusterd_friend_sm_inject_event (event);
+
+
+ if (!ret) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Received resp to probe req");
+
+out:
+        return ret;
+}
+
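+/* Callback for a friend-add request: maps the peer's ACC/RJT verdict to a
+   friend-sm event and relays the outcome to the CLI that began the probe. */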
+int
+glusterd3_1_friend_add_cbk (struct rpc_req * req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gd1_mgmt_friend_rsp rsp = {{0},};
+ glusterd_conf_t *conf = NULL;
+ int ret = -1;
+ glusterd_friend_sm_event_t *event = NULL;
+ glusterd_friend_sm_event_type_t event_type = GD_FRIEND_EVENT_NONE;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ char str[50] = {0,};
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ glusterd_probe_ctx_t *ctx = NULL;
+
+ conf = THIS->private;
+
+        if (-1 == req->rpc_status) {
+                rsp.op_ret = -1;
+                rsp.op_errno = EINVAL;
+                /* transport failed; iov does not hold a valid reply */
+                goto out;
+        }
+
+ ret = gd_xdr_to_mgmt_friend_req (*iov, &rsp);
+ if (ret < 0) {
+                gf_log ("glusterd", GF_LOG_ERROR,
+                        "Failed to decode friend-add response");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+ uuid_unparse (rsp.uuid, str);
+
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received %s from uuid: %s, host: %s",
+ (op_ret)?"RJT":"ACC", str, rsp.hostname);
+
+ ret = glusterd_friend_find (rsp.uuid, rsp.hostname, &peerinfo);
+
+ if (ret) {
+ GF_ASSERT (0);
+ }
+
+ if (op_ret)
+ event_type = GD_FRIEND_EVENT_RCVD_RJT;
+ else
+ event_type = GD_FRIEND_EVENT_RCVD_ACC;
+
+ ret = glusterd_friend_sm_new_event (event_type, &event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to get event");
+ goto out;
+ }
+ event->peerinfo = peerinfo;
+
+ ret = glusterd_friend_sm_inject_event (event);
+
+ if (ret)
+ goto out;
+
+ ctx = ((call_frame_t *)myframe)->local;
+
+ GF_ASSERT (ctx);
+
+ ret = glusterd_xfer_cli_probe_resp (ctx->req, op_ret, op_errno,
+ ctx->hostname);
+ if (!ret) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
+
+out:
+ return ret;
+}
+
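+/* Callback for a cluster lock request: turns the peer's ACC/RJT verdict
+   into a GD_OP_EVENT_RCVD_ACC/RJT event for the op state machine. */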
+int32_t
+glusterd3_1_cluster_lock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gd1_mgmt_cluster_lock_rsp rsp = {{0},};
+ int ret = -1;
+ int32_t op_ret = -1;
+ glusterd_op_sm_event_t *event = NULL;
+ glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ char str[50] = {0,};
+
+ GF_ASSERT (req);
+
+        if (-1 == req->rpc_status) {
+                rsp.op_ret = -1;
+                rsp.op_errno = EINVAL;
+                /* transport failed; iov does not hold a valid reply */
+                goto out;
+        }
+
+ ret = gd_xdr_to_mgmt_cluster_lock_req (*iov, &rsp);
+ if (ret < 0) {
+                gf_log ("glusterd", GF_LOG_ERROR,
+                        "Failed to decode cluster lock response");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+ uuid_unparse (rsp.uuid, str);
+
+ op_ret = rsp.op_ret;
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received %s from uuid: %s",
+ (op_ret)?"RJT":"ACC", str);
+
+ ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo);
+
+ if (ret) {
+ GF_ASSERT (0);
+ }
+
+ if (op_ret)
+ event_type = GD_OP_EVENT_RCVD_RJT;
+ else
+ event_type = GD_OP_EVENT_RCVD_ACC;
+
+ ret = glusterd_op_sm_new_event (event_type, &event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to get event");
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event (event);
+
+ if (!ret) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
+
+out:
+        return ret;
+}
+
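+/* Callback for a cluster unlock request; note it reuses the lock rsp type,
+   presumably because both responses carry the same fields. */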
+int32_t
+glusterd3_1_cluster_unlock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gd1_mgmt_cluster_lock_rsp rsp = {{0},};
+ int ret = -1;
+ int32_t op_ret = -1;
+ glusterd_op_sm_event_t *event = NULL;
+ glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ char str[50] = {0,};
+
+
+ GF_ASSERT (req);
+
+        if (-1 == req->rpc_status) {
+                rsp.op_ret = -1;
+                rsp.op_errno = EINVAL;
+                /* transport failed; iov does not hold a valid reply */
+                goto out;
+        }
+
+ ret = gd_xdr_to_mgmt_cluster_unlock_req (*iov, &rsp);
+ if (ret < 0) {
+                gf_log ("glusterd", GF_LOG_ERROR,
+                        "Failed to decode cluster unlock response");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+ uuid_unparse (rsp.uuid, str);
+
+ op_ret = rsp.op_ret;
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received %s from uuid: %s",
+ (op_ret)?"RJT":"ACC", str);
+
+ ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo);
+
+ if (ret) {
+ GF_ASSERT (0);
+ }
+
+ if (op_ret)
+ event_type = GD_OP_EVENT_RCVD_RJT;
+ else
+ event_type = GD_OP_EVENT_RCVD_ACC;
+
+ ret = glusterd_op_sm_new_event (event_type, &event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to get event");
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event (event);
+
+ if (!ret) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
+
+out:
+        return ret;
+}
+
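+/* Callback for a stage-op request: feeds the peer's ACC/RJT verdict into
+   the op state machine. */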
+int32_t
+glusterd3_1_stage_op_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gd1_mgmt_stage_op_rsp rsp = {{0},};
+ int ret = -1;
+ int32_t op_ret = -1;
+ glusterd_op_sm_event_t *event = NULL;
+ glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ char str[50] = {0,};
+
+
+ GF_ASSERT (req);
+
+        if (-1 == req->rpc_status) {
+                rsp.op_ret = -1;
+                rsp.op_errno = EINVAL;
+                /* transport failed; iov does not hold a valid reply */
+                goto out;
+        }
+
+ ret = gd_xdr_to_mgmt_stage_op_req (*iov, &rsp);
+ if (ret < 0) {
+                gf_log ("glusterd", GF_LOG_ERROR,
+                        "Failed to decode stage op response");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+ uuid_unparse (rsp.uuid, str);
+
+ op_ret = rsp.op_ret;
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received %s from uuid: %s",
+ (op_ret)?"RJT":"ACC", str);
+
+ ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo);
+
+ if (ret) {
+ GF_ASSERT (0);
+ }
+
+ if (op_ret)
+ event_type = GD_OP_EVENT_RCVD_RJT;
+ else
+ event_type = GD_OP_EVENT_RCVD_ACC;
+
+ ret = glusterd_op_sm_new_event (event_type, &event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to get event");
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event (event);
+
+ if (!ret) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
+
+out:
+        return ret;
+}
+
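+/* Callback for a commit-op request: same ACC/RJT handling as stage-op. */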
+int32_t
+glusterd3_1_commit_op_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
+{
+ gd1_mgmt_commit_op_rsp rsp = {{0},};
+ int ret = -1;
+ int32_t op_ret = -1;
+ glusterd_op_sm_event_t *event = NULL;
+ glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ char str[50] = {0,};
+
+
+ GF_ASSERT (req);
+
+        if (-1 == req->rpc_status) {
+                rsp.op_ret = -1;
+                rsp.op_errno = EINVAL;
+                /* transport failed; iov does not hold a valid reply */
+                goto out;
+        }
+
+ ret = gd_xdr_to_mgmt_commit_op_req (*iov, &rsp);
+ if (ret < 0) {
+                gf_log ("glusterd", GF_LOG_ERROR,
+                        "Failed to decode commit op response");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+ uuid_unparse (rsp.uuid, str);
+
+ op_ret = rsp.op_ret;
+
+ gf_log ("glusterd", GF_LOG_NORMAL,
+ "Received %s from uuid: %s",
+ (op_ret)?"RJT":"ACC", str);
+
+ ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo);
+
+ if (ret) {
+ GF_ASSERT (0);
+ }
+
+ if (op_ret)
+ event_type = GD_OP_EVENT_RCVD_RJT;
+ else
+ event_type = GD_OP_EVENT_RCVD_ACC;
+
+ ret = glusterd_op_sm_new_event (event_type, &event);
+
+ if (ret) {
+ gf_log ("glusterd", GF_LOG_ERROR,
+ "Unable to get event");
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event (event);
+
+ if (!ret) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
+
+out:
+        return ret;
+}
+
+
+
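+/* Client-side procedures follow: each builds a request and hands it to
+   glusterd_submit_request along with the matching callback above. */
+
+/* Initiates a probe of the given hostname using the peerinfo entry
+   registered for it earlier. */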
+int32_t
+glusterd3_1_probe (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gd1_mgmt_probe_req req = {{0},};
+ int ret = 0;
+ char *hostname = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+
+ hostname = data;
+ priv = this->private;
+
+ GF_ASSERT (priv);
+
+ ret = glusterd_friend_find (NULL, hostname, &peerinfo);
+
+ if (ret) {
+ //We should not reach this state ideally
+ GF_ASSERT (0);
+ goto out;
+ }
+
+ uuid_copy (req.uuid, priv->uuid);
+ req.hostname = gf_strdup (hostname);
+
+ ret = glusterd_submit_request (peerinfo, &req, frame, priv->mgmt,
+ GD_MGMT_PROBE_QUERY,
+ NULL, gd_xdr_from_mgmt_probe_req,
+ this, glusterd3_1_probe_cbk);
+
+out:
+ gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+
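+/* Sends a friend-add request to the peer named in the friend-sm event. */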
+int32_t
+glusterd3_1_friend_add (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gd1_mgmt_friend_req req = {{0},};
+ int ret = 0;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_friend_sm_event_t *event = NULL;
+ glusterd_friend_req_ctx_t *ctx = NULL;
+
+
+ if (!frame || !this || !data) {
+ ret = -1;
+ goto out;
+ }
+
+ event = data;
+ priv = this->private;
+
+ GF_ASSERT (priv);
+
+ ctx = event->ctx;
+
+ peerinfo = event->peerinfo;
+
+ uuid_copy (req.uuid, priv->uuid);
+ req.hostname = gf_strdup (peerinfo->hostname);
+
+ ret = glusterd_submit_request (peerinfo, &req, frame, priv->mgmt,
+ GD_MGMT_FRIEND_ADD,
+ NULL, gd_xdr_from_mgmt_friend_req,
+ this, glusterd3_1_friend_add_cbk);
+
+out:
+ gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
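+/* Broadcasts a cluster lock request to every befriended peer and records
+   the number of outstanding replies in opinfo.pending_count. */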
+int32_t
+glusterd3_1_cluster_lock (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gd1_mgmt_cluster_lock_req req = {{0},};
+ int ret = 0;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ call_frame_t *dummy_frame = NULL;
+ int32_t pending_lock = 0;
+
+ if (!this) {
+ ret = -1;
+ goto out;
+ }
+
+ priv = this->private;
+ glusterd_get_uuid (&req.uuid);
+
+ GF_ASSERT (priv);
+ list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
+ GF_ASSERT (peerinfo);
+
+ if (peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+
+ if (!dummy_frame)
+ continue;
+
+ ret = glusterd_submit_request (peerinfo, &req, dummy_frame,
+ priv->mgmt, GD_MGMT_CLUSTER_LOCK,
+ NULL,
+ gd_xdr_from_mgmt_cluster_lock_req,
+ this, glusterd3_1_cluster_lock_cbk);
+ if (!ret)
+ pending_lock++;
+ //TODO: Instead of keeping count, maintain a list of locked
+ //UUIDs.
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Sent lock req to %d peers",
+ pending_lock);
+ opinfo.pending_count = pending_lock;
+out:
+ gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
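+/* Broadcasts a cluster unlock request; mirrors glusterd3_1_cluster_lock. */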
+int32_t
+glusterd3_1_cluster_unlock (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gd1_mgmt_cluster_lock_req req = {{0},};
+ int ret = 0;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ int32_t pending_unlock = 0;
+ call_frame_t *dummy_frame = NULL;
+
+        if (!this) {
+ ret = -1;
+ goto out;
+ }
+
+ priv = this->private;
+
+ glusterd_get_uuid (&req.uuid);
+
+ GF_ASSERT (priv);
+ list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
+ GF_ASSERT (peerinfo);
+
+ if (peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+
+ if (!dummy_frame)
+ continue;
+
+ ret = glusterd_submit_request (peerinfo, &req, dummy_frame,
+ priv->mgmt, GD_MGMT_CLUSTER_UNLOCK,
+ NULL,
+ gd_xdr_from_mgmt_cluster_unlock_req,
+ this, glusterd3_1_cluster_unlock_cbk);
+ if (!ret)
+ pending_unlock++;
+ //TODO: Instead of keeping count, maintain a list of locked
+ //UUIDs.
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Sent unlock req to %d peers",
+ pending_unlock);
+ opinfo.pending_count = pending_unlock;
+
+out:
+ gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
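+/* Sends the first pending op to every befriended peer for staging; with
+   nothing pending, injects GD_OP_EVENT_STAGE_ACC locally instead. */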
+int32_t
+glusterd3_1_stage_op (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gd1_mgmt_stage_op_req *req = NULL;
+ int ret = 0;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ int32_t pending_peer = 0;
+ int i = 0;
+ call_frame_t *dummy_frame = NULL;
+
+ if (!this) {
+ ret = -1;
+ goto out;
+ }
+
+ priv = this->private;
+
+ GF_ASSERT (priv);
+
+        for (i = GD_OP_NONE; i < GD_OP_MAX; i++) {
+ if (opinfo.pending_op[i])
+ break;
+ }
+
+ if (GD_OP_MAX == i) {
+
+ //No pending ops, inject stage_acc
+
+ glusterd_op_sm_event_t *event = NULL;
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_STAGE_ACC,
+ &event);
+
+ if (ret)
+ goto out;
+
+ ret = glusterd_op_sm_inject_event (event);
+
+ return ret;
+ }
+
+
+ ret = glusterd_op_build_payload (i, &req);
+
+ if (ret)
+ goto out;
+
+ list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
+ GF_ASSERT (peerinfo);
+
+ if (peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+
+ if (!dummy_frame)
+ continue;
+
+ ret = glusterd_submit_request (peerinfo, req, dummy_frame,
+ priv->mgmt, GD_MGMT_STAGE_OP,
+ NULL,
+ gd_xdr_from_mgmt_stage_op_req,
+ this, glusterd3_1_stage_op_cbk);
+ if (!ret)
+ pending_peer++;
+ //TODO: Instead of keeping count, maintain a list of pending
+ //UUIDs.
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Sent op req to %d peers",
+ pending_peer);
+ opinfo.pending_count = pending_peer;
+
+out:
+ gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
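+/* Commit counterpart of glusterd3_1_stage_op: with nothing pending,
+   injects GD_OP_EVENT_COMMIT_ACC locally instead. */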
+int32_t
+glusterd3_1_commit_op (call_frame_t *frame, xlator_t *this,
+ void *data)
+{
+ gd1_mgmt_commit_op_req *req = NULL;
+ int ret = 0;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ int32_t pending_peer = 0;
+ int i = 0;
+ call_frame_t *dummy_frame = NULL;
+
+ if (!this) {
+ ret = -1;
+ goto out;
+ }
+
+ priv = this->private;
+
+ GF_ASSERT (priv);
+
+        for (i = GD_OP_NONE; i < GD_OP_MAX; i++) {
+ if (opinfo.pending_op[i])
+ break;
+ }
+
+ if (GD_OP_MAX == i) {
+
+                //No pending ops, inject commit_acc
+
+ glusterd_op_sm_event_t *event = NULL;
+
+ ret = glusterd_op_sm_new_event (GD_OP_EVENT_COMMIT_ACC,
+ &event);
+
+ if (ret)
+ goto out;
+
+ ret = glusterd_op_sm_inject_event (event);
+
+ return ret;
+ }
+
+
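+        /* the payload builder takes a stage-op req; the cast presumably
+           relies on stage and commit reqs sharing one XDR layout */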
+ ret = glusterd_op_build_payload (i, (gd1_mgmt_stage_op_req **)&req);
+
+ if (ret)
+ goto out;
+
+ list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
+ GF_ASSERT (peerinfo);
+
+ if (peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
+ dummy_frame = create_frame (this, this->ctx->pool);
+
+ if (!dummy_frame)
+ continue;
+
+ ret = glusterd_submit_request (peerinfo, req, dummy_frame,
+ priv->mgmt, GD_MGMT_COMMIT_OP,
+ NULL,
+ gd_xdr_from_mgmt_commit_op_req,
+ this, glusterd3_1_commit_op_cbk);
+ if (!ret)
+ pending_peer++;
+ //TODO: Instead of keeping count, maintain a list of pending
+ //UUIDs.
+ }
+
+ gf_log ("glusterd", GF_LOG_NORMAL, "Sent op req to %d peers",
+ pending_peer);
+ opinfo.pending_count = pending_peer;
+
+out:
+ gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+
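+/* Common entry point for all mgmt RPC procedures: dispatches on procnum
+   and, on success, cranks both state machines. */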
+int
+glusterd_handle_rpc_msg (rpcsvc_request_t *req)
+{
+ int ret = -1;
+ GF_ASSERT (req);
+
+ switch (req->procnum) {
+ case GD_MGMT_PROBE_QUERY:
+ ret = glusterd_handle_probe_query (req);
+ break;
+
+ case GD_MGMT_FRIEND_ADD:
+ ret = glusterd_handle_incoming_friend_req (req);
+ break;
+
+ case GD_MGMT_CLUSTER_LOCK:
+ ret = glusterd_handle_cluster_lock (req);
+ break;
+
+ case GD_MGMT_CLUSTER_UNLOCK:
+ ret = glusterd_handle_cluster_unlock (req);
+ break;
+
+ case GD_MGMT_STAGE_OP:
+ ret = glusterd_handle_stage_op (req);
+ break;
+
+ case GD_MGMT_COMMIT_OP:
+ ret = glusterd_handle_commit_op (req);
+ break;
+
+ case GD_MGMT_CLI_PROBE:
+ ret = glusterd_handle_cli_probe (req);
+ break;
+
+ case GD_MGMT_CLI_CREATE_VOLUME:
+ ret = glusterd_handle_create_volume (req);
+ break;
+
+        default:
+                /* procnum arrives off the wire; reject unknown values
+                   instead of asserting */
+                gf_log ("glusterd", GF_LOG_ERROR,
+                        "Unknown procnum %d", (int)req->procnum);
+                break;
+ }
+
+ if (!ret) {
+ glusterd_friend_sm ();
+ glusterd_op_sm ();
+ }
+
+ return ret;
+}
+
+
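+/* Server-side actor table: every procedure is funnelled through
+   glusterd_handle_rpc_msg above. */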
+rpcsvc_actor_t glusterd1_mgmt_actors[] = {
+ [GD_MGMT_NULL] = { "NULL", GD_MGMT_NULL, glusterd_null, NULL, NULL},
+ [GD_MGMT_PROBE_QUERY] = { "PROBE_QUERY", GD_MGMT_PROBE_QUERY, glusterd_handle_rpc_msg, NULL, NULL},
+ [GD_MGMT_FRIEND_ADD] = { "FRIEND_ADD", GD_MGMT_FRIEND_ADD, glusterd_handle_rpc_msg, NULL, NULL},
+ [GD_MGMT_CLUSTER_LOCK] = { "CLUSTER_LOCK", GD_MGMT_CLUSTER_LOCK, glusterd_handle_rpc_msg, NULL, NULL},
+ [GD_MGMT_CLUSTER_UNLOCK] = { "CLUSTER_UNLOCK", GD_MGMT_CLUSTER_UNLOCK, glusterd_handle_rpc_msg, NULL, NULL},
+ [GD_MGMT_STAGE_OP] = { "STAGE_OP", GD_MGMT_STAGE_OP, glusterd_handle_rpc_msg, NULL, NULL},
+ [GD_MGMT_COMMIT_OP] = { "COMMIT_OP", GD_MGMT_COMMIT_OP, glusterd_handle_rpc_msg, NULL, NULL},
+ [GD_MGMT_CLI_PROBE] = { "CLI_PROBE", GD_MGMT_CLI_PROBE, glusterd_handle_rpc_msg, NULL, NULL},
+ [GD_MGMT_CLI_CREATE_VOLUME] = { "CLI_CREATE_VOLUME", GD_MGMT_CLI_CREATE_VOLUME, glusterd_handle_rpc_msg, NULL, NULL},
+};
+
+
+struct rpcsvc_program glusterd1_mop_prog = {
+ .progname = "GlusterD0.0.1",
+ .prognum = GLUSTERD1_MGMT_PROGRAM,
+ .progver = GLUSTERD1_MGMT_VERSION,
+ .numactors = GLUSTERD1_MGMT_PROCCNT,
+ .actors = glusterd1_mgmt_actors,
+        .progport          = GLUSTERD_DEFAULT_PORT,
+};
+
+
+struct rpc_clnt_procedure glusterd3_1_clnt_mgmt_actors[GD_MGMT_MAXVALUE] = {
+ [GD_MGMT_NULL] = {"NULL", NULL },
+ [GD_MGMT_PROBE_QUERY] = { "PROBE_QUERY", glusterd3_1_probe},
+ [GD_MGMT_FRIEND_ADD] = { "FRIEND_ADD", glusterd3_1_friend_add },
+ [GD_MGMT_CLUSTER_LOCK] = {"CLUSTER_LOCK", glusterd3_1_cluster_lock},
+ [GD_MGMT_CLUSTER_UNLOCK] = {"CLUSTER_UNLOCK", glusterd3_1_cluster_unlock},
+ [GD_MGMT_STAGE_OP] = {"STAGE_OP", glusterd3_1_stage_op},
+ [GD_MGMT_COMMIT_OP] = {"COMMIT_OP", glusterd3_1_commit_op},
+};
+
+
+
+struct rpc_clnt_program glusterd3_1_mgmt_prog = {
+ .progname = "Mgmt 3.1",
+ .prognum = GLUSTERD1_MGMT_PROGRAM,
+ .progver = GLUSTERD1_MGMT_VERSION,
+ .proctable = glusterd3_1_clnt_mgmt_actors,
+ .numproc = GLUSTERD1_MGMT_PROCCNT,
+};