author    Atin Mukherjee <amukherj@redhat.com>    2015-02-11 17:13:45 +0530
committer Krishnan Parthasarathi <kparthas@redhat.com>    2015-02-20 04:04:08 -0800
commit    9d842f965655bf70c643b4541844e83bc4e74190 (patch)
tree      4d248f27d77993a478267a41e0517228214d7fa0
parent    571a71f0acd0ec59340b9d0d2519793e33a1dc16 (diff)
glusterd: nfs,shd,quotad,snapd daemons refactoring
This patch ports nfs, shd, quotad & snapd with the approach suggested in
http://www.gluster.org/pipermail/gluster-devel/2014-December/043180.html

Change-Id: I4ea5b38793f87fc85cc9d2cf873727351dedffd2
BUG: 1191486
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Signed-off-by: Krishnan Parthasarathi <kparthas@redhat.com>
Reviewed-on: http://review.gluster.org/9428
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Anand Nekkunti <anekkunt@redhat.com>
-rw-r--r--doc/daemon-management-framework.md38
-rw-r--r--rpc/rpc-lib/src/rpc-clnt.c41
-rw-r--r--rpc/rpc-lib/src/rpc-clnt.h3
-rw-r--r--tests/basic/uss.t15
-rw-r--r--tests/basic/volume-status.t5
-rw-r--r--xlators/mgmt/glusterd/src/Makefile.am12
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-brick-ops.c7
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-conn-helper.c18
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-conn-helper.h26
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c135
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h56
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-geo-rep.c3
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-handler.c114
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-handshake.c15
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-nfs-svc.c256
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-nfs-svc.h36
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-op-sm.c114
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c134
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-proc-mgmt.h49
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-quota.c22
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-quotad-svc.c158
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-quotad-svc.h36
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-replace-brick.c23
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-shd-svc.c167
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-shd-svc.h32
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-snapd-svc-helper.c63
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-snapd-svc-helper.h37
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-snapd-svc.c407
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-snapd-svc.h44
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c412
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h41
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-snapshot.c9
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-statedump.c11
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-store.c10
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-store.h4
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-svc-helper.c125
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-svc-helper.h30
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c339
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h78
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-utils.c831
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-utils.h93
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-volgen.c338
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-volgen.h30
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-volume-ops.c38
-rw-r--r--xlators/mgmt/glusterd/src/glusterd.c75
-rw-r--r--xlators/mgmt/glusterd/src/glusterd.h29
46 files changed, 2809 insertions, 1750 deletions
diff --git a/doc/daemon-management-framework.md b/doc/daemon-management-framework.md
new file mode 100644
index 00000000000..592192e665d
--- /dev/null
+++ b/doc/daemon-management-framework.md
@@ -0,0 +1,38 @@
+
+How to introduce new daemons using daemon management framework
+==============================================================
+Glusterd manages GlusterFS daemons providing services like NFS, Proactive
+self-heal, Quota, User serviceable snapshots etc. The following are some of the
+aspects that come under daemon management.
+
+Data members & functions of different management objects
+
+- **Connection Management**
+ - unix domain sockets based channel for internal communication
+ - rpc connection for the communication
+ - frame timeout value for UDS
+ - Methods - notify
+ - init, connect, termination, disconnect APIs can be invoked using the
+ connection management object
+
+- **Process Management**
+ - Name of the process
+ - pidfile to detect if the daemon is running
+ - logging directory, log file, volfile, volfileserver & volfileid
+ - init, stop APIs can be invoked using the process management object
+
+- **Service Management**
+ - connection object
+ - process object
+ - online status
+ - Methods - manager, start, stop, which can be abstracted as common methods
+ or made specific to service requirements
+ - init API can be invoked using the service management object
+
+ The above structures define the skeleton of the daemon management framework.
+ New daemons introduced in GlusterFS need to inherit these properties. Any
+ requirement specific to a daemon needs to be implemented in its own service
+ (e.g., snapd defines its own type glusterd_snapdsvc_t using glusterd_svc_t
+ and snapd-specific data). New daemons will need to have their own service-specific
+ code written in glusterd-<feature>-svc.{c,h} and should reuse the existing
+ framework.
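As a minimal sketch of the inheritance described above, a hypothetical "foo" daemon would embed glusterd_svc_t in its own type and register its callbacks through glusterd_svc_init; glusterd_foosvc_t and the glusterd_foosvc_* names below are invented for illustration and are not part of this patch.

    /* glusterd-foo-svc.h (hypothetical): embed the generic service object and
     * add daemon-specific data, the way snapd does with glusterd_snapdsvc_t. */
    #include "glusterd-svc-mgmt.h"

    typedef struct glusterd_foosvc_ {
            glusterd_svc_t  svc;        /* generic service: conn, proc, online, methods */
            int             foo_state;  /* daemon-specific data (illustrative) */
    } glusterd_foosvc_t;

    int glusterd_foosvc_manager (glusterd_svc_t *svc, void *data, int flags);
    int glusterd_foosvc_start (glusterd_svc_t *svc, int flags);

    /* glusterd-foo-svc.c (hypothetical): wire up the framework callbacks. */
    int
    glusterd_foosvc_init (glusterd_svc_t *svc)
    {
            return glusterd_svc_init (svc, "food",
                                      glusterd_foosvc_manager,
                                      glusterd_foosvc_start,
                                      glusterd_svc_stop);
    }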
diff --git a/rpc/rpc-lib/src/rpc-clnt.c b/rpc/rpc-lib/src/rpc-clnt.c
index 4c8333b7d5d..20981ef9c9c 100644
--- a/rpc/rpc-lib/src/rpc-clnt.c
+++ b/rpc/rpc-lib/src/rpc-clnt.c
@@ -1744,6 +1744,47 @@ out:
return;
}
+void
+rpc_clnt_disconnect (struct rpc_clnt *rpc)
+{
+ rpc_clnt_connection_t *conn = NULL;
+ rpc_transport_t *trans = NULL;
+
+ if (!rpc)
+ goto out;
+
+ conn = &rpc->conn;
+
+ pthread_mutex_lock (&conn->lock);
+ {
+ if (conn->timer) {
+ gf_timer_call_cancel (rpc->ctx, conn->timer);
+ conn->timer = NULL;
+ }
+
+ if (conn->reconnect) {
+ gf_timer_call_cancel (rpc->ctx, conn->reconnect);
+ conn->reconnect = NULL;
+ }
+ conn->connected = 0;
+
+ if (conn->ping_timer) {
+ gf_timer_call_cancel (rpc->ctx, conn->ping_timer);
+ conn->ping_timer = NULL;
+ conn->ping_started = 0;
+ }
+ trans = conn->trans;
+ }
+ pthread_mutex_unlock (&conn->lock);
+
+ if (trans) {
+ rpc_transport_disconnect (trans);
+ }
+
+out:
+ return;
+}
+
void
rpc_clnt_reconfig (struct rpc_clnt *rpc, struct rpc_clnt_config *config)
diff --git a/rpc/rpc-lib/src/rpc-clnt.h b/rpc/rpc-lib/src/rpc-clnt.h
index 6492a81f24e..faae4855a6c 100644
--- a/rpc/rpc-lib/src/rpc-clnt.h
+++ b/rpc/rpc-lib/src/rpc-clnt.h
@@ -244,6 +244,9 @@ int rpcclnt_cbk_program_register (struct rpc_clnt *svc,
void
rpc_clnt_disable (struct rpc_clnt *rpc);
+void
+rpc_clnt_disconnect (struct rpc_clnt *rpc);
+
char
rpc_clnt_is_disabled (struct rpc_clnt *rpc);
diff --git a/tests/basic/uss.t b/tests/basic/uss.t
index aabe00cd880..2e4286860cf 100644
--- a/tests/basic/uss.t
+++ b/tests/basic/uss.t
@@ -12,6 +12,17 @@ function check_readonly()
return $?
}
+function lookup()
+{
+ ls $1
+ if [ "$?" == "0" ]
+ then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
cleanup;
TESTS_EXPECTED_IN_LOOP=10
@@ -179,7 +190,9 @@ TEST fd_close $fd3;
# test 73
TEST $CLI volume set $V0 "features.snapshot-directory" .history
-TEST ls $M0/.history;
+#snapd client might take a fraction of a second to compare the volfile from glusterd,
+#hence an EXPECT_WITHIN is a better choice here
+EXPECT_WITHIN 2 "Y" lookup "$M0/.history";
NUM_SNAPS=$(ls $M0/.history | wc -l);
diff --git a/tests/basic/volume-status.t b/tests/basic/volume-status.t
index bd85ab659bc..7d1b8326e3c 100644
--- a/tests/basic/volume-status.t
+++ b/tests/basic/volume-status.t
@@ -14,7 +14,8 @@ TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
TEST $CLI volume start $V0;
-sleep 2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" nfs_up_status
## Mount FUSE
TEST $GFS -s $H0 --volfile-id $V0 $M0;
@@ -28,8 +29,6 @@ TEST mount_nfs $H0:/$V0 $N0 nolock;
TEST $CLI volume status all
TEST $CLI volume status $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' nfs_up_status
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' glustershd_up_status
function test_nfs_cmds () {
local ret=0
declare -a nfs_cmds=("clients" "mem" "inode" "callpool")
diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am
index 2215af71065..24d2579c64e 100644
--- a/xlators/mgmt/glusterd/src/Makefile.am
+++ b/xlators/mgmt/glusterd/src/Makefile.am
@@ -11,7 +11,11 @@ glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c \
glusterd-syncop.c glusterd-hooks.c glusterd-volume-set.c \
glusterd-locks.c glusterd-snapshot.c glusterd-mgmt-handler.c \
glusterd-mgmt.c glusterd-peer-utils.c glusterd-statedump.c \
- glusterd-snapshot-utils.c
+ glusterd-snapshot-utils.c glusterd-conn-mgmt.c \
+ glusterd-proc-mgmt.c glusterd-svc-mgmt.c glusterd-shd-svc.c \
+ glusterd-nfs-svc.c glusterd-quotad-svc.c glusterd-svc-helper.c \
+ glusterd-conn-helper.c glusterd-snapd-svc.c glusterd-snapd-svc-helper.c
+
glusterd_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
$(top_builddir)/rpc/xdr/src/libgfxdr.la \
@@ -26,7 +30,11 @@ noinst_HEADERS = glusterd.h glusterd-utils.h glusterd-op-sm.h \
glusterd-pmap.h glusterd-volgen.h glusterd-mountbroker.h \
glusterd-syncop.h glusterd-hooks.h glusterd-locks.h \
glusterd-mgmt.h glusterd-messages.h glusterd-peer-utils.h \
- glusterd-statedump.h glusterd-snapshot-utils.h glusterd-geo-rep.h
+ glusterd-statedump.h glusterd-snapshot-utils.h glusterd-geo-rep.h \
+ glusterd-conn-mgmt.h glusterd-conn-helper.h glusterd-proc-mgmt.h \
+ glusterd-svc-mgmt.h glusterd-shd-svc.h glusterd-nfs-svc.h \
+ glusterd-quotad-svc.h glusterd-svc-helper.h glusterd-snapd-svc.h \
+ glusterd-snapd-svc-helper.h
AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
-I$(rpclibdir) -I$(CONTRIBDIR)/rbtree \
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
index 72aaff58ffc..7673fa83433 100644
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
@@ -21,6 +21,7 @@
#include "glusterd-store.h"
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
+#include "glusterd-svc-helper.h"
#include "run.h"
#include <sys/signal.h>
@@ -1819,7 +1820,7 @@ glusterd_op_add_brick (dict_t *dict, char **op_errstr)
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status)
- ret = glusterd_nodesvcs_handle_graph_change (volinfo);
+ ret = glusterd_svcs_manager (volinfo);
out:
return ret;
@@ -2074,7 +2075,7 @@ glusterd_op_remove_brick (dict_t *dict, char **op_errstr)
if (GF_OP_CMD_START == cmd &&
volinfo->status == GLUSTERD_STATUS_STARTED) {
- ret = glusterd_nodesvcs_handle_reconfigure (volinfo);
+ ret = glusterd_svcs_reconfigure (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_WARNING,
"Unable to reconfigure NFS-Server");
@@ -2106,7 +2107,7 @@ glusterd_op_remove_brick (dict_t *dict, char **op_errstr)
}
} else {
if (GLUSTERD_STATUS_STARTED == volinfo->status)
- ret = glusterd_nodesvcs_handle_graph_change (volinfo);
+ ret = glusterd_svcs_manager (volinfo);
}
out:
diff --git a/xlators/mgmt/glusterd/src/glusterd-conn-helper.c b/xlators/mgmt/glusterd/src/glusterd-conn-helper.c
new file mode 100644
index 00000000000..43c95c3a386
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-conn-helper.c
@@ -0,0 +1,18 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "glusterd-conn-mgmt.h"
+#include "glusterd-svc-mgmt.h"
+
+glusterd_svc_t *
+glusterd_conn_get_svc_object (glusterd_conn_t *conn)
+{
+ return list_entry (conn, glusterd_svc_t, conn);
+}
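The list_entry call above acts as a container_of: given the address of the conn member embedded in a glusterd_svc_t, it recovers the enclosing service object. A hedged example of how a connection notify callback could use it (the callback name is illustrative; online is the status flag described in the design doc):

    static int
    example_conn_notify (glusterd_conn_t *conn, rpc_clnt_event_t event)
    {
            /* recover the owning service from its embedded connection object */
            glusterd_svc_t *svc = glusterd_conn_get_svc_object (conn);

            if (!svc)
                    return 0;

            if (event == RPC_CLNT_CONNECT)
                    svc->online = _gf_true;
            else if (event == RPC_CLNT_DISCONNECT)
                    svc->online = _gf_false;

            return 0;
    }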
diff --git a/xlators/mgmt/glusterd/src/glusterd-conn-helper.h b/xlators/mgmt/glusterd/src/glusterd-conn-helper.h
new file mode 100644
index 00000000000..2431c316fd4
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-conn-helper.h
@@ -0,0 +1,26 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _GLUSTERD_CONN_HELPER_H_
+#define _GLUSTERD_CONN_HELPER_H_
+
+#include "rpc-clnt.h"
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "glusterd-conn-mgmt.h"
+
+glusterd_svc_t *
+glusterd_conn_get_svc_object (glusterd_conn_t *conn);
+
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c
new file mode 100644
index 00000000000..662aba6e724
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c
@@ -0,0 +1,135 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "xlator.h"
+#include "rpc-clnt.h"
+#include "glusterd.h"
+#include "glusterd-conn-mgmt.h"
+#include "glusterd-conn-helper.h"
+#include "glusterd-utils.h"
+
+int
+glusterd_conn_init (glusterd_conn_t *conn, char *sockpath,
+ int frame_timeout, glusterd_conn_notify_t notify)
+{
+ int ret = -1;
+ dict_t *options = NULL;
+ struct rpc_clnt *rpc = NULL;
+ xlator_t *this = THIS;
+ glusterd_svc_t *svc = NULL;
+
+ if (!this)
+ goto out;
+
+ svc = glusterd_conn_get_svc_object (conn);
+ if (!svc) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get the service");
+ goto out;
+ }
+
+ ret = rpc_transport_unix_options_build (&options, sockpath,
+ frame_timeout);
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (options, "transport.socket.ignore-enoent", "on");
+ if (ret)
+ goto out;
+
+ /* @options is free'd by rpc_transport when destroyed */
+ rpc = rpc_clnt_new (options, this->ctx, (char *)svc->name, 16);
+ if (!rpc) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = rpc_clnt_register_notify (rpc, glusterd_conn_common_notify,
+ conn);
+ if (ret)
+ goto out;
+
+ ret = snprintf (conn->sockpath, sizeof (conn->sockpath), "%s",
+ sockpath);
+ if (ret < 0)
+ goto out;
+ else
+ ret = 0;
+
+ conn->frame_timeout = frame_timeout;
+ conn->rpc = rpc;
+ conn->notify = notify;
+out:
+ if (ret) {
+ if (rpc) {
+ rpc_clnt_unref (rpc);
+ rpc = NULL;
+ }
+ }
+ return ret;
+}
+
+int
+glusterd_conn_term (glusterd_conn_t *conn)
+{
+ rpc_clnt_disable (conn->rpc);
+ rpc_clnt_unref (conn->rpc);
+ return 0;
+}
+
+int
+glusterd_conn_connect (glusterd_conn_t *conn)
+{
+ return rpc_clnt_start (conn->rpc);
+}
+
+int
+glusterd_conn_disconnect (glusterd_conn_t *conn)
+{
+ rpc_clnt_disconnect (conn->rpc);
+
+ return 0;
+}
+
+
+int
+__glusterd_conn_common_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
+{
+ glusterd_conn_t *conn = mydata;
+
+ /* Silently ignoring this error, exactly like the current
+ * implementation */
+ if (!conn)
+ return 0;
+
+ return conn->notify (conn, event);
+}
+
+int
+glusterd_conn_common_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
+{
+ return glusterd_big_locked_notify
+ (rpc, mydata, event, data,
+ __glusterd_conn_common_notify);
+}
+
+int32_t
+glusterd_conn_build_socket_filepath (char *rundir, uuid_t uuid,
+ char *socketpath, int len)
+{
+ char sockfilepath[PATH_MAX] = {0,};
+
+ snprintf (sockfilepath, sizeof (sockfilepath), "%s/run-%s",
+ rundir, uuid_utoa (uuid));
+
+ glusterd_set_socket_filepath (sockfilepath, socketpath, len);
+ return 0;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h
new file mode 100644
index 00000000000..6a058e36c94
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h
@@ -0,0 +1,56 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _GLUSTERD_CONN_MGMT_H_
+#define _GLUSTERD_CONN_MGMT_H_
+
+#include "rpc-clnt.h"
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+typedef struct glusterd_conn_ glusterd_conn_t;
+
+typedef int (*glusterd_conn_notify_t)
+ (glusterd_conn_t *conn, rpc_clnt_event_t event);
+
+struct glusterd_conn_ {
+ struct rpc_clnt *rpc;
+ char sockpath[PATH_MAX];
+ int frame_timeout;
+ /* Existing daemons tend to specialize their respective
+ * notify implementations, so ... */
+ glusterd_conn_notify_t notify;
+};
+
+int
+glusterd_conn_init (glusterd_conn_t *conn, char *sockpath,
+ int frame_timeout, glusterd_conn_notify_t notify);
+
+int
+glusterd_conn_term (glusterd_conn_t *conn);
+
+int
+glusterd_conn_connect (glusterd_conn_t *conn);
+
+int
+glusterd_conn_disconnect (glusterd_conn_t *conn);
+
+int
+glusterd_conn_common_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data);
+
+int32_t
+glusterd_conn_build_socket_filepath (char *rundir, uuid_t uuid,
+ char *socketpath, int len);
+
+#endif
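A hedged sketch of the lifecycle these APIs imply for a daemon's unix-domain-socket channel; the wrapper function below is illustrative, not part of the patch:

    static int
    example_setup_conn (glusterd_svc_t *svc, char *sockpath, int frame_timeout,
                        glusterd_conn_notify_t notify)
    {
            int ret;

            /* build the rpc client over the daemon's unix socket and register
             * the service-specific notify callback */
            ret = glusterd_conn_init (&(svc->conn), sockpath, frame_timeout,
                                      notify);
            if (ret)
                    return ret;

            ret = glusterd_conn_connect (&(svc->conn));
            if (ret)
                    glusterd_conn_term (&(svc->conn));  /* undo init on failure */

            return ret;
    }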
diff --git a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
index 31dd5a481d4..797850743bb 100644
--- a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
+++ b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
@@ -21,6 +21,7 @@
#include "glusterd-store.h"
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
+#include "glusterd-svc-helper.h"
#include "run.h"
#include "syscall.h"
@@ -3903,7 +3904,7 @@ glusterd_marker_changelog_create_volfile (glusterd_volinfo_t *volinfo)
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status)
- ret = glusterd_nodesvcs_handle_graph_change (volinfo);
+ ret = glusterd_svcs_manager (volinfo);
ret = 0;
out:
return ret;
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 803a13c8393..8ba16b7a804 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -64,10 +64,12 @@ int glusterd_big_locked_notify (struct rpc_clnt *rpc, void *mydata,
void *data, rpc_clnt_notify_t notify_fn)
{
glusterd_conf_t *priv = THIS->private;
- int ret = -1;
+ int ret = -1;
+
synclock_lock (&priv->big_lock);
ret = notify_fn (rpc, mydata, event, data);
synclock_unlock (&priv->big_lock);
+
return ret;
}
@@ -4411,116 +4413,6 @@ glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
}
int
-__glusterd_snapd_rpc_notify (struct rpc_clnt *rpc, void *mydata,
- rpc_clnt_event_t event, void *data)
-{
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- int ret = 0;
-
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
- GF_ASSERT (conf);
-
- volinfo = mydata;
- if (!volinfo)
- return 0;
-
- switch (event) {
- case RPC_CLNT_CONNECT:
- gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_CONNECT");
-
- (void) glusterd_snapd_set_online_status (volinfo, _gf_true);
-
- break;
-
- case RPC_CLNT_DISCONNECT:
- if (glusterd_is_snapd_online (volinfo)) {
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_NODE_DISCONNECTED,
- "snapd for volume %s has disconnected from "
- "glusterd.", volinfo->volname);
-
- (void) glusterd_snapd_set_online_status
- (volinfo, _gf_false);
- }
- break;
-
- case RPC_CLNT_DESTROY:
- glusterd_volinfo_unref (volinfo);
- break;
-
- default:
- gf_log (this->name, GF_LOG_TRACE,
- "got some other RPC event %d", event);
- break;
- }
-
- return ret;
-}
-
-int
-glusterd_snapd_rpc_notify (struct rpc_clnt *rpc, void *mydata,
- rpc_clnt_event_t event, void *data)
-{
- return glusterd_big_locked_notify (rpc, mydata, event, data,
- __glusterd_snapd_rpc_notify);
-}
-
-int
-__glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata,
- rpc_clnt_event_t event, void *data)
-{
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- char *server = NULL;
- int ret = 0;
-
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
- GF_ASSERT (conf);
-
- server = mydata;
- if (!server)
- return 0;
-
- switch (event) {
- case RPC_CLNT_CONNECT:
- gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_CONNECT");
- (void) glusterd_nodesvc_set_online_status (server, _gf_true);
- ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);
-
- break;
-
- case RPC_CLNT_DISCONNECT:
- if (glusterd_is_nodesvc_online (server)) {
- gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_NODE_DISCONNECTED,
- "%s has disconnected from glusterd.", server);
- (void) glusterd_nodesvc_set_online_status (server, _gf_false);
- }
- break;
-
- default:
- gf_log (this->name, GF_LOG_TRACE,
- "got some other RPC event %d", event);
- break;
- }
-
- return ret;
-}
-
-int
-glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata,
- rpc_clnt_event_t event, void *data)
-{
- return glusterd_big_locked_notify (rpc, mydata, event, data,
- __glusterd_nodesvc_rpc_notify);
-}
-
-int
glusterd_friend_remove_notify (glusterd_peerctx_t *peerctx)
{
int ret = -1;
diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c
index e6921c87b89..4f19d00a3d7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c
@@ -23,6 +23,9 @@
#include "glusterd-op-sm.h"
#include "glusterd-store.h"
#include "glusterd-snapshot-utils.h"
+#include "glusterd-svc-mgmt.h"
+#include "glusterd-snapd-svc-helper.h"
+#include "glusterd-quotad-svc.h"
#include "glusterfs3.h"
#include "protocol-common.h"
@@ -187,7 +190,7 @@ build_volfile_path (char *volume_id, char *path,
"Couldn't find volinfo");
goto out;
}
- glusterd_get_snapd_volfile (volinfo, path, path_len);
+ glusterd_svc_build_snapd_volfile (volinfo, path, path_len);
ret = 0;
goto out;
@@ -202,8 +205,14 @@ build_volfile_path (char *volume_id, char *path,
}
volid_ptr++;
- glusterd_get_nodesvc_volfile (volid_ptr, priv->workdir,
- path, path_len);
+ if (strcmp (volid_ptr, "quotad") == 0)
+ glusterd_quotadsvc_build_volfile_path (volid_ptr,
+ priv->workdir,
+ path, path_len);
+ else
+ glusterd_svc_build_volfile_path (volid_ptr,
+ priv->workdir,
+ path, path_len);
ret = 0;
goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c b/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c
new file mode 100644
index 00000000000..91cbae7bda6
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c
@@ -0,0 +1,256 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "globals.h"
+#include "run.h"
+#include "glusterd.h"
+#include "glusterd-utils.h"
+#include "glusterd-volgen.h"
+#include "glusterd-nfs-svc.h"
+
+char *nfs_svc_name = "nfs";
+
+int
+glusterd_nfssvc_init (glusterd_svc_t *svc)
+{
+ return glusterd_svc_init (svc, nfs_svc_name,
+ glusterd_nfssvc_manager,
+ glusterd_nfssvc_start,
+ glusterd_nfssvc_stop);
+}
+
+static int
+glusterd_nfssvc_create_volfile ()
+{
+ char filepath[PATH_MAX] = {0,};
+ glusterd_conf_t *conf = THIS->private;
+
+ glusterd_svc_build_volfile_path (nfs_svc_name, conf->workdir,
+ filepath, sizeof (filepath));
+ return glusterd_create_global_volfile (build_nfs_graph,
+ filepath, NULL);
+}
+
+static int
+glusterd_nfssvc_check_volfile_identical (gf_boolean_t *identical)
+{
+ char nfsvol[PATH_MAX] = {0,};
+ char tmpnfsvol[PATH_MAX] = {0,};
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+ int ret = -1;
+ int need_unlink = 0;
+ int tmp_fd = -1;
+
+ this = THIS;
+
+ GF_ASSERT (this);
+ GF_ASSERT (identical);
+ conf = this->private;
+
+ glusterd_svc_build_volfile_path (nfs_svc_name, conf->workdir,
+ nfsvol, sizeof (nfsvol));
+
+ snprintf (tmpnfsvol, sizeof (tmpnfsvol), "/tmp/gnfs-XXXXXX");
+
+ tmp_fd = mkstemp (tmpnfsvol);
+ if (tmp_fd < 0) {
+ gf_log (this->name, GF_LOG_WARNING, "Unable to create temp file"
+ " %s:(%s)", tmpnfsvol, strerror (errno));
+ goto out;
+ }
+
+ need_unlink = 1;
+
+ ret = glusterd_create_global_volfile (build_nfs_graph,
+ tmpnfsvol, NULL);
+ if (ret)
+ goto out;
+
+ ret = glusterd_check_files_identical (nfsvol, tmpnfsvol,
+ identical);
+ if (ret)
+ goto out;
+
+out:
+ if (need_unlink)
+ unlink (tmpnfsvol);
+
+ if (tmp_fd >= 0)
+ close (tmp_fd);
+
+ return ret;
+}
+
+static int
+glusterd_nfssvc_check_topology_identical (gf_boolean_t *identical)
+{
+ char nfsvol[PATH_MAX] = {0,};
+ char tmpnfsvol[PATH_MAX] = {0,};
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = THIS;
+ int ret = -1;
+ int tmpclean = 0;
+ int tmpfd = -1;
+
+ if ((!identical) || (!this) || (!this->private))
+ goto out;
+
+ conf = (glusterd_conf_t *) this->private;
+ GF_ASSERT (conf);
+
+ /* Fetch the original NFS volfile */
+ glusterd_svc_build_volfile_path (conf->nfs_svc.name, conf->workdir,
+ nfsvol, sizeof (nfsvol));
+
+ /* Create the temporary NFS volfile */
+ snprintf (tmpnfsvol, sizeof (tmpnfsvol), "/tmp/gnfs-XXXXXX");
+ tmpfd = mkstemp (tmpnfsvol);
+ if (tmpfd < 0) {
+ gf_log (this->name, GF_LOG_WARNING, "Unable to create temp file"
+ " %s: (%s)", tmpnfsvol, strerror (errno));
+ goto out;
+ }
+
+ tmpclean = 1; /* SET the flag to unlink() tmpfile */
+
+ ret = glusterd_create_global_volfile (build_nfs_graph,
+ tmpnfsvol, NULL);
+ if (ret)
+ goto out;
+
+ /* Compare the topology of volfiles */
+ ret = glusterd_check_topology_identical (nfsvol, tmpnfsvol,
+ identical);
+out:
+ if (tmpfd >= 0)
+ close (tmpfd);
+ if (tmpclean)
+ unlink (tmpnfsvol);
+ return ret;
+}
+
+int
+glusterd_nfssvc_manager (glusterd_svc_t *svc, void *data, int flags)
+{
+ int ret = -1;
+
+ if (glusterd_are_all_volumes_stopped ()) {
+ ret = svc->stop (svc, SIGKILL);
+ } else {
+ ret = glusterd_nfssvc_create_volfile ();
+ if (ret)
+ goto out;
+
+ ret = svc->stop (svc, SIGKILL);
+ if (ret)
+ goto out;
+
+ ret = svc->start (svc, flags);
+ if (ret)
+ goto out;
+
+ ret = glusterd_conn_connect (&(svc->conn));
+ if (ret)
+ goto out;
+ }
+out:
+ gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+int
+glusterd_nfssvc_start (glusterd_svc_t *svc, int flags)
+{
+ return glusterd_svc_start (svc, flags, NULL);
+}
+
+int
+glusterd_nfssvc_stop (glusterd_svc_t *svc, int sig)
+{
+ int ret = -1;
+ gf_boolean_t deregister = _gf_false;
+
+ if (glusterd_proc_is_running (&(svc->proc)))
+ deregister = _gf_true;
+
+ ret = glusterd_svc_stop (svc, sig);
+ if (ret)
+ goto out;
+ if (deregister)
+ glusterd_nfs_pmap_deregister ();
+
+out:
+ gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+int
+glusterd_nfssvc_reconfigure ()
+{
+ int ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ gf_boolean_t identical = _gf_false;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ /*
+ * Check both OLD and NEW volfiles, if they are SAME by size
+ * and cksum i.e. "character-by-character". If YES, then
+ * NOTHING has been changed, just return.
+ */
+ ret = glusterd_nfssvc_check_volfile_identical (&identical);
+ if (ret)
+ goto out;
+
+ if (identical) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * They are not identical. Find out if the topology is changed
+ * OR just the volume options. If just the options which got
+ * changed, then inform the xlator to reconfigure the options.
+ */
+ identical = _gf_false; /* RESET the FLAG */
+ ret = glusterd_nfssvc_check_topology_identical (&identical);
+ if (ret)
+ goto out;
+
+ /* Topology is not changed, but just the options. But write the
+ * options to NFS volfile, so that NFS will be reconfigured.
+ */
+ if (identical) {
+ ret = glusterd_nfssvc_create_volfile();
+ if (ret == 0) {/* Only if above PASSES */
+ ret = glusterd_fetchspec_notify (THIS);
+ }
+ goto out;
+ }
+
+ /*
+ * NFS volfile's topology has been changed. NFS server needs
+ * to be RESTARTED to ACT on the changed volfile.
+ */
+ ret = priv->nfs_svc.manager (&(priv->nfs_svc), NULL,
+ PROC_START_NO_WAIT);
+
+out:
+ return ret;
+
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-nfs-svc.h b/xlators/mgmt/glusterd/src/glusterd-nfs-svc.h
new file mode 100644
index 00000000000..210336cde83
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-nfs-svc.h
@@ -0,0 +1,36 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _GLUSTERD_NFS_SVC_H_
+#define _GLUSTERD_NFS_SVC_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "glusterd-svc-mgmt.h"
+
+int
+glusterd_nfssvc_init (glusterd_svc_t *svc);
+
+int
+glusterd_nfssvc_manager (glusterd_svc_t *svc, void *data, int flags);
+
+int
+glusterd_nfssvc_start (glusterd_svc_t *svc, int flags);
+
+int
+glusterd_nfssvc_stop (glusterd_svc_t *svc, int sig);
+
+int
+glusterd_nfssvc_reconfigure ();
+
+#endif
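The later hunks in this patch drive the NFS daemon through the function pointers on its service object instead of the old free functions; a representative call pattern, with error handling trimmed, looks like this (sketch only):

    glusterd_conf_t *priv = THIS->private;
    int              ret  = -1;

    /* stop and restart the daemon explicitly ... */
    ret = priv->nfs_svc.stop (&(priv->nfs_svc), SIGKILL);
    if (!ret)
            ret = priv->nfs_svc.start (&(priv->nfs_svc), PROC_START_NO_WAIT);

    /* ... or let the manager regenerate the volfile and (re)start as needed */
    ret = priv->nfs_svc.manager (&(priv->nfs_svc), NULL, PROC_START_NO_WAIT);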
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 93585e3db0c..185454f0b1c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -44,6 +44,11 @@
#include "common-utils.h"
#include "run.h"
#include "glusterd-snapshot-utils.h"
+#include "glusterd-svc-mgmt.h"
+#include "glusterd-svc-helper.h"
+#include "glusterd-shd-svc.h"
+#include "glusterd-nfs-svc.h"
+#include "glusterd-quotad-svc.h"
#include <sys/types.h>
#include <signal.h>
@@ -390,13 +395,6 @@ glusterd_set_volume_status (glusterd_volinfo_t *volinfo,
volinfo->status = status;
}
-gf_boolean_t
-glusterd_is_volume_started (glusterd_volinfo_t *volinfo)
-{
- GF_ASSERT (volinfo);
- return (volinfo->status == GLUSTERD_STATUS_STARTED);
-}
-
static int
glusterd_op_sm_inject_all_acc (uuid_t *txn_id)
{
@@ -1526,7 +1524,7 @@ glusterd_options_reset (glusterd_volinfo_t *volinfo, char *key,
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_nodesvcs_handle_reconfigure (volinfo);
+ ret = glusterd_svcs_reconfigure (volinfo);
if (ret)
goto out;
}
@@ -1878,7 +1876,8 @@ glusterd_op_set_volume (dict_t *dict)
int32_t dict_count = 0;
gf_boolean_t check_op_version = _gf_false;
uint32_t new_op_version = 0;
- gf_boolean_t quorum_action = _gf_false;
+ gf_boolean_t quorum_action = _gf_false;
+ glusterd_svc_t *svc = NULL;
this = THIS;
GF_ASSERT (this);
@@ -2043,14 +2042,15 @@ glusterd_op_set_volume (dict_t *dict)
goto out;
}
}
-
if (!global_opts_set) {
gd_update_volume_op_versions (volinfo);
- ret = glusterd_handle_snapd_option (volinfo);
- if (ret)
- goto out;
-
+ if (!volinfo->is_snap_volume) {
+ svc = &(volinfo->snapd.svc);
+ ret = svc->manager (svc, volinfo, PROC_START_NO_WAIT);
+ if (ret)
+ goto out;
+ }
ret = glusterd_create_volfiles_and_notify_services (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
@@ -2065,10 +2065,10 @@ glusterd_op_set_volume (dict_t *dict)
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_nodesvcs_handle_reconfigure (volinfo);
+ ret = glusterd_svcs_reconfigure (volinfo);
if (ret) {
- gf_log (this->name, GF_LOG_WARNING,
- "Unable to restart NFS-Server");
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to restart services");
goto out;
}
}
@@ -2078,9 +2078,13 @@ glusterd_op_set_volume (dict_t *dict)
volinfo = voliter;
gd_update_volume_op_versions (volinfo);
- ret = glusterd_handle_snapd_option (volinfo);
- if (ret)
- goto out;
+ if (!volinfo->is_snap_volume) {
+ svc = &(volinfo->snapd.svc);
+ ret = svc->manager (svc, volinfo,
+ PROC_START_NO_WAIT);
+ if (ret)
+ goto out;
+ }
ret = glusterd_create_volfiles_and_notify_services (volinfo);
if (ret) {
@@ -2097,7 +2101,7 @@ glusterd_op_set_volume (dict_t *dict)
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_nodesvcs_handle_reconfigure (volinfo);
+ ret = glusterd_svcs_reconfigure (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_WARNING,
"Unable to restart NFS-Server");
@@ -2303,7 +2307,7 @@ glusterd_op_stats_volume (dict_t *dict, char **op_errstr,
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status)
- ret = glusterd_nodesvcs_handle_reconfigure (volinfo);
+ ret = glusterd_svcs_reconfigure (volinfo);
ret = 0;
@@ -2614,23 +2618,24 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
vol_opts = volinfo->dict;
if ((cmd & GF_CLI_STATUS_NFS) != 0) {
- ret = glusterd_add_node_to_dict ("nfs", rsp_dict, 0, vol_opts);
+ ret = glusterd_add_node_to_dict (priv->nfs_svc.name, rsp_dict,
+ 0, vol_opts);
if (ret)
goto out;
other_count++;
node_count++;
} else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
- ret = glusterd_add_node_to_dict ("glustershd", rsp_dict, 0,
- vol_opts);
+ ret = glusterd_add_node_to_dict (priv->shd_svc.name, rsp_dict,
+ 0, vol_opts);
if (ret)
goto out;
other_count++;
node_count++;
} else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
- ret = glusterd_add_node_to_dict ("quotad", rsp_dict, 0,
- vol_opts);
+ ret = glusterd_add_node_to_dict (priv->quotad_svc.name,
+ rsp_dict, 0, vol_opts);
if (ret)
goto out;
other_count++;
@@ -2703,10 +2708,11 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
"nfs.disable",
_gf_false);
if (!nfs_disabled) {
- ret = glusterd_add_node_to_dict ("nfs",
- rsp_dict,
- other_index,
- vol_opts);
+ ret = glusterd_add_node_to_dict
+ (priv->nfs_svc.name,
+ rsp_dict,
+ other_index,
+ vol_opts);
if (ret)
goto out;
other_index++;
@@ -2719,10 +2725,9 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
_gf_true);
if (glusterd_is_volume_replicate (volinfo)
&& shd_enabled) {
- ret = glusterd_add_node_to_dict ("glustershd",
- rsp_dict,
- other_index,
- vol_opts);
+ ret = glusterd_add_node_to_dict
+ (priv->shd_svc.name, rsp_dict,
+ other_index, vol_opts);
if (ret)
goto out;
other_count++;
@@ -2730,10 +2735,11 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
other_index++;
}
if (glusterd_is_volume_quota_enabled (volinfo)) {
- ret = glusterd_add_node_to_dict ("quotad",
- rsp_dict,
- other_index,
- vol_opts);
+ ret = glusterd_add_node_to_dict
+ (priv->quotad_svc.name,
+ rsp_dict,
+ other_index,
+ vol_opts);
if (ret)
goto out;
other_count++;
@@ -5122,7 +5128,7 @@ glusterd_bricks_select_profile_volume (dict_t *dict, char **op_errstr,
case GF_CLI_STATS_INFO:
ret = dict_get_str_boolean (dict, "nfs", _gf_false);
if (ret) {
- if (!glusterd_is_nodesvc_online ("nfs")) {
+ if (!priv->nfs_svc.online) {
ret = -1;
gf_log (this->name, GF_LOG_ERROR, "NFS server"
" is not running");
@@ -5134,7 +5140,7 @@ glusterd_bricks_select_profile_volume (dict_t *dict, char **op_errstr,
ret = -1;
goto out;
}
- pending_node->node = priv->nfs;
+ pending_node->node = &(priv->nfs_svc);
pending_node->type = GD_NODE_NFS;
list_add_tail (&pending_node->list, selected);
pending_node = NULL;
@@ -5164,7 +5170,7 @@ glusterd_bricks_select_profile_volume (dict_t *dict, char **op_errstr,
case GF_CLI_STATS_TOP:
ret = dict_get_str_boolean (dict, "nfs", _gf_false);
if (ret) {
- if (!glusterd_is_nodesvc_online ("nfs")) {
+ if (!priv->nfs_svc.online) {
ret = -1;
gf_log (this->name, GF_LOG_ERROR, "NFS server"
" is not running");
@@ -5176,7 +5182,7 @@ glusterd_bricks_select_profile_volume (dict_t *dict, char **op_errstr,
ret = -1;
goto out;
}
- pending_node->node = priv->nfs;
+ pending_node->node = &(priv->nfs_svc);
pending_node->type = GD_NODE_NFS;
list_add_tail (&pending_node->list, selected);
pending_node = NULL;
@@ -5581,7 +5587,7 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
switch (heal_op) {
case GF_AFR_OP_INDEX_SUMMARY:
case GF_AFR_OP_STATISTICS_HEAL_COUNT:
- if (!glusterd_is_nodesvc_online ("glustershd")) {
+ if (!priv->shd_svc.online) {
if (!rsp_dict) {
gf_log (this->name, GF_LOG_ERROR, "Received "
"empty ctx.");
@@ -5601,7 +5607,7 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
}
break;
case GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
- if (!glusterd_is_nodesvc_online ("glustershd")) {
+ if (!priv->shd_svc.online) {
if (!rsp_dict) {
gf_log (this->name, GF_LOG_ERROR, "Received "
"empty ctx.");
@@ -5662,7 +5668,7 @@ glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
ret = -1;
goto out;
} else {
- pending_node->node = priv->shd;
+ pending_node->node = &(priv->shd_svc);
pending_node->type = GD_NODE_SHD;
list_add_tail (&pending_node->list, selected);
pending_node = NULL;
@@ -5734,7 +5740,7 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
glusterd_pending_node_t *pending_node = NULL;
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
- glusterd_snapd_t *snapd = NULL;
+ glusterd_snapdsvc_t *snapd = NULL;
GF_ASSERT (dict);
@@ -5806,7 +5812,7 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
ret = 0;
} else if ((cmd & GF_CLI_STATUS_NFS) != 0) {
- if (!glusterd_is_nodesvc_online ("nfs")) {
+ if (!priv->nfs_svc.online) {
ret = -1;
gf_log (this->name, GF_LOG_ERROR,
"NFS server is not running");
@@ -5818,14 +5824,14 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
ret = -1;
goto out;
}
- pending_node->node = priv->nfs;
+ pending_node->node = &(priv->nfs_svc);
pending_node->type = GD_NODE_NFS;
pending_node->index = 0;
list_add_tail (&pending_node->list, selected);
ret = 0;
} else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
- if (!glusterd_is_nodesvc_online ("glustershd")) {
+ if (!priv->shd_svc.online) {
ret = -1;
gf_log (this->name, GF_LOG_ERROR,
"Self-heal daemon is not running");
@@ -5837,14 +5843,14 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
ret = -1;
goto out;
}
- pending_node->node = priv->shd;
+ pending_node->node = &(priv->shd_svc);
pending_node->type = GD_NODE_SHD;
pending_node->index = 0;
list_add_tail (&pending_node->list, selected);
ret = 0;
} else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
- if (!glusterd_is_nodesvc_online ("quotad")) {
+ if (!priv->quotad_svc.online) {
gf_log (this->name, GF_LOG_ERROR, "Quotad is not "
"running");
ret = -1;
@@ -5856,14 +5862,14 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
ret = -1;
goto out;
}
- pending_node->node = priv->quotad;
+ pending_node->node = &(priv->quotad_svc);
pending_node->type = GD_NODE_QUOTAD;
pending_node->index = 0;
list_add_tail (&pending_node->list, selected);
ret = 0;
} else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
- if (!glusterd_is_snapd_online (volinfo)) {
+ if (!volinfo->snapd.svc.online) {
gf_log (this->name, GF_LOG_ERROR, "snapd is not "
"running");
ret = -1;
diff --git a/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c
new file mode 100644
index 00000000000..d6088a398fa
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c
@@ -0,0 +1,134 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include <stdio.h>
+#include <limits.h>
+#include <signal.h>
+
+#include "common-utils.h"
+#include "xlator.h"
+#include "logging.h"
+#include "glusterd-messages.h"
+#include "glusterd-proc-mgmt.h"
+
+int
+glusterd_proc_init (glusterd_proc_t *proc, char *name, char *pidfile,
+ char *logdir, char *logfile, char *volfile, char *volfileid,
+ char *volfileserver)
+{
+ int ret = -1;
+
+ ret = snprintf (proc->name, sizeof (proc->name), "%s", name);
+ if (ret < 0)
+ goto out;
+
+ ret = snprintf (proc->pidfile, sizeof (proc->pidfile), "%s", pidfile);
+ if (ret < 0)
+ goto out;
+
+ ret = snprintf (proc->logdir, sizeof (proc->logdir), "%s", logdir);
+ if (ret < 0)
+ goto out;
+
+ ret = snprintf (proc->logfile, sizeof (proc->logfile), "%s", logfile);
+ if (ret < 0)
+ goto out;
+
+ ret = snprintf (proc->volfile, sizeof (proc->volfile), "%s", volfile);
+ if (ret < 0)
+ goto out;
+
+ ret = snprintf (proc->volfileid, sizeof (proc->volfileid), "%s",
+ volfileid);
+ if (ret < 0)
+ goto out;
+
+ ret = snprintf (proc->volfileserver, sizeof (proc->volfileserver), "%s",
+ volfileserver);
+ if (ret < 0)
+ goto out;
+
+out:
+ if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
+
+int
+glusterd_proc_stop (glusterd_proc_t *proc, int sig, int flags)
+{
+
+ /* NB: Copy-paste code from glusterd_service_stop; the source may be
+ * removed once all daemon management uses proc */
+
+ int32_t ret = -1;
+ pid_t pid = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ if (!gf_is_service_running (proc->pidfile, &pid)) {
+ ret = 0;
+ gf_log (this->name, GF_LOG_INFO, "%s already stopped",
+ proc->name);
+ goto out;
+ }
+ gf_log (this->name, GF_LOG_DEBUG, "Stopping %s daemon running in pid: "
+ "%d", proc->name, pid);
+
+ ret = kill (pid, sig);
+ if (ret) {
+ switch (errno) {
+ case ESRCH:
+ gf_log (this->name, GF_LOG_DEBUG, "%s is already "
+ "stopped", proc->name);
+ ret = 0;
+ goto out;
+ default:
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_SVC_KILL_FAIL, "Unable to kill %s "
+ "service, reason:%s", proc->name,
+ strerror (errno));
+ }
+ }
+ if (flags != PROC_STOP_FORCE)
+ goto out;
+
+ sleep (1);
+ if (gf_is_service_running (proc->pidfile, NULL)) {
+ ret = kill (pid, SIGKILL);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, errno,
+ GD_MSG_PID_KILL_FAIL, "Unable to kill pid:%d, "
+ "reason:%s", pid, strerror(errno));
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+glusterd_proc_get_pid (glusterd_proc_t *proc)
+{
+ int pid = -1;
+ (void) gf_is_service_running (proc->pidfile, &pid);
+ return pid;
+}
+
+int
+glusterd_proc_is_running (glusterd_proc_t *proc)
+{
+ return gf_is_service_running (proc->pidfile, NULL);
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.h
new file mode 100644
index 00000000000..9485d78d03e
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.h
@@ -0,0 +1,49 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _GLUSTERD_PROC_MGMT_H_
+#define _GLUSTERD_PROC_MGMT_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+typedef struct glusterd_proc_ glusterd_proc_t;
+
+enum proc_flags {
+ PROC_NONE = 0,
+ PROC_START,
+ PROC_START_NO_WAIT,
+ PROC_STOP,
+ PROC_STOP_FORCE
+};
+
+struct glusterd_proc_ {
+ char name[PATH_MAX];
+ char pidfile[PATH_MAX];
+ char logdir[PATH_MAX];
+ char logfile[PATH_MAX];
+ char volfile[PATH_MAX];
+ char volfileserver[PATH_MAX];
+ char volfileid[256];
+};
+
+int
+glusterd_proc_init (glusterd_proc_t *proc, char *name, char *pidfile,
+ char *logdir, char *logfile, char *volfile, char *volfileid,
+ char *volfileserver);
+
+int
+glusterd_proc_stop (glusterd_proc_t *proc, int sig, int flags);
+
+int
+glusterd_proc_is_running (glusterd_proc_t *proc);
+#endif
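A hedged sketch of filling in and using a process object with the APIs above; the daemon name and paths are illustrative, not what glusterd actually generates:

    glusterd_proc_t proc = {{0}};

    (void) glusterd_proc_init (&proc, "food",
                               "/var/run/gluster/food.pid",         /* pidfile   */
                               "/var/log/glusterfs",                /* logdir    */
                               "/var/log/glusterfs/food.log",       /* logfile   */
                               "/var/lib/glusterd/food/food.vol",   /* volfile   */
                               "gluster/food",                      /* volfileid */
                               "localhost");                        /* volfileserver */

    if (glusterd_proc_is_running (&proc))
            (void) glusterd_proc_stop (&proc, SIGTERM, PROC_STOP_FORCE);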
diff --git a/xlators/mgmt/glusterd/src/glusterd-quota.c b/xlators/mgmt/glusterd/src/glusterd-quota.c
index 5e15fde082a..48dc4b7b39e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-quota.c
+++ b/xlators/mgmt/glusterd/src/glusterd-quota.c
@@ -19,6 +19,8 @@
#include "glusterd-op-sm.h"
#include "glusterd-store.h"
#include "glusterd-utils.h"
+#include "glusterd-nfs-svc.h"
+#include "glusterd-quotad-svc.h"
#include "glusterd-volgen.h"
#include "run.h"
#include "syscall.h"
@@ -1007,17 +1009,27 @@ glusterd_set_quota_option (glusterd_volinfo_t *volinfo, dict_t *dict,
static int
glusterd_quotad_op (int opcode)
{
- int ret = -1;
+ int ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ priv = this->private;
+ GF_ASSERT (priv);
switch (opcode) {
case GF_QUOTA_OPTION_TYPE_ENABLE:
case GF_QUOTA_OPTION_TYPE_DISABLE:
if (glusterd_all_volumes_with_quota_stopped ())
- ret = glusterd_quotad_stop ();
+ ret = glusterd_svc_stop (&(priv->quotad_svc),
+ SIGTERM);
else
- ret = glusterd_check_generate_start_quotad_wait
- ();
+ ret = priv->quotad_svc.manager
+ (&(priv->quotad_svc), NULL,
+ PROC_START);
break;
default:
@@ -1167,7 +1179,7 @@ glusterd_op_quota (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
if (priv->op_version == GD_OP_VERSION_MIN)
- ret = glusterd_check_generate_start_nfs ();
+ ret = priv->nfs_svc.manager (&(priv->nfs_svc), NULL, 0);
}
if (rsp_dict && start_crawl == _gf_true)
diff --git a/xlators/mgmt/glusterd/src/glusterd-quotad-svc.c b/xlators/mgmt/glusterd/src/glusterd-quotad-svc.c
new file mode 100644
index 00000000000..bd77a72c5ab
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-quotad-svc.c
@@ -0,0 +1,158 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "globals.h"
+#include "run.h"
+#include "glusterd.h"
+#include "glusterd-utils.h"
+#include "glusterd-volgen.h"
+#include "glusterd-quotad-svc.h"
+
+char *quotad_svc_name = "quotad";
+
+int glusterd_quotadsvc_init (glusterd_svc_t *svc)
+{
+ int ret = -1;
+ char volfile[PATH_MAX] = {0,};
+ glusterd_conf_t *conf = THIS->private;
+
+ ret = glusterd_svc_init (svc, quotad_svc_name,
+ glusterd_quotadsvc_manager,
+ glusterd_quotadsvc_start,
+ glusterd_svc_stop);
+ if (ret)
+ goto out;
+
+ /* glusterd_svc_build_volfile_path () doesn't put correct quotad volfile
+ * path in proc object at service initialization. Re-initialize
+ * the correct path
+ */
+ glusterd_quotadsvc_build_volfile_path (quotad_svc_name, conf->workdir,
+ volfile, sizeof (volfile));
+ snprintf (svc->proc.volfile, sizeof (svc->proc.volfile), "%s", volfile);
+out:
+ return ret;
+}
+
+static int
+glusterd_quotadsvc_create_volfile ()
+{
+ char filepath[PATH_MAX] = {0,};
+ glusterd_conf_t *conf = THIS->private;
+
+ glusterd_quotadsvc_build_volfile_path (quotad_svc_name, conf->workdir,
+ filepath, sizeof (filepath));
+ return glusterd_create_global_volfile (build_quotad_graph,
+ filepath, NULL);
+}
+
+int
+glusterd_quotadsvc_manager (glusterd_svc_t *svc, void *data, int flags)
+{
+ int ret = 0;
+ glusterd_volinfo_t *volinfo = NULL;
+
+ volinfo = data;
+
+ /* If all the volumes are stopped or all quota-enabled volumes
+ * are stopped then stop the service if:
+ * - volinfo is NULL or
+ * - volinfo is present and quota is enabled on it
+ * Otherwise create volfile and restart service if:
+ * - volinfo is NULL or
+ * - volinfo is present and quota is enabled on it
+ */
+ if (glusterd_are_all_volumes_stopped () ||
+ glusterd_all_volumes_with_quota_stopped ()) {
+ if (!(volinfo && !glusterd_is_volume_quota_enabled (volinfo))) {
+ ret = svc->stop (svc, SIGTERM);
+ }
+ } else {
+ if (!(volinfo && !glusterd_is_volume_quota_enabled (volinfo))) {
+ ret = glusterd_quotadsvc_create_volfile ();
+ if (ret)
+ goto out;
+
+ ret = svc->stop (svc, SIGTERM);
+ if (ret)
+ goto out;
+
+ ret = svc->start (svc, flags);
+ if (ret)
+ goto out;
+
+ ret = glusterd_conn_connect (&(svc->conn));
+ if (ret)
+ goto out;
+ }
+ }
+out:
+ gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+int
+glusterd_quotadsvc_start (glusterd_svc_t *svc, int flags)
+{
+ int i = 0;
+ int ret = -1;
+ dict_t *cmdline = NULL;
+ char key[16] = {0};
+ char *options[] = {
+ "*replicate*.entry-self-heal=off",
+ "--xlator-option",
+ "*replicate*.metadata-self-heal=off",
+ "--xlator-option",
+ "*replicate*.data-self-heal=off",
+ "--xlator-option",
+ NULL
+ };
+
+ cmdline = dict_new ();
+ if (!cmdline)
+ goto out;
+
+ for (i = 0; options[i]; i++) {
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "arg%d", i);
+ ret = dict_set_str (cmdline, key, options[i]);
+ if (ret)
+ goto out;
+ }
+
+ ret = glusterd_svc_start (svc, flags, cmdline);
+
+out:
+ if (cmdline)
+ dict_unref (cmdline);
+
+ gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+int
+glusterd_quotadsvc_reconfigure ()
+{
+ return glusterd_svc_reconfigure (glusterd_quotadsvc_create_volfile);
+}
+
+void
+glusterd_quotadsvc_build_volfile_path (char *server, char *workdir,
+ char *volfile, size_t len)
+{
+ char dir[PATH_MAX] = {0,};
+
+ GF_ASSERT (len == PATH_MAX);
+
+ glusterd_svc_build_svcdir (server, workdir, dir, sizeof (dir));
+ snprintf (volfile, len, "%s/%s.vol", dir, server);
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-quotad-svc.h b/xlators/mgmt/glusterd/src/glusterd-quotad-svc.h
new file mode 100644
index 00000000000..945d47e68f6
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-quotad-svc.h
@@ -0,0 +1,36 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _GLUSTERD_QUOTAD_SVC_H_
+#define _GLUSTERD_QUOTAD_SVC_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "glusterd-svc-mgmt.h"
+
+int
+glusterd_quotadsvc_init (glusterd_svc_t *svc);
+
+int
+glusterd_quotadsvc_start (glusterd_svc_t *svc, int flags);
+
+int
+glusterd_quotadsvc_manager (glusterd_svc_t *svc, void *data, int flags);
+
+int
+glusterd_quotadsvc_reconfigure ();
+
+void
+glusterd_quotadsvc_build_volfile_path (char *server, char *workdir,
+ char *volfile, size_t len);
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
index ad89d159b46..0c0f171303f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
+++ b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
@@ -21,6 +21,9 @@
#include "glusterd-geo-rep.h"
#include "glusterd-store.h"
#include "glusterd-utils.h"
+#include "glusterd-svc-mgmt.h"
+#include "glusterd-svc-helper.h"
+#include "glusterd-nfs-svc.h"
#include "glusterd-volgen.h"
#include "run.h"
#include "syscall.h"
@@ -668,12 +671,20 @@ rb_src_brick_restart (glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *src_brickinfo,
int activate_pump)
{
- int ret = 0;
+ int ret = 0;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ priv = this->private;
+ GF_ASSERT (priv);
gf_log ("", GF_LOG_DEBUG,
"Attempting to kill src");
- ret = glusterd_nfs_server_stop (volinfo);
+ ret = priv->nfs_svc.stop (&(priv->nfs_svc), SIGKILL);
if (ret) {
gf_log ("", GF_LOG_ERROR, "Unable to stop nfs, ret: %d",
@@ -717,7 +728,7 @@ rb_src_brick_restart (glusterd_volinfo_t *volinfo,
}
out:
- ret = glusterd_nfs_server_start (volinfo);
+ ret = priv->nfs_svc.start (&(priv->nfs_svc), PROC_START_NO_WAIT);
if (ret) {
gf_log ("", GF_LOG_ERROR, "Unable to start nfs, ret: %d",
ret);
@@ -1771,7 +1782,7 @@ glusterd_op_replace_brick (dict_t *dict, dict_t *rsp_dict)
}
}
- ret = glusterd_nodesvcs_stop (volinfo);
+ ret = glusterd_svcs_stop (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Unable to stop nfs server, ret: %d", ret);
@@ -1783,13 +1794,13 @@ glusterd_op_replace_brick (dict_t *dict, dict_t *rsp_dict)
gf_log (this->name, GF_LOG_CRITICAL, "Unable to add "
"dst-brick: %s to volume: %s", dst_brick,
volinfo->volname);
- (void) glusterd_nodesvcs_handle_graph_change (volinfo);
+ (void) glusterd_svcs_manager (volinfo);
goto out;
}
volinfo->rebal.defrag_status = 0;
- ret = glusterd_nodesvcs_handle_graph_change (volinfo);
+ ret = glusterd_svcs_manager (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_CRITICAL,
"Failed to generate nfs volume file");
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
new file mode 100644
index 00000000000..dd85d1e3638
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
@@ -0,0 +1,167 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "globals.h"
+#include "run.h"
+#include "glusterd.h"
+#include "glusterd-utils.h"
+#include "glusterd-volgen.h"
+#include "glusterd-svc-mgmt.h"
+#include "glusterd-shd-svc.h"
+
+char *shd_svc_name = "glustershd";
+
+int
+glusterd_shdsvc_init (glusterd_svc_t *svc)
+{
+ return glusterd_svc_init (svc, shd_svc_name,
+ glusterd_shdsvc_manager,
+ glusterd_shdsvc_start,
+ glusterd_svc_stop);
+}
+
+static int
+glusterd_shdsvc_create_volfile ()
+{
+ char filepath[PATH_MAX] = {0,};
+ int ret = -1;
+ glusterd_conf_t *conf = THIS->private;
+ dict_t *mod_dict = NULL;
+
+ mod_dict = dict_new ();
+ if (!mod_dict)
+ goto out;
+
+ ret = dict_set_uint32 (mod_dict, "cluster.background-self-heal-count",
+ 0);
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (mod_dict, "cluster.data-self-heal", "on");
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (mod_dict, "cluster.metadata-self-heal", "on");
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (mod_dict, "cluster.entry-self-heal", "on");
+ if (ret)
+ goto out;
+
+ glusterd_svc_build_volfile_path (shd_svc_name, conf->workdir,
+ filepath, sizeof (filepath));
+ ret = glusterd_create_global_volfile (build_shd_graph, filepath,
+ mod_dict);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed to create volfile");
+ goto out;
+ }
+
+out:
+ if (mod_dict)
+ dict_unref (mod_dict);
+ gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+int
+glusterd_shdsvc_manager (glusterd_svc_t *svc, void *data, int flags)
+{
+ int ret = 0;
+ glusterd_volinfo_t *volinfo = NULL;
+
+ volinfo = data;
+
+ /* If all the volumes are stopped or all shd compatible volumes
+ * are stopped then stop the service if:
+ * - volinfo is NULL or
+ * - volinfo is present and volume is shd compatible
+ * Otherwise create volfile and restart service if:
+ * - volinfo is NULL or
+ * - volinfo is present and volume is shd compatible
+ */
+ if (glusterd_are_all_volumes_stopped () ||
+ glusterd_all_shd_compatible_volumes_stopped ()) {
+ if (!(volinfo &&
+ !glusterd_is_shd_compatible_volume (volinfo))) {
+ ret = svc->stop (svc, SIGTERM);
+ }
+ } else {
+ if (!(volinfo &&
+ !glusterd_is_shd_compatible_volume (volinfo))) {
+ ret = glusterd_shdsvc_create_volfile ();
+ if (ret)
+ goto out;
+
+ ret = svc->stop (svc, SIGTERM);
+ if (ret)
+ goto out;
+
+ ret = svc->start (svc, flags);
+ if (ret)
+ goto out;
+
+ ret = glusterd_conn_connect (&(svc->conn));
+ if (ret)
+ goto out;
+ }
+ }
+out:
+ gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+int
+glusterd_shdsvc_start (glusterd_svc_t *svc, int flags)
+{
+ int ret = -1;
+ char glusterd_uuid_option[PATH_MAX] = {0};
+ dict_t *cmdline = NULL;
+
+ cmdline = dict_new ();
+ if (!cmdline)
+ goto out;
+
+ ret = snprintf (glusterd_uuid_option, sizeof (glusterd_uuid_option),
+ "*replicate*.node-uuid=%s", uuid_utoa (MY_UUID));
+ if (ret < 0)
+ goto out;
+
+ /* Pass cmdline arguments as key-value pair. The key is merely
+ * a carrier and is not used. Since dictionary follows LIFO the value
+ * should be put in reverse order*/
+ ret = dict_set_str (cmdline, "arg2", glusterd_uuid_option);
+ if (ret)
+ goto out;
+
+ ret = dict_set_str (cmdline, "arg1", "--xlator-option");
+ if (ret)
+ goto out;
+
+ ret = glusterd_svc_start (svc, flags, cmdline);
+
+out:
+ if (cmdline)
+ dict_unref (cmdline);
+
+ gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+
+int
+glusterd_shdsvc_reconfigure ()
+{
+ return glusterd_svc_reconfigure (glusterd_shdsvc_create_volfile);
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.h b/xlators/mgmt/glusterd/src/glusterd-shd-svc.h
new file mode 100644
index 00000000000..469ed5d0af2
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.h
@@ -0,0 +1,32 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _GLUSTERD_SHD_SVC_H_
+#define _GLUSTERD_SHD_SVC_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "glusterd-svc-mgmt.h"
+
+int
+glusterd_shdsvc_init (glusterd_svc_t *svc);
+
+int
+glusterd_shdsvc_manager (glusterd_svc_t *svc, void *data, int flags);
+
+int
+glusterd_shdsvc_start (glusterd_svc_t *svc, int flags);
+
+int
+glusterd_shdsvc_reconfigure ();
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-snapd-svc-helper.c
new file mode 100644
index 00000000000..826b4ca7463
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-snapd-svc-helper.c
@@ -0,0 +1,63 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "glusterd.h"
+#include "glusterd-utils.h"
+#include "glusterd-snapd-svc-helper.h"
+
+void
+glusterd_svc_build_snapd_rundir (glusterd_volinfo_t *volinfo,
+ char *path, int path_len)
+{
+ char workdir[PATH_MAX] = {0,};
+ glusterd_conf_t *priv = THIS->private;
+
+ GLUSTERD_GET_VOLUME_DIR (workdir, volinfo, priv);
+
+ snprintf (path, path_len, "%s/run", workdir);
+}
+
+void
+glusterd_svc_build_snapd_socket_filepath (glusterd_volinfo_t *volinfo,
+ char *path, int path_len)
+{
+ char sockfilepath[PATH_MAX] = {0,};
+ char rundir[PATH_MAX] = {0,};
+
+ glusterd_svc_build_snapd_rundir (volinfo, rundir, sizeof (rundir));
+ snprintf (sockfilepath, sizeof (sockfilepath), "%s/run-%s",
+ rundir, uuid_utoa (MY_UUID));
+
+ glusterd_set_socket_filepath (sockfilepath, path, path_len);
+}
+
+void
+glusterd_svc_build_snapd_pidfile (glusterd_volinfo_t *volinfo,
+ char *path, int path_len)
+{
+ char rundir[PATH_MAX] = {0,};
+
+ glusterd_svc_build_snapd_rundir (volinfo, rundir, sizeof (rundir));
+
+ snprintf (path, path_len, "%s/%s-snapd.pid", rundir, volinfo->volname);
+}
+
+void
+glusterd_svc_build_snapd_volfile (glusterd_volinfo_t *volinfo,
+ char *path, int path_len)
+{
+ char workdir[PATH_MAX] = {0,};
+ glusterd_conf_t *priv = THIS->private;
+
+ GLUSTERD_GET_VOLUME_DIR (workdir, volinfo, priv);
+
+ snprintf (path, path_len, "%s/%s-snapd.vol", workdir,
+ volinfo->volname);
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapd-svc-helper.h b/xlators/mgmt/glusterd/src/glusterd-snapd-svc-helper.h
new file mode 100644
index 00000000000..5c15e1a82c7
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-snapd-svc-helper.h
@@ -0,0 +1,37 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _GLUSTERD_SNAPD_SVC_HELPER_H_
+#define _GLUSTERD_SNAPD_SVC_HELPER_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "glusterd.h"
+
+void
+glusterd_svc_build_snapd_rundir (glusterd_volinfo_t *volinfo,
+ char *path, int path_len);
+
+void
+glusterd_svc_build_snapd_socket_filepath (glusterd_volinfo_t *volinfo,
+ char *path, int path_len);
+
+void
+glusterd_svc_build_snapd_pidfile (glusterd_volinfo_t *volinfo,
+ char *path, int path_len);
+
+void
+glusterd_svc_build_snapd_volfile (glusterd_volinfo_t *volinfo,
+ char *path, int path_len);
+
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c
new file mode 100644
index 00000000000..7d265d1ac35
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c
@@ -0,0 +1,407 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "globals.h"
+#include "run.h"
+#include "glusterd-utils.h"
+#include "glusterd-volgen.h"
+#include "glusterd-messages.h"
+#include "glusterd-svc-mgmt.h"
+#include "glusterd-svc-helper.h"
+#include "glusterd-conn-mgmt.h"
+#include "glusterd-proc-mgmt.h"
+#include "glusterd-snapd-svc.h"
+#include "glusterd-snapd-svc-helper.h"
+#include "glusterd-snapshot-utils.h"
+
+char *snapd_svc_name = "snapd";
+
+static void
+glusterd_svc_build_snapd_logdir (char *logdir, char *volname, size_t len)
+{
+ snprintf (logdir, len, "%s/snaps/%s", DEFAULT_LOG_FILE_DIRECTORY,
+ volname);
+}
+
+static void
+glusterd_svc_build_snapd_logfile (char *logfile, char *logdir, size_t len)
+{
+ snprintf (logfile, len, "%s/snapd.log", logdir);
+}
+
+
+int
+glusterd_snapdsvc_init (void *data)
+{
+ int ret = -1;
+ char rundir[PATH_MAX] = {0,};
+ char sockpath[PATH_MAX] = {0,};
+ char pidfile[PATH_MAX] = {0,};
+ char volfile[PATH_MAX] = {0,};
+ char logdir[PATH_MAX] = {0,};
+ char logfile[PATH_MAX] = {0,};
+ char volfileid[256] = {0};
+ glusterd_svc_t *svc = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_conn_notify_t notify = NULL;
+ xlator_t *this = NULL;
+ char *volfileserver = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ volinfo = data;
+
+ svc = &(volinfo->snapd.svc);
+
+ ret = snprintf (svc->name, sizeof (svc->name), "%s", snapd_svc_name);
+ if (ret < 0)
+ goto out;
+
+ svc->manager = glusterd_snapdsvc_manager;
+ svc->start = glusterd_snapdsvc_start;
+ svc->stop = glusterd_svc_stop;
+
+ notify = glusterd_snapdsvc_rpc_notify;
+
+ glusterd_svc_build_snapd_rundir (volinfo, rundir, sizeof (rundir));
+ glusterd_svc_create_rundir (rundir);
+
+ /* Initialize the connection mgmt */
+ glusterd_svc_build_snapd_socket_filepath (volinfo, sockpath,
+ sizeof (sockpath));
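+ /* A frame-timeout of 600 seconds (10 minutes) is used: unix
+ * domain sockets are reliable, so the default 30 minute timeout
+ * meant for unreliable network connections is unnecessarily
+ * long. */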
+ ret = glusterd_conn_init (&(svc->conn), sockpath, 600, notify);
+ if (ret)
+ goto out;
+
+ /* Initialize the process mgmt */
+ glusterd_svc_build_snapd_pidfile (volinfo, pidfile, sizeof (pidfile));
+ glusterd_svc_build_snapd_volfile (volinfo, volfile, sizeof (volfile));
+ glusterd_svc_build_snapd_logdir (logdir, volinfo->volname,
+ sizeof (logdir));
+ ret = mkdir_p (logdir, 0755, _gf_true);
+ if ((ret == -1) && (EEXIST != errno)) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to create logdir %s",
+ logdir);
+ goto out;
+ }
+ glusterd_svc_build_snapd_logfile (logfile, logdir, sizeof (logfile));
+ snprintf (volfileid, sizeof (volfileid), "snapd/%s", volinfo->volname);
+
+ if (dict_get_str (this->options, "transport.socket.bind-address",
+ &volfileserver) != 0) {
+ volfileserver = "localhost";
+ }
+ ret = glusterd_proc_init (&(svc->proc), snapd_svc_name, pidfile, logdir,
+ logfile, volfile, volfileid, volfileserver);
+ if (ret)
+ goto out;
+
+out:
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+int
+glusterd_snapdsvc_manager (glusterd_svc_t *svc, void *data, int flags)
+{
+ int ret = 0;
+ xlator_t *this = THIS;
+ glusterd_volinfo_t *volinfo = NULL;
+
+ volinfo = data;
+
+ ret = glusterd_is_snapd_enabled (volinfo);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to read volume "
+ "options");
+ goto out;
+ }
+
+ if (ret) {
+ if (!glusterd_is_volume_started (volinfo)) {
+ if (glusterd_proc_is_running (&svc->proc)) {
+ ret = svc->stop (svc, SIGTERM);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Couldn't stop snapd for "
+ "volume: %s",
+ volinfo->volname);
+ } else {
+ /* Since snapd is not running set ret to 0 */
+ ret = 0;
+ }
+ goto out;
+ }
+
+ ret = glusterd_snapdsvc_create_volfile (volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Couldn't create "
+ "snapd volfile for volume: %s",
+ volinfo->volname);
+ goto out;
+ }
+
+ ret = svc->start (svc, flags);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Couldn't start "
+ "snapd for volume: %s", volinfo->volname);
+ goto out;
+ }
+
+ glusterd_volinfo_ref (volinfo);
+ ret = glusterd_conn_connect (&(svc->conn));
+ if (ret) {
+ glusterd_volinfo_unref (volinfo);
+ goto out;
+ }
+
+ } else if (glusterd_proc_is_running (&svc->proc)) {
+ ret = svc->stop (svc, SIGTERM);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Couldn't stop snapd for volume: %s",
+ volinfo->volname);
+ goto out;
+ }
+ volinfo->snapd.port = 0;
+ }
+
+out:
+ gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+int32_t
+glusterd_snapdsvc_start (glusterd_svc_t *svc, int flags)
+{
+ int ret = -1;
+ runner_t runner = {0,};
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ char valgrind_logfile[PATH_MAX] = {0};
+ int snapd_port = 0;
+ char msg[1024] = {0,};
+ char snapd_id[PATH_MAX] = {0,};
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_snapdsvc_t *snapd = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ if (glusterd_proc_is_running (&svc->proc)) {
+ ret = 0;
+ goto out;
+ }
+
+ /* Get volinfo->snapd from svc object */
+ snapd = list_entry (svc, glusterd_snapdsvc_t, svc);
+ if (!snapd) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get snapd object "
+ "from snapd service");
+ goto out;
+ }
+
+ /* Get volinfo from snapd */
+ volinfo = list_entry (snapd, glusterd_volinfo_t, snapd);
+ if (!volinfo) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get volinfo from "
+ "from snapd");
+ goto out;
+ }
+
+ ret = access (svc->proc.volfile, F_OK);
+ if (ret) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "snapd Volfile %s is not present", svc->proc.volfile);
+ /* If glusterd was down on one of the nodes when USS was enabled
+ * for the first time, that node will not have the snapd volfile
+ * once its glusterd comes back up, and starting snapd would fail.
+ * Therefore, if the volfile is not present, create a fresh one.
+ */
+ ret = glusterd_snapdsvc_create_volfile (volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Couldn't create "
+ "snapd volfile for volume: %s",
+ volinfo->volname);
+ goto out;
+ }
+ }
+ runinit (&runner);
+
+ if (priv->valgrind) {
+ snprintf (valgrind_logfile, PATH_MAX, "%s/valgrind-snapd.log",
+ svc->proc.logdir);
+
+ runner_add_args (&runner, "valgrind", "--leak-check=full",
+ "--trace-children=yes", "--track-origins=yes",
+ NULL);
+ runner_argprintf (&runner, "--log-file=%s", valgrind_logfile);
+ }
+
+ snprintf (snapd_id, sizeof (snapd_id), "snapd-%s", volinfo->volname);
+ runner_add_args (&runner, SBIN_DIR"/glusterfsd",
+ "-s", svc->proc.volfileserver,
+ "--volfile-id", svc->proc.volfileid,
+ "-p", svc->proc.pidfile,
+ "-l", svc->proc.logfile,
+ "--brick-name", snapd_id,
+ "-S", svc->conn.sockpath, NULL);
+
+ snapd_port = volinfo->snapd.port;
+ if (!snapd_port) {
+ snapd_port = pmap_registry_alloc (THIS);
+ if (!snapd_port) {
+ snprintf (msg, sizeof (msg), "Could not allocate port "
+ "for snapd service for volume %s",
+ volinfo->volname);
+ runner_log (&runner, this->name, GF_LOG_DEBUG, msg);
+ ret = -1;
+ goto out;
+ }
+ }
+ runner_add_arg (&runner, "--brick-port");
+ runner_argprintf (&runner, "%d", snapd_port);
+ runner_add_arg (&runner, "--xlator-option");
+ runner_argprintf (&runner, "%s-server.listen-port=%d",
+ volinfo->volname, snapd_port);
+ runner_add_arg (&runner, "--no-mem-accounting");
+
+ snprintf (msg, sizeof (msg),
+ "Starting the snapd service for volume %s", volinfo->volname);
+ runner_log (&runner, this->name, GF_LOG_DEBUG, msg);
+
+ if (flags == PROC_START_NO_WAIT) {
+ ret = runner_run_nowait (&runner);
+ } else {
+ synclock_unlock (&priv->big_lock);
+ {
+ ret = runner_run (&runner);
+ }
+ synclock_lock (&priv->big_lock);
+ }
+ volinfo->snapd.port = snapd_port;
+
+out:
+ return ret;
+}
+
+int
+glusterd_snapdsvc_restart ()
+{
+ glusterd_volinfo_t *volinfo = NULL;
+ int ret = 0;
+ xlator_t *this = THIS;
+ glusterd_conf_t *conf = NULL;
+ glusterd_svc_t *svc = NULL;
+
+ GF_ASSERT (this);
+
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ list_for_each_entry (volinfo, &conf->volumes, vol_list) {
+ /* Init per volume snapd svc */
+ ret = glusterd_snapdsvc_init (volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "snapd service "
+ "initialization failed for volume %s",
+ volinfo->volname);
+ goto out;
+ }
+ gf_log (this->name, GF_LOG_DEBUG, "snapd service initialized "
+ "for %s", volinfo->volname);
+
+ /* Start per volume snapd svc */
+ if (volinfo->status == GLUSTERD_STATUS_STARTED &&
+ glusterd_is_snapd_enabled (volinfo)) {
+ svc = &(volinfo->snapd.svc);
+ ret = svc->start (svc, PROC_START_NO_WAIT);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Couldn't start snapd for "
+ "vol: %s", volinfo->volname);
+ goto out;
+ }
+ }
+ }
+out:
+ return ret;
+}
+
+int
+glusterd_snapdsvc_rpc_notify (glusterd_conn_t *conn, rpc_clnt_event_t event)
+{
+ int ret = 0;
+ glusterd_svc_t *svc = NULL;
+ xlator_t *this = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_snapdsvc_t *snapd = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ svc = list_entry (conn, glusterd_svc_t, conn);
+ if (!svc) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get the service");
+ return -1;
+ }
+
+ switch (event) {
+ case RPC_CLNT_CONNECT:
+ gf_log (this->name, GF_LOG_DEBUG, "%s has connected with "
+ "glusterd.", svc->name);
+ svc->online = _gf_true;
+ break;
+
+ case RPC_CLNT_DISCONNECT:
+ if (svc->online) {
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_NODE_DISCONNECTED, "%s has disconnected "
+ "from glusterd.", svc->name);
+ svc->online = _gf_false;
+ }
+ break;
+
+ case RPC_CLNT_DESTROY:
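+ /* Drop the volinfo reference that was taken in
+ * glusterd_snapdsvc_manager () before connecting to snapd. */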
+ snapd = list_entry (svc, glusterd_snapdsvc_t, svc);
+ if (!snapd) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get the "
+ "snapd object");
+ return -1;
+ }
+
+ volinfo = list_entry (snapd, glusterd_volinfo_t, snapd);
+ if (!volinfo) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get the "
+ "volinfo object");
+ return -1;
+ }
+ glusterd_volinfo_unref (volinfo);
+
+ default:
+ gf_log (this->name, GF_LOG_TRACE,
+ "got some other RPC event %d", event);
+ break;
+ }
+
+ return ret;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.h b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.h
new file mode 100644
index 00000000000..bc5d39f7ffb
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.h
@@ -0,0 +1,44 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _GLUSTERD_SNAPD_SVC_H_
+#define _GLUSTERD_SNAPD_SVC_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "glusterd-svc-mgmt.h"
+
+typedef struct glusterd_snapdsvc_ glusterd_snapdsvc_t;
+
+struct glusterd_snapdsvc_{
+ glusterd_svc_t svc;
+ int port;
+ gf_store_handle_t *handle;
+};
+
+int
+glusterd_snapdsvc_init (void *data);
+
+int
+glusterd_snapdsvc_manager (glusterd_svc_t *svc, void *data, int flags);
+
+int
+glusterd_snapdsvc_start (glusterd_svc_t *svc, int flags);
+
+int
+glusterd_snapdsvc_restart ();
+
+int
+glusterd_snapdsvc_rpc_notify (glusterd_conn_t *conn, rpc_clnt_event_t event);
+
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
index d3ad906705c..aa9010f20b2 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
@@ -26,6 +26,9 @@
#include "glusterd-utils.h"
#include "glusterd-store.h"
#include "glusterd-volgen.h"
+#include "glusterd-snapd-svc.h"
+#include "glusterd-svc-helper.h"
+#include "glusterd-snapd-svc-helper.h"
#include "glusterd-snapshot-utils.h"
/*
@@ -1721,12 +1724,6 @@ out:
return ret;
}
-struct rpc_clnt*
-glusterd_snapd_get_rpc (glusterd_volinfo_t *volinfo)
-{
- return volinfo->snapd.rpc;
-}
-
int32_t
glusterd_add_snapd_to_dict (glusterd_volinfo_t *volinfo,
dict_t *dict, int32_t count)
@@ -1767,7 +1764,7 @@ glusterd_add_snapd_to_dict (glusterd_volinfo_t *volinfo,
if (ret)
goto out;
- glusterd_get_snapd_pidfile (volinfo, pidfile, sizeof (pidfile));
+ glusterd_svc_build_snapd_pidfile (volinfo, pidfile, sizeof (pidfile));
brick_online = gf_is_service_running (pidfile, &pid);
@@ -3231,407 +3228,6 @@ glusterd_is_snapd_enabled (glusterd_volinfo_t *volinfo)
return ret;
}
-void
-glusterd_get_snapd_rundir (glusterd_volinfo_t *volinfo,
- char *path, int path_len)
-{
- char workdir[PATH_MAX] = {0,};
- glusterd_conf_t *priv = THIS->private;
-
- GLUSTERD_GET_VOLUME_DIR (workdir, volinfo, priv);
-
- snprintf (path, path_len, "%s/run", workdir);
-}
-
-void
-glusterd_get_snapd_volfile (glusterd_volinfo_t *volinfo,
- char *path, int path_len)
-{
- char workdir[PATH_MAX] = {0,};
- glusterd_conf_t *priv = THIS->private;
-
- GLUSTERD_GET_VOLUME_DIR (workdir, volinfo, priv);
-
- snprintf (path, path_len, "%s/%s-snapd.vol", workdir,
- volinfo->volname);
-}
-
-void
-glusterd_get_snapd_pidfile (glusterd_volinfo_t *volinfo,
- char *path, int path_len)
-{
- char rundir[PATH_MAX] = {0,};
-
- glusterd_get_snapd_rundir (volinfo, rundir, sizeof (rundir));
-
- snprintf (path, path_len, "%s/%s-snapd.pid", rundir, volinfo->volname);
-}
-
-void
-glusterd_set_snapd_socket_filepath (glusterd_volinfo_t *volinfo,
- char *path, int path_len)
-{
- char sockfilepath[PATH_MAX] = {0,};
- char rundir[PATH_MAX] = {0,};
-
- glusterd_get_snapd_rundir (volinfo, rundir, sizeof (rundir));
- snprintf (sockfilepath, sizeof (sockfilepath), "%s/run-%s",
- rundir, uuid_utoa (MY_UUID));
-
- glusterd_set_socket_filepath (sockfilepath, path, path_len);
-}
-
-gf_boolean_t
-glusterd_is_snapd_running (glusterd_volinfo_t *volinfo)
-{
- char pidfile[PATH_MAX] = {0,};
- int pid = -1;
- glusterd_conf_t *priv = THIS->private;
-
- glusterd_get_snapd_pidfile (volinfo, pidfile,
- sizeof (pidfile));
-
- return gf_is_service_running (pidfile, &pid);
-}
-
-int
-glusterd_restart_snapds (glusterd_conf_t *priv)
-{
- glusterd_volinfo_t *volinfo = NULL;
- int ret = 0;
- xlator_t *this = THIS;
-
- list_for_each_entry (volinfo, &priv->volumes, vol_list) {
- if (volinfo->status == GLUSTERD_STATUS_STARTED &&
- glusterd_is_snapd_enabled (volinfo)) {
- ret = glusterd_snapd_start (volinfo,
- _gf_false);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Couldn't start snapd for "
- "vol: %s", volinfo->volname);
- goto out;
- }
- }
- }
-out:
- return ret;
-}
-
-gf_boolean_t
-glusterd_is_snapd_online (glusterd_volinfo_t *volinfo)
-{
- return volinfo->snapd.online;
-}
-
-void
-glusterd_snapd_set_online_status (glusterd_volinfo_t *volinfo,
- gf_boolean_t status)
-{
- volinfo->snapd.online = status;
-}
-
-static inline void
-glusterd_snapd_set_rpc (glusterd_volinfo_t *volinfo, struct rpc_clnt *rpc)
-{
- volinfo->snapd.rpc = rpc;
-}
-
-int32_t
-glusterd_snapd_connect (glusterd_volinfo_t *volinfo, char *socketpath)
-{
- int ret = 0;
- dict_t *options = NULL;
- struct rpc_clnt *rpc = NULL;
- glusterd_conf_t *priv = THIS->private;
-
- rpc = glusterd_snapd_get_rpc (volinfo);
-
- if (rpc == NULL) {
- /* Setting frame-timeout to 10mins (600seconds).
- * Unix domain sockets ensures that the connection is reliable.
- * The default timeout of 30mins used for unreliable network
- * connections is too long for unix domain socket connections.
- */
- ret = rpc_transport_unix_options_build (&options, socketpath,
- 600);
- if (ret)
- goto out;
-
- ret = dict_set_str(options,
- "transport.socket.ignore-enoent", "on");
- if (ret)
- goto out;
-
- glusterd_volinfo_ref (volinfo);
-
- synclock_unlock (&priv->big_lock);
- ret = glusterd_rpc_create (&rpc, options,
- glusterd_snapd_rpc_notify,
- volinfo);
- synclock_lock (&priv->big_lock);
- if (ret)
- goto out;
-
- (void) glusterd_snapd_set_rpc (volinfo, rpc);
- }
-out:
- return ret;
-}
-
-int32_t
-glusterd_snapd_disconnect (glusterd_volinfo_t *volinfo)
-{
- struct rpc_clnt *rpc = NULL;
- glusterd_conf_t *priv = THIS->private;
-
- rpc = glusterd_snapd_get_rpc (volinfo);
-
- (void) glusterd_snapd_set_rpc (volinfo, NULL);
-
- if (rpc)
- glusterd_rpc_clnt_unref (priv, rpc);
-
- return 0;
-}
-
-int32_t
-glusterd_snapd_start (glusterd_volinfo_t *volinfo, gf_boolean_t wait)
-{
- int32_t ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- runner_t runner = {0,};
- char pidfile[PATH_MAX] = {0,};
- char logfile[PATH_MAX] = {0,};
- char logdir[PATH_MAX] = {0,};
- char volfile[PATH_MAX] = {0,};
- char glusterd_uuid[1024] = {0,};
- char rundir[PATH_MAX] = {0,};
- char sockfpath[PATH_MAX] = {0,};
- char volfileid[256] = {0};
- char *volfileserver = NULL;
- char valgrind_logfile[PATH_MAX] = {0};
- int snapd_port = 0;
- char *volname = volinfo->volname;
- char snapd_id[PATH_MAX] = {0,};
- char msg[1024] = {0,};
-
- this = THIS;
- GF_ASSERT(this);
-
- if (glusterd_is_snapd_running (volinfo)) {
- ret = 0;
- goto connect;
- }
-
- priv = this->private;
-
- glusterd_get_snapd_rundir (volinfo, rundir, sizeof (rundir));
- ret = mkdir (rundir, 0777);
-
- if ((ret == -1) && (EEXIST != errno)) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to create rundir %s",
- rundir);
- goto out;
- }
-
- glusterd_get_snapd_pidfile (volinfo, pidfile, sizeof (pidfile));
- glusterd_get_snapd_volfile (volinfo, volfile, sizeof (volfile));
-
- ret = sys_access (volfile, F_OK);
- if (ret) {
- gf_log (this->name, GF_LOG_DEBUG,
- "snapd Volfile %s is not present", volfile);
-
- /* If glusterd is down on one of the nodes and during
- * that time "USS is enabled" for the first time. After some
- * time when the glusterd which was down comes back it tries
- * to look for the snapd volfile and it does not find snapd
- * volfile and because of this starting of snapd fails.
- * Therefore, if volfile is not present then create a fresh
- * volfile.
- */
- ret = glusterd_create_snapd_volfile (volinfo);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Couldn't create "
- "snapd volfile for volume: %s",
- volinfo->volname);
- goto out;
- }
- }
-
- snprintf (logdir, PATH_MAX, "%s/snaps/%s",
- DEFAULT_LOG_FILE_DIRECTORY, volname);
- ret = mkdir_p (logdir, 0755, _gf_true);
- if ((ret == -1) && (EEXIST != errno)) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to create logdir %s",
- logdir);
- goto out;
- }
-
- snprintf (logfile, PATH_MAX, "%s/snapd.log", logdir);
-
- snprintf (volfileid, sizeof (volfileid), "snapd/%s", volname);
- glusterd_set_snapd_socket_filepath (volinfo, sockfpath,
- sizeof (sockfpath));
-
- if (dict_get_str (this->options, "transport.socket.bind-address",
- &volfileserver) != 0) {
- volfileserver = "localhost";
- }
-
- runinit (&runner);
-
- if (priv->valgrind) {
- snprintf (valgrind_logfile, PATH_MAX, "%s/valgrind-snapd.log",
- logdir);
-
- runner_add_args (&runner, "valgrind", "--leak-check=full",
- "--trace-children=yes", "--track-origins=yes",
- NULL);
- runner_argprintf (&runner, "--log-file=%s", valgrind_logfile);
- }
-
- snprintf (snapd_id, sizeof (snapd_id), "snapd-%s", volname);
- runner_add_args (&runner, SBIN_DIR"/glusterfsd",
- "-s", volfileserver,
- "--volfile-id", volfileid,
- "-p", pidfile,
- "-l", logfile,
- "--brick-name", snapd_id,
- "-S", sockfpath, NULL);
-
- snapd_port = volinfo->snapd.port;
- if (!snapd_port) {
- snapd_port = pmap_registry_alloc (THIS);
- if (!snapd_port) {
- snprintf (msg, sizeof (msg), "Could not allocate port "
- "for snapd service for volume %s", volname);
-
- runner_log (&runner, this->name, GF_LOG_DEBUG, msg);
- ret = -1;
- goto out;
- }
- }
-
- runner_add_arg (&runner, "--brick-port");
- runner_argprintf (&runner, "%d", snapd_port);
- runner_add_arg (&runner, "--xlator-option");
- runner_argprintf (&runner, "%s-server.listen-port=%d",
- volname, snapd_port);
- runner_add_arg (&runner, "--no-mem-accounting");
-
- snprintf (msg, sizeof (msg),
- "Starting the snapd service for volume %s", volname);
- runner_log (&runner, this->name, GF_LOG_DEBUG, msg);
-
- if (!wait) {
- ret = runner_run_nowait (&runner);
- } else {
- synclock_unlock (&priv->big_lock);
- {
- ret = runner_run (&runner);
- }
- synclock_lock (&priv->big_lock);
- }
-
- volinfo->snapd.port = snapd_port;
-
-connect:
- if (ret == 0)
- glusterd_snapd_connect (volinfo, sockfpath);
-
-out:
- return ret;
-}
-
-int
-glusterd_snapd_stop (glusterd_volinfo_t *volinfo)
-{
- char pidfile[PATH_MAX] = {0,};
- char sockfpath[PATH_MAX] = {0,};
- glusterd_conf_t *priv = THIS->private;
- int ret = 0;
-
- (void)glusterd_snapd_disconnect (volinfo);
-
- if (!glusterd_is_snapd_running (volinfo))
- goto out;
-
- glusterd_get_snapd_pidfile (volinfo, pidfile, sizeof (pidfile));
- ret = glusterd_service_stop ("snapd", pidfile, SIGTERM, _gf_true);
-
- if (ret == 0) {
- glusterd_set_snapd_socket_filepath (volinfo, sockfpath,
- sizeof (sockfpath));
- (void)glusterd_unlink_file (sockfpath);
- }
-out:
- return ret;
-}
-
-int
-glusterd_handle_snapd_option (glusterd_volinfo_t *volinfo)
-{
- int ret = 0;
- xlator_t *this = THIS;
-
- if (volinfo->is_snap_volume)
- return 0;
-
- ret = glusterd_is_snapd_enabled (volinfo);
- if (ret == -1) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to read volume "
- "options");
- goto out;
- }
-
- if (ret) {
- if (!glusterd_is_volume_started (volinfo)) {
- if (glusterd_is_snapd_running (volinfo)) {
- ret = glusterd_snapd_stop (volinfo);
- if (ret)
- gf_log (this->name, GF_LOG_ERROR,
- "Couldn't stop snapd for "
- "volume: %s",
- volinfo->volname);
- } else {
- /* Since snapd is not running set ret to 0 */
- ret = 0;
- }
- goto out;
- }
-
- ret = glusterd_create_snapd_volfile (volinfo);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Couldn't create "
- "snapd volfile for volume: %s",
- volinfo->volname);
- goto out;
- }
-
- ret = glusterd_snapd_start (volinfo, _gf_false);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Couldn't start "
- "snapd for volume: %s", volinfo->volname);
- goto out;
- }
-
- } else if (glusterd_is_snapd_running (volinfo)) {
- ret = glusterd_snapd_stop (volinfo);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Couldn't stop snapd for volume: %s",
- volinfo->volname);
- goto out;
- }
- volinfo->snapd.port = 0;
- }
-
-out:
- return ret;
-}
int32_t
glusterd_is_snap_soft_limit_reached (glusterd_volinfo_t *volinfo, dict_t *dict)
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h
index 86b5e7443b1..41f316625d0 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h
@@ -137,54 +137,13 @@ glusterd_snapshot_restore_cleanup (dict_t *rsp_dict,
char *volname,
glusterd_snap_t *snap);
-int
-glusterd_handle_snapd_option (glusterd_volinfo_t *volinfo);
-
-int32_t
-glusterd_snapd_disconnect (glusterd_volinfo_t *volinfo);
-
void
glusterd_get_snapd_dir (glusterd_volinfo_t *volinfo,
char *path, int path_len);
-void
-glusterd_get_snapd_rundir (glusterd_volinfo_t *volinfo,
- char *path, int path_len);
-
-void
-glusterd_get_snapd_volfile (glusterd_volinfo_t *volinfo,
- char *path, int path_len);
-
-void
-glusterd_get_snapd_pidfile (glusterd_volinfo_t *volinfo,
- char *path, int path_len);
-
-void
-glusterd_set_snapd_socket_filepath (glusterd_volinfo_t *volinfo,
- char *path, int path_len);
-
-gf_boolean_t
-glusterd_is_snapd_running (glusterd_volinfo_t *volinfo);
-
-int
-glusterd_snapd_stop (glusterd_volinfo_t *volinfo);
-
-int
-glusterd_snapd_start (glusterd_volinfo_t *volinfo, gf_boolean_t wait);
-
int
glusterd_is_snapd_enabled (glusterd_volinfo_t *volinfo);
-gf_boolean_t
-glusterd_is_snapd_online (glusterd_volinfo_t *volinfo);
-
-void
-glusterd_snapd_set_online_status (glusterd_volinfo_t *volinfo,
- gf_boolean_t status);
-
-int
-glusterd_restart_snapds (glusterd_conf_t *priv);
-
int32_t
glusterd_check_and_set_config_limit (glusterd_conf_t *priv);
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index 839e40e9dcc..41559e75ea0 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -55,6 +55,7 @@
#include "glusterd-mgmt.h"
#include "glusterd-syncop.h"
#include "glusterd-snapshot-utils.h"
+#include "glusterd-snapd-svc.h"
#include "glusterfs3.h"
@@ -8209,6 +8210,14 @@ gd_restore_snap_volume (dict_t *dict, dict_t *rsp_dict,
/* Use the same version as the original version */
new_volinfo->version = orig_vol->version;
+ /* Initialize the snapd service */
+ ret = glusterd_snapdsvc_init (new_volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to initialize snapd "
+ "service for volume %s", orig_vol->volname);
+ goto out;
+ }
+
/* Copy the snap vol info to the new_volinfo.*/
ret = glusterd_snap_volinfo_restore (dict, rsp_dict, new_volinfo,
snap_vol, volcount);
diff --git a/xlators/mgmt/glusterd/src/glusterd-statedump.c b/xlators/mgmt/glusterd/src/glusterd-statedump.c
index c7ee70df25b..e4ecf3bcfc3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-statedump.c
+++ b/xlators/mgmt/glusterd/src/glusterd-statedump.c
@@ -15,10 +15,11 @@
#include "statedump.h"
#include "glusterd.h"
+#include "glusterd-shd-svc.h"
+#include "glusterd-quotad-svc.h"
+#include "glusterd-nfs-svc.h"
#include "glusterd-locks.h"
-
-
static void
glusterd_dump_peer (glusterd_peerinfo_t *peerinfo, char *input_key, int index,
gf_boolean_t xpeers)
@@ -223,13 +224,13 @@ glusterd_dump_priv (xlator_t *this)
gf_proc_dump_write (key, "%d", priv->ping_timeout);
gf_proc_dump_build_key (key, "glusterd", "shd.online");
- gf_proc_dump_write (key, "%d", priv->shd->online);
+ gf_proc_dump_write (key, "%d", priv->shd_svc.online);
gf_proc_dump_build_key (key, "glusterd", "nfs.online");
- gf_proc_dump_write (key, "%d", priv->nfs->online);
+ gf_proc_dump_write (key, "%d", priv->nfs_svc.online);
gf_proc_dump_build_key (key, "glusterd", "quotad.online");
- gf_proc_dump_write (key, "%d", priv->quotad->online);
+ gf_proc_dump_write (key, "%d", priv->quotad_svc.online);
GLUSTERD_DUMP_PEERS (&priv->peers, uuid_list, _gf_false);
GLUSTERD_DUMP_PEERS (&priv->xaction_peers, op_peers_list,
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index 54c8263a292..70e3536af85 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -2021,7 +2021,8 @@ out:
return ret;
}
-static int
+
+int
glusterd_restore_op_version (xlator_t *this)
{
glusterd_conf_t *conf = NULL;
@@ -4273,13 +4274,6 @@ glusterd_restore ()
this = THIS;
- ret = glusterd_restore_op_version (this);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Failed to restore op_version");
- goto out;
- }
-
ret = glusterd_store_retrieve_volumes (this, NULL);
if (ret)
goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.h b/xlators/mgmt/glusterd/src/glusterd-store.h
index b2d21d3a70c..afa96be77cf 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.h
+++ b/xlators/mgmt/glusterd/src/glusterd-store.h
@@ -167,4 +167,8 @@ glusterd_store_update_missed_snaps ();
glusterd_volinfo_t*
glusterd_store_retrieve_volume (char *volname, glusterd_snap_t *snap);
+
+int
+glusterd_restore_op_version (xlator_t *this);
+
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
new file mode 100644
index 00000000000..f17f34c3530
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
@@ -0,0 +1,125 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "globals.h"
+#include "run.h"
+#include "glusterd.h"
+#include "glusterfs.h"
+#include "glusterd-utils.h"
+#include "glusterd-svc-mgmt.h"
+#include "glusterd-shd-svc.h"
+#include "glusterd-quotad-svc.h"
+#include "glusterd-nfs-svc.h"
+
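+/* Reconfigure the node-level daemons: nfs unconditionally, shd when the
+ * volume (if given) is shd compatible, and quotad only when the cluster
+ * op-version allows it and quota is enabled on the volume. */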
+int
+glusterd_svcs_reconfigure (glusterd_volinfo_t *volinfo)
+{
+ int ret = 0;
+ xlator_t *this = THIS;
+ glusterd_conf_t *conf = NULL;
+
+ GF_ASSERT (this);
+
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ ret = glusterd_nfssvc_reconfigure ();
+ if (ret)
+ goto out;
+
+ if (volinfo && !glusterd_is_shd_compatible_volume (volinfo)) {
+ ; /* Do nothing */
+ } else {
+ ret = glusterd_shdsvc_reconfigure ();
+ if (ret)
+ goto out;
+ }
+ if (conf->op_version == GD_OP_VERSION_MIN)
+ goto out;
+
+ if (volinfo && !glusterd_is_volume_quota_enabled (volinfo))
+ goto out;
+
+ ret = glusterd_quotadsvc_reconfigure ();
+ if (ret)
+ goto out;
+out:
+ return ret;
+}
+
+int
+glusterd_svcs_stop ()
+{
+ int ret = 0;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ ret = glusterd_svc_stop (&(priv->nfs_svc), SIGKILL);
+ if (ret)
+ goto out;
+
+ ret = glusterd_svc_stop (&(priv->shd_svc), SIGTERM);
+ if (ret)
+ goto out;
+
+ ret = glusterd_svc_stop (&(priv->quotad_svc), SIGTERM);
+ if (ret)
+ goto out;
+out:
+ return ret;
+}
+
+int
+glusterd_svcs_manager (glusterd_volinfo_t *volinfo)
+{
+ int ret = 0;
+ xlator_t *this = THIS;
+ glusterd_conf_t *conf = NULL;
+
+ GF_ASSERT (this);
+
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ if (volinfo && volinfo->is_snap_volume)
+ return 0;
+
+ ret = conf->nfs_svc.manager (&(conf->nfs_svc), NULL,
+ PROC_START_NO_WAIT);
+ if (ret)
+ goto out;
+
+ ret = conf->shd_svc.manager (&(conf->shd_svc), volinfo,
+ PROC_START_NO_WAIT);
+ if (ret == -EINVAL)
+ ret = 0;
+ if (ret)
+ goto out;
+
+ if (conf->op_version == GD_OP_VERSION_MIN)
+ goto out;
+
+ ret = conf->quotad_svc.manager (&(conf->quotad_svc), volinfo,
+ PROC_START_NO_WAIT);
+ if (ret == -EINVAL)
+ ret = 0;
+ if (ret)
+ goto out;
+out:
+ return ret;
+}
+
+
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.h b/xlators/mgmt/glusterd/src/glusterd-svc-helper.h
new file mode 100644
index 00000000000..2af75bc5c9a
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.h
@@ -0,0 +1,30 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _GLUSTERD_SVC_HELPER_H_
+#define _GLUSTERD_SVC_HELPER_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "glusterd.h"
+#include "glusterd-svc-mgmt.h"
+
+int
+glusterd_svcs_reconfigure (glusterd_volinfo_t *volinfo);
+
+int
+glusterd_svcs_stop ();
+
+int
+glusterd_svcs_manager (glusterd_volinfo_t *volinfo);
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
new file mode 100644
index 00000000000..83eeda30c81
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
@@ -0,0 +1,339 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "globals.h"
+#include "run.h"
+#include "glusterd.h"
+#include "glusterfs.h"
+#include "glusterd-utils.h"
+#include "glusterd-svc-mgmt.h"
+#include "glusterd-proc-mgmt.h"
+#include "glusterd-conn-mgmt.h"
+#include "glusterd-messages.h"
+
+int
+glusterd_svc_create_rundir (char *rundir)
+{
+ int ret = -1;
+
+ ret = mkdir (rundir, 0777);
+ if ((ret == -1) && (EEXIST != errno)) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Unable to create rundir %s",
+ rundir);
+ }
+ return ret;
+}
+
+static void
+glusterd_svc_build_logfile_path (char *server, char *logdir, char *logfile,
+ size_t len)
+{
+ snprintf (logfile, len, "%s/%s.log", logdir, server);
+}
+
+static void
+glusterd_svc_build_volfileid_path (char *server, char *volfileid, size_t len)
+{
+ snprintf (volfileid, len, "gluster/%s", server);
+}
+
+static int
+glusterd_svc_init_common (glusterd_svc_t *svc,
+ char *svc_name, char *workdir,
+ char *rundir, char *logdir,
+ glusterd_svc_manager_t manager,
+ glusterd_svc_start_t start,
+ glusterd_svc_stop_t stop,
+ glusterd_conn_notify_t notify)
+{
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ char pidfile[PATH_MAX] = {0,};
+ char logfile[PATH_MAX] = {0,};
+ char volfile[PATH_MAX] = {0,};
+ char sockfpath[PATH_MAX] = {0,};
+ char volfileid[256] = {0};
+ char *volfileserver = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ ret = snprintf (svc->name, sizeof (svc->name), "%s", svc_name);
+ if (ret < 0)
+ goto out;
+
+ svc->manager = manager;
+ svc->start = start;
+ svc->stop = stop;
+
+ if (!notify)
+ notify = glusterd_svc_common_rpc_notify;
+
+ glusterd_svc_create_rundir (rundir);
+
+ /* Initialize the connection mgmt */
+ glusterd_conn_build_socket_filepath (rundir, MY_UUID,
+ sockfpath, sizeof (sockfpath));
+
+ ret = glusterd_conn_init (&(svc->conn), sockfpath, 600, notify);
+ if (ret)
+ goto out;
+
+ /* Initialize the process mgmt */
+ glusterd_svc_build_pidfile_path (svc_name, workdir, pidfile,
+ sizeof(pidfile));
+ glusterd_svc_build_volfile_path (svc_name, workdir, volfile,
+ sizeof (volfile));
+
+ glusterd_svc_build_logfile_path (svc_name, logdir, logfile,
+ sizeof (logfile));
+ glusterd_svc_build_volfileid_path (svc_name, volfileid,
+ sizeof(volfileid));
+
+ if (dict_get_str (this->options, "transport.socket.bind-address",
+ &volfileserver) != 0) {
+ volfileserver = "localhost";
+ }
+
+ ret = glusterd_proc_init (&(svc->proc), svc_name, pidfile, logdir,
+ logfile, volfile, volfileid, volfileserver);
+ if (ret)
+ goto out;
+
+out:
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
+
+
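+/* dict_foreach () callback that appends each stored value as a single
+ * argument to the runner used to launch the daemon. */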
+static int
+svc_add_args (dict_t *cmdline, char *arg, data_t *value, void *data)
+{
+ runner_t *runner = data;
+ runner_add_arg (runner, value->data);
+ return 0;
+}
+
+int glusterd_svc_init (glusterd_svc_t *svc, char *svc_name,
+ glusterd_svc_manager_t manager,
+ glusterd_svc_start_t start,
+ glusterd_svc_stop_t stop)
+{
+ int ret = -1;
+ char rundir[PATH_MAX] = {0,};
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ glusterd_svc_build_rundir (svc_name, priv->workdir, rundir,
+ sizeof (rundir));
+ ret = glusterd_svc_init_common (svc, svc_name, priv->workdir, rundir,
+ DEFAULT_LOG_FILE_DIRECTORY, manager,
+ start, stop, NULL);
+
+ return ret;
+}
+
+int
+glusterd_svc_start (glusterd_svc_t *svc, int flags, dict_t *cmdline)
+{
+ int ret = -1;
+ runner_t runner = {0,};
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ char valgrind_logfile[PATH_MAX] = {0};
+ char glusterd_uuid_option[1024] = {0};
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ if (glusterd_proc_is_running (&(svc->proc))) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = access (svc->proc.volfile, F_OK);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Volfile %s is not present",
+ svc->proc.volfile);
+ goto out;
+ }
+
+ runinit (&runner);
+
+ if (priv->valgrind) {
+ snprintf (valgrind_logfile, PATH_MAX, "%s/valgrind-%s.log",
+ svc->proc.logdir, svc->name);
+
+ runner_add_args (&runner, "valgrind", "--leak-check=full",
+ "--trace-children=yes", "--track-origins=yes",
+ NULL);
+ runner_argprintf (&runner, "--log-file=%s", valgrind_logfile);
+ }
+
+ runner_add_args (&runner, SBIN_DIR"/glusterfs",
+ "-s", svc->proc.volfileserver,
+ "--volfile-id", svc->proc.volfileid,
+ "-p", svc->proc.pidfile,
+ "-l", svc->proc.logfile,
+ "-S", svc->conn.sockpath,
+ NULL);
+
+ if (cmdline)
+ dict_foreach (cmdline, svc_add_args, (void *) &runner);
+
+ gf_log (this->name, GF_LOG_DEBUG, "Starting %s service", svc->name);
+
+ if (flags == PROC_START_NO_WAIT) {
+ ret = runner_run_nowait (&runner);
+ } else {
+ synclock_unlock (&priv->big_lock);
+ {
+ ret = runner_run (&runner);
+ }
+ synclock_lock (&priv->big_lock);
+ }
+
+out:
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+int glusterd_svc_stop (glusterd_svc_t *svc, int sig)
+{
+ int ret = -1;
+
+ ret = glusterd_proc_stop (&(svc->proc), sig, PROC_STOP_FORCE);
+ if (ret)
+ goto out;
+ glusterd_conn_disconnect (&(svc->conn));
+
+ if (ret == 0) {
+ svc->online = _gf_false;
+ (void) glusterd_unlink_file ((char *)svc->conn.sockpath);
+ }
+out:
+ gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+
+ return ret;
+}
+
+void
+glusterd_svc_build_pidfile_path (char *server, char *workdir, char *path,
+ size_t len)
+{
+ char dir[PATH_MAX] = {0};
+
+ GF_ASSERT (len == PATH_MAX);
+
+ glusterd_svc_build_rundir (server, workdir, dir, sizeof (dir));
+ snprintf (path, len, "%s/%s.pid", dir, server);
+}
+
+void
+glusterd_svc_build_volfile_path (char *server, char *workdir, char *volfile,
+ size_t len)
+{
+ char dir[PATH_MAX] = {0,};
+
+ GF_ASSERT (len == PATH_MAX);
+
+ glusterd_svc_build_svcdir (server, workdir, dir, sizeof (dir));
+ snprintf (volfile, len, "%s/%s-server.vol", dir, server);
+}
+
+void
+glusterd_svc_build_svcdir (char *server, char *workdir, char *path, size_t len)
+{
+ GF_ASSERT (len == PATH_MAX);
+
+ snprintf (path, len, "%s/%s", workdir, server);
+}
+
+void
+glusterd_svc_build_rundir (char *server, char *workdir, char *path, size_t len)
+{
+ char dir[PATH_MAX] = {0};
+
+ GF_ASSERT (len == PATH_MAX);
+
+ glusterd_svc_build_svcdir (server, workdir, dir, sizeof (dir));
+ snprintf (path, len, "%s/run", dir);
+}
+
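+/* Regenerate the daemon's volfile through the given callback and notify
+ * connected daemons (fetchspec) so that they re-fetch the new graph. */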
+int
+glusterd_svc_reconfigure (int (*create_volfile) ())
+{
+ int ret = -1;
+
+ ret = create_volfile ();
+ if (ret)
+ goto out;
+
+ ret = glusterd_fetchspec_notify (THIS);
+out:
+ return ret;
+}
+
+int
+glusterd_svc_common_rpc_notify (glusterd_conn_t *conn,
+ rpc_clnt_event_t event)
+{
+ int ret = 0;
+ glusterd_svc_t *svc = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ /* Get the parent object, i.e. svc, using the list_entry macro */
+ svc = list_entry (conn, glusterd_svc_t, conn);
+ if (!svc) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get the service");
+ return -1;
+ }
+
+ switch (event) {
+ case RPC_CLNT_CONNECT:
+ gf_log (this->name, GF_LOG_DEBUG, "%s has connected with "
+ "glusterd.", svc->name);
+ svc->online = _gf_true;
+ break;
+
+ case RPC_CLNT_DISCONNECT:
+ if (svc->online) {
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_NODE_DISCONNECTED, "%s has disconnected "
+ "from glusterd.", svc->name);
+ svc->online = _gf_false;
+ }
+ break;
+
+ default:
+ gf_log (this->name, GF_LOG_TRACE,
+ "got some other RPC event %d", event);
+ break;
+ }
+
+ return ret;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h
new file mode 100644
index 00000000000..bb4f6f18fc8
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h
@@ -0,0 +1,78 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _GLUSTERD_SVC_MGMT_H_
+#define _GLUSTERD_SVC_MGMT_H_
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "glusterd-proc-mgmt.h"
+#include "glusterd-conn-mgmt.h"
+
+struct glusterd_svc_;
+typedef struct glusterd_svc_ glusterd_svc_t;
+
+typedef int (*glusterd_svc_manager_t) (glusterd_svc_t *svc,
+ void *data, int flags);
+typedef int (*glusterd_svc_start_t) (glusterd_svc_t *svc, int flags);
+typedef int (*glusterd_svc_stop_t) (glusterd_svc_t *svc, int sig);
+
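+/* Generic daemon descriptor: embeds the connection and process state and
+ * carries the per-daemon manager/start/stop callbacks. */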
+struct glusterd_svc_ {
+ char name[PATH_MAX];
+ glusterd_conn_t conn;
+ glusterd_proc_t proc;
+ glusterd_svc_manager_t manager;
+ glusterd_svc_start_t start;
+ glusterd_svc_stop_t stop;
+ gf_boolean_t online;
+};
+
+int
+glusterd_svc_create_rundir (char *rundir);
+
+int
+glusterd_svc_init (glusterd_svc_t *svc, char *svc_name,
+ glusterd_svc_manager_t manager,
+ glusterd_svc_start_t start,
+ glusterd_svc_stop_t stop);
+
+int
+glusterd_svc_start (glusterd_svc_t *svc, int flags, dict_t *cmdline);
+
+int
+glusterd_svc_stop (glusterd_svc_t *svc, int sig);
+
+void
+glusterd_svc_build_pidfile_path (char *server, char *workdir,
+ char *path, size_t len);
+
+void
+glusterd_svc_build_volfile_path (char *server, char *workdir,
+ char *volfile, size_t len);
+
+void
+glusterd_svc_build_svcdir (char *server, char *workdir,
+ char *path, size_t len);
+
+void
+glusterd_svc_build_rundir (char *server, char *workdir,
+ char *path, size_t len);
+
+int
+glusterd_svc_reconfigure (int (*create_volfile) ());
+
+int
+glusterd_svc_common_rpc_notify (glusterd_conn_t *conn,
+ rpc_clnt_event_t event);
+
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 5d28dc24e45..04fa67c6df1 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -54,6 +54,12 @@
#include "glusterd-messages.h"
#include "glusterd-volgen.h"
#include "glusterd-snapshot-utils.h"
+#include "glusterd-svc-mgmt.h"
+#include "glusterd-svc-helper.h"
+#include "glusterd-shd-svc.h"
+#include "glusterd-nfs-svc.h"
+#include "glusterd-quotad-svc.h"
+#include "glusterd-snapd-svc.h"
#include "xdr-generic.h"
#include <sys/resource.h>
@@ -3023,7 +3029,7 @@ glusterd_spawn_daemons (void *opaque)
}
glusterd_restart_gsyncds (conf);
glusterd_restart_rebalance (conf);
- ret = glusterd_restart_snapds (conf);
+ ret = glusterd_snapdsvc_restart ();
return ret;
}
@@ -3917,6 +3923,7 @@ glusterd_delete_stale_volume (glusterd_volinfo_t *stale_volinfo,
glusterd_volinfo_t *temp_volinfo = NULL;
glusterd_volinfo_t *voliter = NULL;
xlator_t *this = NULL;
+ glusterd_svc_t *svc = NULL;
GF_ASSERT (stale_volinfo);
GF_ASSERT (valid_volinfo);
@@ -3976,7 +3983,10 @@ glusterd_delete_stale_volume (glusterd_volinfo_t *stale_volinfo,
(void) gf_store_handle_destroy (stale_volinfo->shandle);
stale_volinfo->shandle = NULL;
}
- (void) glusterd_snapd_stop (stale_volinfo);
+ if (!stale_volinfo->is_snap_volume) {
+ svc = &(stale_volinfo->snapd.svc);
+ (void) svc->manager (svc, stale_volinfo, PROC_START_NO_WAIT);
+ }
(void) glusterd_volinfo_remove (stale_volinfo);
return 0;
@@ -4039,6 +4049,7 @@ glusterd_import_friend_volume (dict_t *peer_data, size_t count)
xlator_t *this = NULL;
glusterd_volinfo_t *old_volinfo = NULL;
glusterd_volinfo_t *new_volinfo = NULL;
+ glusterd_svc_t *svc = NULL;
GF_ASSERT (peer_data);
@@ -4057,8 +4068,25 @@ glusterd_import_friend_volume (dict_t *peer_data, size_t count)
goto out;
}
+ ret = glusterd_snapdsvc_init (new_volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to initialize "
+ "snapdsvc for volume %s", new_volinfo->volname);
+ goto out;
+ }
+
ret = glusterd_volinfo_find (new_volinfo->volname, &old_volinfo);
if (0 == ret) {
+ /* snapdsvc initialization of old_volinfo is also required here
+ * as glusterd_delete_stale_volume () invokes snapdsvc manager
+ */
+ ret = glusterd_snapdsvc_init (old_volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to initialize"
+ " snapdsvc for old volume %s",
+ old_volinfo->volname);
+ goto out;
+ }
(void) gd_check_and_update_rebalance_info (old_volinfo,
new_volinfo);
(void) glusterd_delete_stale_volume (old_volinfo, new_volinfo);
@@ -4066,8 +4094,10 @@ glusterd_import_friend_volume (dict_t *peer_data, size_t count)
if (glusterd_is_volume_started (new_volinfo)) {
(void) glusterd_start_bricks (new_volinfo);
- if (glusterd_is_snapd_enabled (new_volinfo))
- (void) glusterd_snapd_start (new_volinfo, _gf_false);
+ if (glusterd_is_snapd_enabled (new_volinfo)) {
+ svc = &(new_volinfo->snapd.svc);
+ (void) svc->start (svc, PROC_START_NO_WAIT);
+ }
}
ret = glusterd_store_volinfo (new_volinfo, GLUSTERD_VOLINFO_VER_AC_NONE);
@@ -4213,12 +4243,16 @@ glusterd_compare_friend_data (dict_t *peer_data, int32_t *status,
gf_boolean_t stale_shd = _gf_false;
gf_boolean_t stale_qd = _gf_false;
xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
this = THIS;
GF_ASSERT (this);
GF_ASSERT (peer_data);
GF_ASSERT (status);
+ priv = this->private;
+ GF_ASSERT (priv);
+
ret = glusterd_import_global_opts (peer_data);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Importing global "
@@ -4247,24 +4281,25 @@ glusterd_compare_friend_data (dict_t *peer_data, int32_t *status,
}
if (update) {
- if (glusterd_is_nodesvc_running ("nfs"))
+ if (glusterd_proc_is_running (&(priv->nfs_svc.proc)))
stale_nfs = _gf_true;
- if (glusterd_is_nodesvc_running ("glustershd"))
+ if (glusterd_proc_is_running (&(priv->shd_svc.proc)))
stale_shd = _gf_true;
- if (glusterd_is_nodesvc_running ("quotad"))
+ if (glusterd_proc_is_running (&(priv->quotad_svc.proc)))
stale_qd = _gf_true;
ret = glusterd_import_friend_volumes (peer_data);
if (ret)
goto out;
if (_gf_false == glusterd_are_all_volumes_stopped ()) {
- ret = glusterd_nodesvcs_handle_graph_change (NULL);
+ ret = glusterd_svcs_manager (NULL);
} else {
if (stale_nfs)
- glusterd_nfs_server_stop ();
+ priv->nfs_svc.stop (&(priv->nfs_svc), SIGKILL);
if (stale_shd)
- glusterd_shd_stop ();
+ priv->shd_svc.stop (&(priv->shd_svc), SIGTERM);
if (stale_qd)
- glusterd_quotad_stop ();
+ priv->quotad_svc.stop (&(priv->quotad_svc),
+ SIGTERM);
}
}
@@ -4274,116 +4309,13 @@ out:
return ret;
}
-void
-glusterd_get_nodesvc_dir (char *server, char *workdir,
- char *path, size_t len)
-{
- GF_ASSERT (len == PATH_MAX);
- snprintf (path, len, "%s/%s", workdir, server);
-}
-
-void
-glusterd_get_nodesvc_rundir (char *server, char *workdir,
- char *path, size_t len)
-{
- char dir[PATH_MAX] = {0};
- GF_ASSERT (len == PATH_MAX);
-
- glusterd_get_nodesvc_dir (server, workdir, dir, sizeof (dir));
- snprintf (path, len, "%s/run", dir);
-}
-
-void
-glusterd_get_nodesvc_pidfile (char *server, char *workdir,
- char *path, size_t len)
-{
- char dir[PATH_MAX] = {0};
- GF_ASSERT (len == PATH_MAX);
-
- glusterd_get_nodesvc_rundir (server, workdir, dir, sizeof (dir));
- snprintf (path, len, "%s/%s.pid", dir, server);
-}
-
-void
-glusterd_get_nodesvc_volfile (char *server, char *workdir,
- char *volfile, size_t len)
-{
- char dir[PATH_MAX] = {0,};
- GF_ASSERT (len == PATH_MAX);
-
- glusterd_get_nodesvc_dir (server, workdir, dir, sizeof (dir));
- if (strcmp ("quotad", server) != 0)
- snprintf (volfile, len, "%s/%s-server.vol", dir, server);
- else
- snprintf (volfile, len, "%s/%s.vol", dir, server);
-}
-
-void
-glusterd_nodesvc_set_online_status (char *server, gf_boolean_t status)
-{
- glusterd_conf_t *priv = NULL;
-
- GF_ASSERT (server);
- priv = THIS->private;
- GF_ASSERT (priv);
- GF_ASSERT (priv->shd);
- GF_ASSERT (priv->nfs);
- GF_ASSERT (priv->quotad);
-
- if (!strcmp("glustershd", server))
- priv->shd->online = status;
- else if (!strcmp ("nfs", server))
- priv->nfs->online = status;
- else if (!strcmp ("quotad", server))
- priv->quotad->online = status;
-}
-
-gf_boolean_t
-glusterd_is_nodesvc_online (char *server)
-{
- glusterd_conf_t *conf = NULL;
- gf_boolean_t online = _gf_false;
-
- GF_ASSERT (server);
- conf = THIS->private;
- GF_ASSERT (conf);
- GF_ASSERT (conf->shd);
- GF_ASSERT (conf->nfs);
- GF_ASSERT (conf->quotad);
-
- if (!strcmp (server, "glustershd"))
- online = conf->shd->online;
- else if (!strcmp (server, "nfs"))
- online = conf->nfs->online;
- else if (!strcmp (server, "quotad"))
- online = conf->quotad->online;
-
- return online;
-}
-
-int32_t
-glusterd_nodesvc_set_socket_filepath (char *rundir, uuid_t uuid,
- char *socketpath, int len)
-{
- char sockfilepath[PATH_MAX] = {0,};
-
- snprintf (sockfilepath, sizeof (sockfilepath), "%s/run-%s",
- rundir, uuid_utoa (uuid));
-
- glusterd_set_socket_filepath (sockfilepath, socketpath, len);
- return 0;
-}
-
struct rpc_clnt*
glusterd_pending_node_get_rpc (glusterd_pending_node_t *pending_node)
{
struct rpc_clnt *rpc = NULL;
glusterd_brickinfo_t *brickinfo = NULL;
- nodesrv_t *shd = NULL;
glusterd_volinfo_t *volinfo = NULL;
- nodesrv_t *nfs = NULL;
- nodesrv_t *quotad = NULL;
- glusterd_snapd_t *snapd = NULL;
+ glusterd_svc_t *svc = NULL;
GF_VALIDATE_OR_GOTO (THIS->name, pending_node, out);
GF_VALIDATE_OR_GOTO (THIS->name, pending_node->node, out);
@@ -4392,25 +4324,18 @@ glusterd_pending_node_get_rpc (glusterd_pending_node_t *pending_node)
brickinfo = pending_node->node;
rpc = brickinfo->rpc;
- } else if (pending_node->type == GD_NODE_SHD) {
- shd = pending_node->node;
- rpc = shd->rpc;
-
+ } else if (pending_node->type == GD_NODE_SHD ||
+ pending_node->type == GD_NODE_NFS ||
+ pending_node->type == GD_NODE_QUOTAD) {
+ svc = pending_node->node;
+ rpc = svc->conn.rpc;
} else if (pending_node->type == GD_NODE_REBALANCE) {
volinfo = pending_node->node;
if (volinfo->rebal.defrag)
rpc = volinfo->rebal.defrag->rpc;
-
- } else if (pending_node->type == GD_NODE_NFS) {
- nfs = pending_node->node;
- rpc = nfs->rpc;
-
- } else if (pending_node->type == GD_NODE_QUOTAD) {
- quotad = pending_node->node;
- rpc = quotad->rpc;
} else if (pending_node->type == GD_NODE_SNAPD) {
- snapd = pending_node->node;
- rpc = snapd->rpc;
+ volinfo = pending_node->node;
+ rpc = volinfo->snapd.svc.conn.rpc;
} else {
GF_ASSERT (0);
}
@@ -4419,258 +4344,6 @@ out:
return rpc;
}
-struct rpc_clnt*
-glusterd_nodesvc_get_rpc (char *server)
-{
- glusterd_conf_t *priv = NULL;
- struct rpc_clnt *rpc = NULL;
-
- GF_ASSERT (server);
- priv = THIS->private;
- GF_ASSERT (priv);
- GF_ASSERT (priv->shd);
- GF_ASSERT (priv->nfs);
- GF_ASSERT (priv->quotad);
-
- if (!strcmp (server, "glustershd"))
- rpc = priv->shd->rpc;
- else if (!strcmp (server, "nfs"))
- rpc = priv->nfs->rpc;
- else if (!strcmp (server, "quotad"))
- rpc = priv->quotad->rpc;
-
- return rpc;
-}
-
-int32_t
-glusterd_nodesvc_set_rpc (char *server, struct rpc_clnt *rpc)
-{
- int ret = 0;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
- GF_ASSERT (priv->shd);
- GF_ASSERT (priv->nfs);
- GF_ASSERT (priv->quotad);
-
- if (!strcmp ("glustershd", server))
- priv->shd->rpc = rpc;
- else if (!strcmp ("nfs", server))
- priv->nfs->rpc = rpc;
- else if (!strcmp ("quotad", server))
- priv->quotad->rpc = rpc;
-
- return ret;
-}
-
-int32_t
-glusterd_nodesvc_connect (char *server, char *socketpath)
-{
- int ret = 0;
- dict_t *options = NULL;
- struct rpc_clnt *rpc = NULL;
- glusterd_conf_t *priv = THIS->private;
-
- rpc = glusterd_nodesvc_get_rpc (server);
-
- if (rpc == NULL) {
- /* Setting frame-timeout to 10mins (600seconds).
- * Unix domain sockets ensures that the connection is reliable.
- * The default timeout of 30mins used for unreliable network
- * connections is too long for unix domain socket connections.
- */
- ret = rpc_transport_unix_options_build (&options, socketpath,
- 600);
- if (ret)
- goto out;
-
- if (!strcmp(server, "glustershd") ||
- !strcmp(server, "nfs") ||
- !strcmp(server, "quotad")) {
- ret = dict_set_str(options, "transport.socket.ignore-enoent", "on");
- if (ret)
- goto out;
- }
-
- ret = glusterd_rpc_create (&rpc, options,
- glusterd_nodesvc_rpc_notify,
- server);
- if (ret)
- goto out;
- (void) glusterd_nodesvc_set_rpc (server, rpc);
- }
-out:
- return ret;
-}
-
-int32_t
-glusterd_nodesvc_disconnect (char *server)
-{
- struct rpc_clnt *rpc = NULL;
- glusterd_conf_t *priv = THIS->private;
-
- rpc = glusterd_nodesvc_get_rpc (server);
- (void)glusterd_nodesvc_set_rpc (server, NULL);
-
- if (rpc)
- glusterd_rpc_clnt_unref (priv, rpc);
-
- return 0;
-}
-
-int32_t
-glusterd_nodesvc_start (char *server, gf_boolean_t wait)
-{
- int32_t ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- runner_t runner = {0,};
- char pidfile[PATH_MAX] = {0,};
- char logfile[PATH_MAX] = {0,};
- char volfile[PATH_MAX] = {0,};
- char rundir[PATH_MAX] = {0,};
- char sockfpath[PATH_MAX] = {0,};
- char *volfileserver = NULL;
- char volfileid[256] = {0};
- char glusterd_uuid_option[1024] = {0};
- char valgrind_logfile[PATH_MAX] = {0};
-
- this = THIS;
- GF_ASSERT(this);
-
- priv = this->private;
-
- glusterd_get_nodesvc_rundir (server, priv->workdir,
- rundir, sizeof (rundir));
- ret = mkdir (rundir, 0777);
-
- if ((ret == -1) && (EEXIST != errno)) {
- gf_log ("", GF_LOG_ERROR, "Unable to create rundir %s",
- rundir);
- goto out;
- }
-
- glusterd_get_nodesvc_pidfile (server, priv->workdir,
- pidfile, sizeof (pidfile));
- glusterd_get_nodesvc_volfile (server, priv->workdir,
- volfile, sizeof (volfile));
- ret = access (volfile, F_OK);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "%s Volfile %s is not present",
- server, volfile);
- goto out;
- }
-
- snprintf (logfile, PATH_MAX, "%s/%s.log", DEFAULT_LOG_FILE_DIRECTORY,
- server);
- snprintf (volfileid, sizeof (volfileid), "gluster/%s", server);
-
- if (dict_get_str (this->options, "transport.socket.bind-address",
- &volfileserver) != 0) {
- volfileserver = "localhost";
- }
-
- glusterd_nodesvc_set_socket_filepath (rundir, MY_UUID,
- sockfpath, sizeof (sockfpath));
-
- if (gf_is_service_running(pidfile, NULL))
- goto connect;
-
- runinit (&runner);
-
- if (priv->valgrind) {
- snprintf (valgrind_logfile, PATH_MAX,
- "%s/valgrind-%s.log",
- DEFAULT_LOG_FILE_DIRECTORY,
- server);
-
- runner_add_args (&runner, "valgrind", "--leak-check=full",
- "--trace-children=yes", "--track-origins=yes",
- NULL);
- runner_argprintf (&runner, "--log-file=%s", valgrind_logfile);
- }
-
- runner_add_args (&runner, SBIN_DIR"/glusterfs",
- "-s", volfileserver,
- "--volfile-id", volfileid,
- "-p", pidfile,
- "-l", logfile,
- "-S", sockfpath,
- NULL);
-
- if (!strcmp (server, "glustershd")) {
- snprintf (glusterd_uuid_option, sizeof (glusterd_uuid_option),
- "*replicate*.node-uuid=%s", uuid_utoa (MY_UUID));
- runner_add_args (&runner, "--xlator-option",
- glusterd_uuid_option, NULL);
- }
- if (!strcmp (server, "quotad")) {
- runner_add_args (&runner, "--xlator-option",
- "*replicate*.data-self-heal=off",
- "--xlator-option",
- "*replicate*.metadata-self-heal=off",
- "--xlator-option",
- "*replicate*.entry-self-heal=off", NULL);
- }
- runner_log (&runner, "", GF_LOG_DEBUG,
- "Starting the nfs/glustershd services");
-
- if (!wait) {
- ret = runner_run_nowait (&runner);
- } else {
- synclock_unlock (&priv->big_lock);
- {
- ret = runner_run (&runner);
- }
- synclock_lock (&priv->big_lock);
- }
-connect:
- if (ret == 0) {
- glusterd_nodesvc_connect (server, sockfpath);
- }
-out:
- return ret;
-}
-
-int
-glusterd_nfs_server_start ()
-{
- return glusterd_nodesvc_start ("nfs", _gf_false);
-}
-
-int
-glusterd_shd_start ()
-{
- return glusterd_nodesvc_start ("glustershd", _gf_false);
-}
-
-int
-glusterd_quotad_start ()
-{
- return glusterd_nodesvc_start ("quotad", _gf_false);
-}
-
-int
-glusterd_quotad_start_wait ()
-{
- return glusterd_nodesvc_start ("quotad", _gf_true);
-}
-
-gf_boolean_t
-glusterd_is_nodesvc_running (char *server)
-{
- char pidfile[PATH_MAX] = {0,};
- glusterd_conf_t *priv = THIS->private;
-
- glusterd_get_nodesvc_pidfile (server, priv->workdir,
- pidfile, sizeof (pidfile));
- return gf_is_service_running (pidfile, NULL);
-}
-
int32_t
glusterd_unlink_file (char *sockfpath)
{
@@ -4688,46 +4361,6 @@ glusterd_unlink_file (char *sockfpath)
return ret;
}
-int32_t
-glusterd_nodesvc_unlink_socket_file (char *server)
-{
- char sockfpath[PATH_MAX] = {0,};
- char rundir[PATH_MAX] = {0,};
- glusterd_conf_t *priv = THIS->private;
-
- glusterd_get_nodesvc_rundir (server, priv->workdir,
- rundir, sizeof (rundir));
-
- glusterd_nodesvc_set_socket_filepath (rundir, MY_UUID,
- sockfpath, sizeof (sockfpath));
-
- return glusterd_unlink_file (sockfpath);
-}
-
-int32_t
-glusterd_nodesvc_stop (char *server, int sig)
-{
- char pidfile[PATH_MAX] = {0,};
- glusterd_conf_t *priv = THIS->private;
- int ret = 0;
-
- if (!glusterd_is_nodesvc_running (server))
- goto out;
-
- (void)glusterd_nodesvc_disconnect (server);
-
- glusterd_get_nodesvc_pidfile (server, priv->workdir,
- pidfile, sizeof (pidfile));
- ret = glusterd_service_stop (server, pidfile, sig, _gf_true);
-
- if (ret == 0) {
- glusterd_nodesvc_set_online_status (server, _gf_false);
- (void)glusterd_nodesvc_unlink_socket_file (server);
- }
-out:
- return ret;
-}
-
void
glusterd_nfs_pmap_deregister ()
{
@@ -4763,50 +4396,37 @@ glusterd_nfs_pmap_deregister ()
}
int
-glusterd_nfs_server_stop ()
+glusterd_add_node_to_dict (char *server, dict_t *dict, int count,
+ dict_t *vol_opts)
{
- int ret = 0;
- gf_boolean_t deregister = _gf_false;
+ int ret = -1;
+ char pidfile[PATH_MAX] = {0,};
+ gf_boolean_t running = _gf_false;
+ int pid = -1;
+ int port = 0;
+ glusterd_svc_t *svc = NULL;
+ char key[1024] = {0,};
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
- if (glusterd_is_nodesvc_running ("nfs"))
- deregister = _gf_true;
- ret = glusterd_nodesvc_stop ("nfs", SIGKILL);
- if (ret)
- goto out;
- if (deregister)
- glusterd_nfs_pmap_deregister ();
-out:
- return ret;
-}
+ this = THIS;
+ GF_ASSERT (this);
-int
-glusterd_shd_stop ()
-{
- return glusterd_nodesvc_stop ("glustershd", SIGTERM);
-}
+ priv = this->private;
+ GF_ASSERT (priv);
-int
-glusterd_quotad_stop ()
-{
- return glusterd_nodesvc_stop ("quotad", SIGTERM);
-}
+ glusterd_svc_build_pidfile_path (server, priv->workdir, pidfile,
+ sizeof (pidfile));
-int
-glusterd_add_node_to_dict (char *server, dict_t *dict, int count,
- dict_t *vol_opts)
-{
- int ret = -1;
- glusterd_conf_t *priv = THIS->private;
- char pidfile[PATH_MAX] = {0,};
- gf_boolean_t running = _gf_false;
- int pid = -1;
- int port = 0;
- char key[1024] = {0,};
+ if (strcmp(server, priv->shd_svc.name) == 0)
+ svc = &(priv->shd_svc);
+ else if (strcmp(server, priv->nfs_svc.name) == 0)
+ svc = &(priv->nfs_svc);
+ else if (strcmp(server, priv->quotad_svc.name) == 0)
+ svc = &(priv->quotad_svc);
- glusterd_get_nodesvc_pidfile (server, priv->workdir, pidfile,
- sizeof (pidfile));
//Consider service to be running only when glusterd sees it Online
- if (glusterd_is_nodesvc_online (server))
+ if (svc->online)
running = gf_is_service_running (pidfile, &pid);
/* For nfs-servers/self-heal-daemon setting
@@ -4819,11 +4439,11 @@ glusterd_add_node_to_dict (char *server, dict_t *dict, int count,
* when output.
*/
snprintf (key, sizeof (key), "brick%d.hostname", count);
- if (!strcmp (server, "nfs"))
+ if (!strcmp (server, priv->nfs_svc.name))
ret = dict_set_str (dict, key, "NFS Server");
- else if (!strcmp (server, "glustershd"))
+ else if (!strcmp (server, priv->shd_svc.name))
ret = dict_set_str (dict, key, "Self-heal Daemon");
- else if (!strcmp (server, "quotad"))
+ else if (!strcmp (server, priv->quotad_svc.name))
ret = dict_set_str (dict, key, "Quota Daemon");
if (ret)
goto out;
@@ -4840,7 +4460,7 @@ glusterd_add_node_to_dict (char *server, dict_t *dict, int count,
* Self-heal daemon doesn't provide any port for access
* by entities other than gluster.
*/
- if (!strcmp (server, "nfs")) {
+ if (!strcmp (server, priv->nfs_svc.name)) {
if (dict_get (vol_opts, "nfs.port")) {
ret = dict_get_int32 (vol_opts, "nfs.port", &port);
if (ret)
@@ -4902,210 +4522,6 @@ out:
return ret;
}
-int
-glusterd_check_generate_start_service (int (*create_volfile) (),
- int (*stop) (), int (*start) ())
-{
- int ret = -1;
-
- ret = create_volfile ();
- if (ret)
- goto out;
-
- ret = stop ();
- if (ret)
- goto out;
-
- ret = start ();
-out:
- return ret;
-}
-
-int
-glusterd_reconfigure_nodesvc (int (*create_volfile) ())
-{
- int ret = -1;
-
- ret = create_volfile ();
- if (ret)
- goto out;
-
- ret = glusterd_fetchspec_notify (THIS);
-out:
- return ret;
-}
-
-int
-glusterd_reconfigure_shd ()
-{
- int (*create_volfile) () = glusterd_create_shd_volfile;
- return glusterd_reconfigure_nodesvc (create_volfile);
-}
-
-int
-glusterd_reconfigure_quotad ()
-{
- return glusterd_reconfigure_nodesvc (glusterd_create_quotad_volfile);
-}
-
-int
-glusterd_reconfigure_nfs ()
-{
- int ret = -1;
- gf_boolean_t identical = _gf_false;
-
- /*
- * Check both OLD and NEW volfiles, if they are SAME by size
- * and cksum i.e. "character-by-character". If YES, then
- * NOTHING has been changed, just return.
- */
- ret = glusterd_check_nfs_volfile_identical (&identical);
- if (ret)
- goto out;
-
- if (identical) {
- ret = 0;
- goto out;
- }
-
- /*
- * They are not identical. Find out if the topology is changed
- * OR just the volume options. If just the options which got
- * changed, then inform the xlator to reconfigure the options.
- */
- identical = _gf_false; /* RESET the FLAG */
- ret = glusterd_check_nfs_topology_identical (&identical);
- if (ret)
- goto out;
-
- /* Topology is not changed, but just the options. But write the
- * options to NFS volfile, so that NFS will be reconfigured.
- */
- if (identical) {
- ret = glusterd_create_nfs_volfile();
- if (ret == 0) {/* Only if above PASSES */
- ret = glusterd_fetchspec_notify (THIS);
- }
- goto out;
- }
-
- /*
- * NFS volfile's topology has been changed. NFS server needs
- * to be RESTARTED to ACT on the changed volfile.
- */
- ret = glusterd_check_generate_start_nfs ();
-
-out:
- return ret;
-}
-
-int
-glusterd_check_generate_start_nfs ()
-{
- int ret = 0;
-
- ret = glusterd_check_generate_start_service (glusterd_create_nfs_volfile,
- glusterd_nfs_server_stop,
- glusterd_nfs_server_start);
- return ret;
-}
-
-int
-glusterd_check_generate_start_shd ()
-{
- int ret = 0;
-
- ret = glusterd_check_generate_start_service (glusterd_create_shd_volfile,
- glusterd_shd_stop,
- glusterd_shd_start);
- if (ret == -EINVAL)
- ret = 0;
- return ret;
-}
-
-int
-glusterd_check_generate_start_quotad ()
-{
- int ret = 0;
-
- ret = glusterd_check_generate_start_service (glusterd_create_quotad_volfile,
- glusterd_quotad_stop,
- glusterd_quotad_start);
- if (ret == -EINVAL)
- ret = 0;
- return ret;
-}
-
-/* Blocking start variant of glusterd_check_generate_start_quotad */
-int
-glusterd_check_generate_start_quotad_wait ()
-{
- int ret = 0;
-
- ret = glusterd_check_generate_start_service
- (glusterd_create_quotad_volfile, glusterd_quotad_stop,
- glusterd_quotad_start_wait);
- if (ret == -EINVAL)
- ret = 0;
- return ret;
-}
-
-int
-glusterd_nodesvcs_batch_op (glusterd_volinfo_t *volinfo, int (*nfs_op) (),
- int (*shd_op) (), int (*qd_op) ())
- {
- int ret = 0;
- xlator_t *this = THIS;
- glusterd_conf_t *conf = NULL;
-
- GF_ASSERT (this);
- conf = this->private;
- GF_ASSERT (conf);
-
- ret = nfs_op ();
- if (ret)
- goto out;
-
- if (volinfo && !glusterd_is_shd_compatible_volume (volinfo)) {
- ; //do nothing
- } else {
- ret = shd_op ();
- if (ret)
- goto out;
- }
-
- if (conf->op_version == GD_OP_VERSION_MIN)
- goto out;
-
- if (volinfo && !glusterd_is_volume_quota_enabled (volinfo))
- goto out;
-
- ret = qd_op ();
- if (ret)
- goto out;
-
-out:
- return ret;
-}
-
-int
-glusterd_nodesvcs_start (glusterd_volinfo_t *volinfo)
-{
- return glusterd_nodesvcs_batch_op (volinfo,
- glusterd_nfs_server_start,
- glusterd_shd_start,
- glusterd_quotad_start);
-}
-
-int
-glusterd_nodesvcs_stop (glusterd_volinfo_t *volinfo)
-{
- return glusterd_nodesvcs_batch_op (volinfo,
- glusterd_nfs_server_stop,
- glusterd_shd_stop,
- glusterd_quotad_stop);
-}
-
gf_boolean_t
glusterd_are_all_volumes_stopped ()
{
@@ -5171,45 +4587,6 @@ glusterd_all_volumes_with_quota_stopped ()
return _gf_true;
}
-
-int
-glusterd_nodesvcs_handle_graph_change (glusterd_volinfo_t *volinfo)
-{
- int (*shd_op) () = NULL;
- int (*nfs_op) () = NULL;
- int (*qd_op) () = NULL;
-
- if (volinfo && volinfo->is_snap_volume)
- return 0;
-
- shd_op = glusterd_check_generate_start_shd;
- nfs_op = glusterd_check_generate_start_nfs;
- qd_op = glusterd_check_generate_start_quotad;
- if (glusterd_are_all_volumes_stopped ()) {
- shd_op = glusterd_shd_stop;
- nfs_op = glusterd_nfs_server_stop;
- qd_op = glusterd_quotad_stop;
- } else {
- if (glusterd_all_shd_compatible_volumes_stopped()) {
- shd_op = glusterd_shd_stop;
- }
- if (glusterd_all_volumes_with_quota_stopped ()) {
- qd_op = glusterd_quotad_stop;
- }
- }
-
- return glusterd_nodesvcs_batch_op (volinfo, nfs_op, shd_op, qd_op);
-}
-
-int
-glusterd_nodesvcs_handle_reconfigure (glusterd_volinfo_t *volinfo)
-{
- return glusterd_nodesvcs_batch_op (volinfo,
- glusterd_reconfigure_nfs,
- glusterd_reconfigure_shd,
- glusterd_reconfigure_quotad);
-}
-
int
glusterd_volume_count_get (void)
{
@@ -5307,7 +4684,7 @@ glusterd_restart_bricks (glusterd_conf_t *conf)
glusterd_volinfo_t *volinfo = NULL;
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_snap_t *snap = NULL;
- gf_boolean_t start_nodesvcs = _gf_false;
+ gf_boolean_t start_svcs = _gf_false;
xlator_t *this = NULL;
this = THIS;
@@ -5316,7 +4693,7 @@ glusterd_restart_bricks (glusterd_conf_t *conf)
list_for_each_entry (volinfo, &conf->volumes, vol_list) {
if (volinfo->status != GLUSTERD_STATUS_STARTED)
continue;
- start_nodesvcs = _gf_true;
+ start_svcs = _gf_true;
gf_log (this->name, GF_LOG_DEBUG, "starting the volume %s",
volinfo->volname);
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
@@ -5328,7 +4705,7 @@ glusterd_restart_bricks (glusterd_conf_t *conf)
list_for_each_entry (volinfo, &snap->volumes, vol_list) {
if (volinfo->status != GLUSTERD_STATUS_STARTED)
continue;
- start_nodesvcs = _gf_true;
+ start_svcs = _gf_true;
gf_log (this->name, GF_LOG_DEBUG, "starting the snap "
"volume %s", volinfo->volname);
list_for_each_entry (brickinfo, &volinfo->bricks,
@@ -5339,8 +4716,8 @@ glusterd_restart_bricks (glusterd_conf_t *conf)
}
}
- if (start_nodesvcs)
- glusterd_nodesvcs_handle_graph_change (NULL);
+ if (start_svcs)
+ glusterd_svcs_manager (NULL);
return ret;
}
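
The whole family of per-daemon helpers deleted above (glusterd_check_generate_start_*, glusterd_nodesvcs_batch_op, glusterd_nodesvcs_handle_graph_change, glusterd_nodesvcs_handle_reconfigure) collapses into the single glusterd_svcs_manager() call now visible in glusterd_restart_bricks(). The real implementation lives in the new glusterd-svc-helper.c; the fragment below is only a sketch of that orchestration, assembled from names that do appear in this patch (priv->nfs_svc, svc->manager, PROC_START_NO_WAIT), so the loop body and its ordering are assumptions.

/* Sketch of a glusterd_svcs_manager()-style entry point; not the
 * patch's actual code. */
static int
svcs_manager_sketch (glusterd_conf_t *priv, glusterd_volinfo_t *volinfo)
{
        glusterd_svc_t *svcs[] = { &priv->nfs_svc, &priv->shd_svc,
                                   &priv->quotad_svc };
        int             ret    = 0;
        int             i      = 0;

        for (i = 0; i < 3; i++) {
                /* Each manager callback regenerates its own volfile and
                 * decides whether to start, stop or reconfigure its daemon
                 * based on the volume(s) handed to it. */
                ret = svcs[i]->manager (svcs[i], volinfo, PROC_START_NO_WAIT);
                if (ret)
                        break;
        }
        return ret;
}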
@@ -7130,12 +6507,20 @@ int
glusterd_set_dump_options (char *dumpoptions_path, char *options,
int option_cnt)
{
- int ret = 0;
- char *dup_options = NULL;
- char *option = NULL;
- char *tmpptr = NULL;
- FILE *fp = NULL;
- int nfs_cnt = 0;
+ int ret = 0;
+ char *dup_options = NULL;
+ char *option = NULL;
+ char *tmpptr = NULL;
+ FILE *fp = NULL;
+ int nfs_cnt = 0;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ priv = this->private;
+ GF_ASSERT (priv);
if (0 == option_cnt ||
(option_cnt == 1 && (!strcmp (options, "nfs ")))) {
@@ -7153,7 +6538,7 @@ glusterd_set_dump_options (char *dumpoptions_path, char *options,
dup_options);
option = strtok_r (dup_options, " ", &tmpptr);
while (option) {
- if (!strcmp (option, "nfs")) {
+ if (!strcmp (option, priv->nfs_svc.name)) {
if (nfs_cnt > 0) {
unlink (dumpoptions_path);
ret = 0;
@@ -7271,7 +6656,7 @@ glusterd_nfs_statedump (char *options, int option_cnt, char **op_errstr)
dup_options = gf_strdup (options);
option = strtok_r (dup_options, " ", &tmpptr);
- if (strcmp (option, "nfs")) {
+ if (strcmp (option, conf->nfs_svc.name)) {
snprintf (msg, sizeof (msg), "for nfs statedump, options should"
" be after the key nfs");
*op_errstr = gf_strdup (msg);
@@ -7346,7 +6731,7 @@ glusterd_quotad_statedump (char *options, int option_cnt, char **op_errstr)
dup_options = gf_strdup (options);
option = strtok_r (dup_options, " ", &tmpptr);
- if (strcmp (option, "quotad")) {
+ if (strcmp (option, conf->quotad_svc.name)) {
snprintf (msg, sizeof (msg), "for quotad statedump, options "
"should be after the key 'quotad'");
*op_errstr = gf_strdup (msg);
@@ -10502,3 +9887,11 @@ glusterd_op_clear_xaction_peers ()
}
}
+
+gf_boolean_t
+glusterd_is_volume_started (glusterd_volinfo_t *volinfo)
+{
+ GF_ASSERT (volinfo);
+ return (volinfo->status == GLUSTERD_STATUS_STARTED);
+}
+
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index 63be397aeac..62e2da03ad3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -187,64 +187,11 @@ glusterd_compute_cksum (glusterd_volinfo_t *volinfo,
gf_boolean_t is_quota_conf);
void
-glusterd_get_nodesvc_volfile (char *server, char *workdir,
- char *volfile, size_t len);
-
-gf_boolean_t
-glusterd_is_nodesvc_running ();
-
-gf_boolean_t
-glusterd_is_nodesvc_running ();
-
-void
-glusterd_get_nodesvc_dir (char *server, char *workdir,
- char *path, size_t len);
-int32_t
-glusterd_nfs_server_start ();
-
-int32_t
-glusterd_nfs_server_stop ();
-
-int32_t
-glusterd_shd_start ();
-
-int32_t
-glusterd_shd_stop ();
-
-int32_t
-glusterd_quotad_start ();
-
-int32_t
-glusterd_quotad_start_wait ();
-
-int32_t
-glusterd_quotad_stop ();
-
-void
glusterd_set_socket_filepath (char *sock_filepath, char *sockpath, size_t len);
-int32_t
-glusterd_nodesvc_set_socket_filepath (char *rundir, uuid_t uuid,
- char *socketpath, int len);
-
struct rpc_clnt*
glusterd_pending_node_get_rpc (glusterd_pending_node_t *pending_node);
-struct rpc_clnt*
-glusterd_nodesvc_get_rpc (char *server);
-
-int32_t
-glusterd_nodesvc_set_rpc (char *server, struct rpc_clnt *rpc);
-
-int32_t
-glusterd_nodesvc_connect (char *server, char *socketpath);
-
-void
-glusterd_nodesvc_set_online_status (char *server, gf_boolean_t status);
-
-gf_boolean_t
-glusterd_is_nodesvc_online (char *server);
-
int
glusterd_remote_hostname_get (rpcsvc_request_t *req,
char *remote_host, int len);
@@ -253,29 +200,6 @@ glusterd_import_friend_volumes (dict_t *peer_data);
void
glusterd_set_volume_status (glusterd_volinfo_t *volinfo,
glusterd_volume_status status);
-int
-glusterd_check_generate_start_nfs (void);
-
-int
-glusterd_check_generate_start_shd (void);
-
-int
-glusterd_check_generate_start_quotad (void);
-
-int
-glusterd_check_generate_start_quotad_wait (void);
-
-int
-glusterd_nodesvcs_handle_graph_change (glusterd_volinfo_t *volinfo);
-
-int
-glusterd_nodesvcs_handle_reconfigure (glusterd_volinfo_t *volinfo);
-
-int
-glusterd_nodesvcs_start (glusterd_volinfo_t *volinfo);
-
-int
-glusterd_nodesvcs_stop (glusterd_volinfo_t *volinfo);
int32_t
glusterd_volume_count_get (void);
@@ -635,9 +559,6 @@ glusterd_is_volume_quota_enabled (glusterd_volinfo_t *volinfo);
gf_boolean_t
glusterd_all_volumes_with_quota_stopped ();
-int
-glusterd_reconfigure_quotad ();
-
void
glusterd_clean_up_quota_store (glusterd_volinfo_t *volinfo);
@@ -754,4 +675,18 @@ glusterd_import_quota_conf (dict_t *peer_data, int vol_idx,
gf_boolean_t
glusterd_is_shd_compatible_volume (glusterd_volinfo_t *volinfo);
+
+
+gf_boolean_t
+glusterd_are_all_volumes_stopped ();
+
+gf_boolean_t
+glusterd_all_shd_compatible_volumes_stopped ();
+
+void
+glusterd_nfs_pmap_deregister ();
+
+gf_boolean_t
+glusterd_is_volume_started (glusterd_volinfo_t *volinfo);
+
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
index 29b34883a81..0562af9364b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
@@ -35,6 +35,9 @@
#include "run.h"
#include "options.h"
#include "glusterd-snapshot-utils.h"
+#include "glusterd-svc-mgmt.h"
+#include "glusterd-svc-helper.h"
+#include "glusterd-snapd-svc-helper.h"
extern struct volopt_map_entry glusterd_volopt_map[];
@@ -44,13 +47,6 @@ extern struct volopt_map_entry glusterd_volopt_map[];
*
*********************************************/
-
-struct volgen_graph {
- char **errstr;
- glusterfs_graph_t graph;
-};
-typedef struct volgen_graph volgen_graph_t;
-
static void
set_graph_errstr (volgen_graph_t *graph, const char *str)
{
@@ -3428,7 +3424,7 @@ out:
return ret;
}
-static int
+int
build_shd_graph (volgen_graph_t *graph, dict_t *mod_dict)
{
glusterd_volinfo_t *voliter = NULL;
@@ -3475,7 +3471,7 @@ out:
}
/* builds a graph for nfs server role, with option overrides in mod_dict */
-static int
+int
build_nfs_graph (volgen_graph_t *graph, dict_t *mod_dict)
{
volgen_graph_t cgraph = {0,};
@@ -3725,7 +3721,7 @@ glusterd_generate_brick_volfile (glusterd_volinfo_t *volinfo,
return ret;
}
-static int
+int
build_quotad_graph (volgen_graph_t *graph, dict_t *mod_dict)
{
volgen_graph_t cgraph = {0};
@@ -4042,238 +4038,8 @@ out:
}
int
-glusterd_create_rb_volfiles (glusterd_volinfo_t *volinfo,
- glusterd_brickinfo_t *brickinfo)
-{
- int ret = -1;
-
- ret = glusterd_generate_brick_volfile (volinfo, brickinfo);
- if (!ret)
- ret = generate_client_volfiles (volinfo, GF_CLIENT_TRUSTED);
- if (!ret)
- ret = glusterd_fetchspec_notify (THIS);
-
- return ret;
-}
-
-int
-glusterd_create_volfiles (glusterd_volinfo_t *volinfo)
-{
- int ret = -1;
- xlator_t *this = NULL;
-
- this = THIS;
-
- ret = generate_brick_volfiles (volinfo);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Could not generate volfiles for bricks");
- goto out;
- }
-
- ret = generate_client_volfiles (volinfo, GF_CLIENT_TRUSTED);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Could not generate trusted client volfiles");
- goto out;
- }
-
- ret = generate_client_volfiles (volinfo, GF_CLIENT_OTHER);
- if (ret)
- gf_log (this->name, GF_LOG_ERROR,
- "Could not generate client volfiles");
-
-out:
- return ret;
-}
-
-int
-glusterd_create_volfiles_and_notify_services (glusterd_volinfo_t *volinfo)
-{
- int ret = -1;
- xlator_t *this = NULL;
-
- this = THIS;
-
- ret = glusterd_create_volfiles (volinfo);
- if (ret)
- goto out;
-
- ret = glusterd_fetchspec_notify (this);
-
-out:
- return ret;
-}
-
-int
-glusterd_create_global_volfile (int (*builder) (volgen_graph_t *graph,
- dict_t *set_dict),
- char *filepath, dict_t *mod_dict)
-{
- volgen_graph_t graph = {0,};
- int ret = -1;
-
- ret = builder (&graph, mod_dict);
- if (!ret)
- ret = volgen_write_volfile (&graph, filepath);
-
- volgen_graph_free (&graph);
-
- return ret;
-}
-
-int
-glusterd_create_nfs_volfile ()
-{
- char filepath[PATH_MAX] = {0,};
- glusterd_conf_t *conf = THIS->private;
-
- glusterd_get_nodesvc_volfile ("nfs", conf->workdir,
- filepath, sizeof (filepath));
- return glusterd_create_global_volfile (build_nfs_graph,
- filepath, NULL);
-}
-
-int
-glusterd_create_shd_volfile ()
-{
- char filepath[PATH_MAX] = {0,};
- int ret = -1;
- glusterd_conf_t *conf = THIS->private;
- dict_t *mod_dict = NULL;
-
- mod_dict = dict_new ();
- if (!mod_dict)
- goto out;
-
- ret = dict_set_uint32 (mod_dict, "cluster.background-self-heal-count", 0);
- if (ret)
- goto out;
-
- ret = dict_set_str (mod_dict, "cluster.data-self-heal", "on");
- if (ret)
- goto out;
-
- ret = dict_set_str (mod_dict, "cluster.metadata-self-heal", "on");
- if (ret)
- goto out;
-
- ret = dict_set_str (mod_dict, "cluster.entry-self-heal", "on");
- if (ret)
- goto out;
-
- glusterd_get_nodesvc_volfile ("glustershd", conf->workdir,
- filepath, sizeof (filepath));
- ret = glusterd_create_global_volfile (build_shd_graph, filepath,
- mod_dict);
-out:
- if (mod_dict)
- dict_unref (mod_dict);
- return ret;
-}
-
-int
-glusterd_check_nfs_volfile_identical (gf_boolean_t *identical)
-{
- char nfsvol[PATH_MAX] = {0,};
- char tmpnfsvol[PATH_MAX] = {0,};
- glusterd_conf_t *conf = NULL;
- xlator_t *this = NULL;
- int ret = -1;
- int need_unlink = 0;
- int tmp_fd = -1;
-
- this = THIS;
-
- GF_ASSERT (this);
- GF_ASSERT (identical);
- conf = this->private;
-
- glusterd_get_nodesvc_volfile ("nfs", conf->workdir,
- nfsvol, sizeof (nfsvol));
-
- snprintf (tmpnfsvol, sizeof (tmpnfsvol), "/tmp/gnfs-XXXXXX");
-
- tmp_fd = mkstemp (tmpnfsvol);
- if (tmp_fd < 0) {
- gf_log ("", GF_LOG_WARNING, "Unable to create temp file %s: "
- "(%s)", tmpnfsvol, strerror (errno));
- goto out;
- }
-
- need_unlink = 1;
-
- ret = glusterd_create_global_volfile (build_nfs_graph,
- tmpnfsvol, NULL);
- if (ret)
- goto out;
-
- ret = glusterd_check_files_identical (nfsvol, tmpnfsvol,
- identical);
- if (ret)
- goto out;
-
-out:
- if (need_unlink)
- unlink (tmpnfsvol);
-
- if (tmp_fd >= 0)
- close (tmp_fd);
-
- return ret;
-}
-
-int
-glusterd_check_nfs_topology_identical (gf_boolean_t *identical)
-{
- char nfsvol[PATH_MAX] = {0,};
- char tmpnfsvol[PATH_MAX] = {0,};
- glusterd_conf_t *conf = NULL;
- xlator_t *this = THIS;
- int ret = -1;
- int tmpclean = 0;
- int tmpfd = -1;
-
- if ((!identical) || (!this) || (!this->private))
- goto out;
-
- conf = (glusterd_conf_t *) this->private;
-
- /* Fetch the original NFS volfile */
- glusterd_get_nodesvc_volfile ("nfs", conf->workdir,
- nfsvol, sizeof (nfsvol));
-
- /* Create the temporary NFS volfile */
- snprintf (tmpnfsvol, sizeof (tmpnfsvol), "/tmp/gnfs-XXXXXX");
- tmpfd = mkstemp (tmpnfsvol);
- if (tmpfd < 0) {
- gf_log (this->name, GF_LOG_WARNING,
- "Unable to create temp file %s: (%s)",
- tmpnfsvol, strerror (errno));
- goto out;
- }
-
- tmpclean = 1; /* SET the flag to unlink() tmpfile */
-
- ret = glusterd_create_global_volfile (build_nfs_graph,
- tmpnfsvol, NULL);
- if (ret)
- goto out;
-
- /* Compare the topology of volfiles */
- ret = glusterd_check_topology_identical (nfsvol, tmpnfsvol,
- identical);
-out:
- if (tmpfd >= 0)
- close (tmpfd);
- if (tmpclean)
- unlink (tmpnfsvol);
- return ret;
-}
-
-int
-glusterd_generate_snapd_volfile (volgen_graph_t *graph,
- glusterd_volinfo_t *volinfo)
+glusterd_snapdsvc_generate_volfile (volgen_graph_t *graph,
+ glusterd_volinfo_t *volinfo)
{
xlator_t *xl = NULL;
char *username = NULL;
@@ -4370,15 +4136,15 @@ glusterd_generate_snapd_volfile (volgen_graph_t *graph,
}
int
-glusterd_create_snapd_volfile (glusterd_volinfo_t *volinfo)
+glusterd_snapdsvc_create_volfile (glusterd_volinfo_t *volinfo)
{
volgen_graph_t graph = {0,};
int ret = -1;
char filename [PATH_MAX] = {0,};
- glusterd_get_snapd_volfile (volinfo, filename, PATH_MAX);
+ glusterd_svc_build_snapd_volfile (volinfo, filename, PATH_MAX);
- ret = glusterd_generate_snapd_volfile (&graph, volinfo);
+ ret = glusterd_snapdsvc_generate_volfile (&graph, volinfo);
if (!ret)
ret = volgen_write_volfile (&graph, filename);
@@ -4388,17 +4154,85 @@ glusterd_create_snapd_volfile (glusterd_volinfo_t *volinfo)
}
int
-glusterd_create_quotad_volfile (void *data)
+glusterd_create_rb_volfiles (glusterd_volinfo_t *volinfo,
+ glusterd_brickinfo_t *brickinfo)
{
- char filepath[PATH_MAX] = {0,};
- glusterd_conf_t *conf = THIS->private;
+ int ret = -1;
+
+ ret = glusterd_generate_brick_volfile (volinfo, brickinfo);
+ if (!ret)
+ ret = generate_client_volfiles (volinfo, GF_CLIENT_TRUSTED);
+ if (!ret)
+ ret = glusterd_fetchspec_notify (THIS);
- glusterd_get_nodesvc_volfile ("quotad", conf->workdir,
- filepath, sizeof (filepath));
- return glusterd_create_global_volfile (build_quotad_graph,
- filepath, NULL);
+ return ret;
}
+int
+glusterd_create_volfiles (glusterd_volinfo_t *volinfo)
+{
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+
+ ret = generate_brick_volfiles (volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not generate volfiles for bricks");
+ goto out;
+ }
+
+ ret = generate_client_volfiles (volinfo, GF_CLIENT_TRUSTED);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not generate trusted client volfiles");
+ goto out;
+ }
+
+ ret = generate_client_volfiles (volinfo, GF_CLIENT_OTHER);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not generate client volfiles");
+
+out:
+ return ret;
+}
+
+int
+glusterd_create_volfiles_and_notify_services (glusterd_volinfo_t *volinfo)
+{
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+
+ ret = glusterd_create_volfiles (volinfo);
+ if (ret)
+ goto out;
+
+ ret = glusterd_fetchspec_notify (this);
+
+out:
+ return ret;
+}
+
+int
+glusterd_create_global_volfile (int (*builder) (volgen_graph_t *graph,
+ dict_t *set_dict),
+ char *filepath, dict_t *mod_dict)
+{
+ volgen_graph_t graph = {0,};
+ int ret = -1;
+
+ ret = builder (&graph, mod_dict);
+ if (!ret)
+ ret = volgen_write_volfile (&graph, filepath);
+
+ volgen_graph_free (&graph);
+
+ return ret;
+}
int
glusterd_delete_volfile (glusterd_volinfo_t *volinfo,
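
With build_nfs_graph(), build_shd_graph() and build_quotad_graph() made non-static and glusterd_create_global_volfile() kept, the per-daemon volfile generation presumably moves into the new *-svc.c files. The sketch below is modeled directly on the removed glusterd_create_nfs_volfile() above; the path-building helper name is an assumption (the old code used glusterd_get_nodesvc_volfile() here), so do not read it as the patch's API.

/* Sketch of how an nfs-svc volfile writer could reuse the pieces kept
 * public in this file; illustration only. */
int
nfssvc_create_volfile_sketch (glusterd_conf_t *conf)
{
        char filepath[PATH_MAX] = {0,};

        /* assumed helper; builds the nfs volfile path under conf->workdir */
        glusterd_svc_build_volfile_path ("nfs", conf->workdir,
                                         filepath, sizeof (filepath));

        return glusterd_create_global_volfile (build_nfs_graph,
                                               filepath, NULL);
}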
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.h b/xlators/mgmt/glusterd/src/glusterd-volgen.h
index 996a36b95ab..a3c093422a6 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.h
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.h
@@ -49,6 +49,12 @@ typedef enum {
GF_CLIENT_OTHER
} glusterd_client_type_t;
+struct volgen_graph {
+ char **errstr;
+ glusterfs_graph_t graph;
+};
+typedef struct volgen_graph volgen_graph_t;
+
#define COMPLETE_OPTION(key, completion, ret) \
do { \
if (!strchr (key, '.')) { \
@@ -120,6 +126,18 @@ struct volopt_map_entry {
};
int
+glusterd_snapdsvc_create_volfile (glusterd_volinfo_t *volinfo);
+
+int
+glusterd_snapdsvc_generate_volfile (volgen_graph_t *graph,
+ glusterd_volinfo_t *volinfo);
+
+int
+glusterd_create_global_volfile (int (*builder) (volgen_graph_t *graph,
+ dict_t *set_dict),
+ char *filepath, dict_t *mod_dict);
+
+int
glusterd_create_rb_volfiles (glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *brickinfo);
@@ -132,19 +150,17 @@ glusterd_create_volfiles_and_notify_services (glusterd_volinfo_t *volinfo);
void
glusterd_get_nfs_filepath (char *filename);
-void glusterd_get_shd_filepath (char *filename);
-
-int
-glusterd_create_nfs_volfile ();
+void
+glusterd_get_shd_filepath (char *filename);
int
-glusterd_create_shd_volfile ();
+build_shd_graph (volgen_graph_t *graph, dict_t *mod_dict);
int
-glusterd_create_quotad_volfile ();
+build_nfs_graph (volgen_graph_t *graph, dict_t *mod_dict);
int
-glusterd_create_snapd_volfile (glusterd_volinfo_t *volinfo);
+build_quotad_graph (volgen_graph_t *graph, dict_t *mod_dict);
int
glusterd_delete_volfile (glusterd_volinfo_t *volinfo,
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index b90518e1c9a..99c658a7d48 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -29,6 +29,10 @@
#include "glusterd-messages.h"
#include "run.h"
#include "glusterd-snapshot-utils.h"
+#include "glusterd-svc-mgmt.h"
+#include "glusterd-svc-helper.h"
+#include "glusterd-shd-svc.h"
+#include "glusterd-snapd-svc.h"
#include <stdint.h>
#include <sys/socket.h>
@@ -1617,6 +1621,8 @@ glusterd_op_stage_heal_volume (dict_t *dict, char **op_errstr)
xlator_t *this = NULL;
this = THIS;
+ GF_ASSERT (this);
+
priv = this->private;
if (!priv) {
ret = -1;
@@ -1704,7 +1710,7 @@ glusterd_op_stage_heal_volume (dict_t *dict, char **op_errstr)
case GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
break;
default:
- if (!glusterd_is_nodesvc_online("glustershd")){
+ if (!priv->shd_svc.online) {
ret = -1;
*op_errstr = gf_strdup ("Self-heal daemon is "
"not running. Check self-heal "
@@ -2133,6 +2139,12 @@ glusterd_op_create_volume (dict_t *dict, char **op_errstr)
volinfo->caps = caps;
+ ret = glusterd_snapdsvc_init (volinfo);
+ if (ret) {
+ *op_errstr = gf_strdup ("Failed to initialize snapd service");
+ goto out;
+ }
+
ret = glusterd_store_volinfo (volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
if (ret) {
glusterd_store_delete_volume (volinfo);
@@ -2218,6 +2230,7 @@ glusterd_op_start_volume (dict_t *dict, char **op_errstr)
glusterd_brickinfo_t *brickinfo = NULL;
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
+ glusterd_svc_t *svc = NULL;
this = THIS;
GF_ASSERT (this);
@@ -2266,11 +2279,14 @@ glusterd_op_start_volume (dict_t *dict, char **op_errstr)
if (ret)
goto out;
- ret = glusterd_handle_snapd_option (volinfo);
- if (ret)
- goto out;
+ if (!volinfo->is_snap_volume) {
+ svc = &(volinfo->snapd.svc);
+ ret = svc->manager (svc, volinfo, PROC_START_NO_WAIT);
+ if (ret)
+ goto out;
+ }
- ret = glusterd_nodesvcs_handle_graph_change (volinfo);
+ ret = glusterd_svcs_manager (volinfo);
out:
gf_log (this->name, GF_LOG_TRACE, "returning %d ", ret);
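
The svc->manager (svc, volinfo, PROC_START_NO_WAIT) call that replaces glusterd_handle_snapd_option() is the only place the callback's shape is visible in this hunk. The snippet below guesses at that shape and at how a snapd manager might use the glusterd_is_volume_started() helper introduced in glusterd-utils.c above; the start/stop members and the branch logic are assumptions, and the real code is in the new glusterd-snapd-svc.c.

/* Assumed callback shape and a toy snapd manager; illustration only. */
typedef int (*glusterd_svc_manager_fn) (glusterd_svc_t *svc,
                                        void *data, int flags);

static int
snapdsvc_manager_sketch (glusterd_svc_t *svc, void *data, int flags)
{
        glusterd_volinfo_t *volinfo = data;

        if (glusterd_is_volume_started (volinfo))
                return svc->start (svc, flags);   /* (re)write volfile, spawn */
        return svc->stop (svc, SIGTERM);          /* volume down, stop snapd  */
}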
@@ -2285,6 +2301,7 @@ glusterd_stop_volume (glusterd_volinfo_t *volinfo)
char mountdir[PATH_MAX] = {0,};
char pidfile[PATH_MAX] = {0,};
xlator_t *this = NULL;
+ glusterd_svc_t *svc = NULL;
this = THIS;
GF_ASSERT (this);
@@ -2326,11 +2343,14 @@ glusterd_stop_volume (glusterd_volinfo_t *volinfo)
mountdir, strerror (errno));
}
- ret = glusterd_handle_snapd_option (volinfo);
- if (ret)
- goto out;
+ if (!volinfo->is_snap_volume) {
+ svc = &(volinfo->snapd.svc);
+ ret = svc->manager (svc, volinfo, PROC_START_NO_WAIT);
+ if (ret)
+ goto out;
+ }
- ret = glusterd_nodesvcs_handle_graph_change (volinfo);
+ ret = glusterd_svcs_manager (volinfo);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Failed to notify graph "
"change for %s volume", volinfo->volname);
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
index d44040eb1b1..c977dc1c1c4 100644
--- a/xlators/mgmt/glusterd/src/glusterd.c
+++ b/xlators/mgmt/glusterd/src/glusterd.c
@@ -38,6 +38,11 @@
#include "glusterd-hooks.h"
#include "glusterd-utils.h"
#include "glusterd-locks.h"
+#include "glusterd-svc-mgmt.h"
+#include "glusterd-shd-svc.h"
+#include "glusterd-nfs-svc.h"
+#include "glusterd-quotad-svc.h"
+#include "glusterd-snapd-svc.h"
#include "common-utils.h"
#include "glusterd-geo-rep.h"
#include "run.h"
@@ -1175,6 +1180,47 @@ out:
return ret;
}
+static int
+glusterd_svc_init_all ()
+{
+ int ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ /* Init SHD svc */
+ ret = glusterd_shdsvc_init (&(priv->shd_svc));
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed to init shd service");
+ goto out;
+ }
+ gf_log (THIS->name, GF_LOG_DEBUG, "shd service initialized");
+
+ /* Init NFS svc */
+ ret = glusterd_nfssvc_init (&(priv->nfs_svc));
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed to init nfs service");
+ goto out;
+ }
+ gf_log (THIS->name, GF_LOG_DEBUG, "nfs service initialized");
+
+ /* Init QuotaD svc */
+ ret = glusterd_quotadsvc_init (&(priv->quotad_svc));
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed to init quotad "
+ "service");
+ goto out;
+ }
+ gf_log (THIS->name, GF_LOG_DEBUG, "quotad service initialized");
+
+out:
+ return ret;
+}
/*
* init - called during glusterd initialization
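
glusterd_svc_init_all() takes over from the GF_CALLOC'd nodesrv_t blocks that the next hunk removes from init(): the services are now embedded in glusterd_conf_t and only need their fields wired up. What each *_init() actually fills in is not shown in this diff, so the sketch below is a guess at that wiring; the callback names on the right-hand side are assumptions, not symbols taken from the patch.

/* Guessed shape of glusterd_shdsvc_init(); illustration only. */
int
shdsvc_init_sketch (glusterd_svc_t *svc)
{
        /* The name drives pidfile/volfile/socket path construction and is
         * what glusterd_add_node_to_dict() now matches on. */
        snprintf (svc->name, sizeof (svc->name), "%s", "glustershd");

        svc->online  = _gf_false;
        svc->manager = glusterd_shdsvc_manager;   /* assumed symbol */
        svc->start   = glusterd_svc_start;        /* assumed symbol */
        svc->stop    = glusterd_svc_stop;         /* assumed symbol */

        return 0;
}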
@@ -1461,14 +1507,6 @@ init (xlator_t *this)
gf_gld_mt_glusterd_conf_t);
GF_VALIDATE_OR_GOTO(this->name, conf, out);
- conf->shd = GF_CALLOC (1, sizeof (nodesrv_t), gf_gld_mt_nodesrv_t);
- GF_VALIDATE_OR_GOTO(this->name, conf->shd, out);
- conf->nfs = GF_CALLOC (1, sizeof (nodesrv_t), gf_gld_mt_nodesrv_t);
- GF_VALIDATE_OR_GOTO(this->name, conf->nfs, out);
- conf->quotad = GF_CALLOC (1, sizeof (nodesrv_t),
- gf_gld_mt_nodesrv_t);
- GF_VALIDATE_OR_GOTO(this->name, conf->quotad, out);
-
INIT_LIST_HEAD (&conf->peers);
INIT_LIST_HEAD (&conf->xaction_peers);
INIT_LIST_HEAD (&conf->volumes);
@@ -1518,7 +1556,6 @@ init (xlator_t *this)
this->private = conf;
glusterd_mgmt_v3_lock_init ();
glusterd_txn_opinfo_dict_init ();
- (void) glusterd_nodesvc_set_online_status ("glustershd", _gf_false);
GLUSTERD_GET_HOOKS_DIR (hooks_dir, GLUSTERD_HOOK_VER, conf);
if (stat (hooks_dir, &buf)) {
@@ -1548,6 +1585,26 @@ init (xlator_t *this)
if (ret)
goto out;
+ /* Restoring the op-version needs to be done before initializing the
+ * services, as glusterd_svc_init_common () invokes
+ * glusterd_conn_build_socket_filepath (), which uses the MY_UUID macro.
+ * MY_UUID generates a new uuid if one has not been generated yet and
+ * writes it to the info file. Since the op-version has not been read at
+ * that point, the default value, i.e. 0, would be written for the
+ * op-version and the restore would fail. This is why restoring the
+ * op-version needs to happen before service initialization.
+ */
+ ret = glusterd_restore_op_version (this);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to restore op_version");
+ goto out;
+ }
+
+ ret = glusterd_svc_init_all ();
+ if (ret)
+ goto out;
+
ret = glusterd_restore ();
if (ret < 0)
goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index 23a41d925ce..298551c2d9e 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -32,6 +32,7 @@
#include "glusterd-mem-types.h"
#include "rpcsvc.h"
#include "glusterd-sm.h"
+#include "glusterd-snapd-svc.h"
#include "glusterd1-xdr.h"
#include "protocol-common.h"
#include "glusterd-pmap.h"
@@ -118,18 +119,6 @@ struct glusterd_volgen {
};
typedef struct {
- struct rpc_clnt *rpc;
- gf_boolean_t online;
-} nodesrv_t;
-
-typedef struct {
- struct rpc_clnt *rpc;
- int port;
- gf_boolean_t online;
- gf_store_handle_t *handle;
-} glusterd_snapd_t;
-
-typedef struct {
struct _volfile_ctx *volfile;
pthread_mutex_t mutex;
struct list_head peers;
@@ -139,9 +128,9 @@ typedef struct {
uuid_t uuid;
char workdir[PATH_MAX];
rpcsvc_t *rpc;
- nodesrv_t *shd;
- nodesrv_t *nfs;
- nodesrv_t *quotad;
+ glusterd_svc_t shd_svc;
+ glusterd_svc_t nfs_svc;
+ glusterd_svc_t quotad_svc;
struct pmap_registry *pmap;
struct list_head volumes;
struct list_head snapshots; /*List of snap volumes */
@@ -381,7 +370,7 @@ struct glusterd_volinfo_ {
int refcnt;
gd_quorum_status_t quorum_status;
- glusterd_snapd_t snapd;
+ glusterd_snapdsvc_t snapd;
};
typedef enum gd_snap_status_ {
@@ -903,14 +892,6 @@ glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event, void *data);
int
-glusterd_snapd_rpc_notify (struct rpc_clnt *rpc, void *mydata,
- rpc_clnt_event_t event, void *data);
-
-int
-glusterd_nodesvc_rpc_notify (struct rpc_clnt *rpc, void *mydata,
- rpc_clnt_event_t event, void *data);
-
-int
glusterd_rpc_create (struct rpc_clnt **rpc, dict_t *options,
rpc_clnt_notify_t notify_fn, void *notify_data);