author    Jeff Darcy <jdarcy@redhat.com>                   2017-01-31 14:49:45 -0500
committer Shyamsundar Ranganathan <srangana@redhat.com>    2017-02-01 19:54:58 -0500
commit    83803b4b2d70e9e6e16bb050d7ac8e49ba420893 (patch)
tree      9a6c1f3f9a723bf578f78c624d3ce9f44baac6db /glusterfsd
parent    80b04666ec7019e132f76f734a88559457702f1b (diff)
core: run many bricks within one glusterfsd process
This patch adds support for multiple brick translator stacks running in a
single brick server process. This reduces our per-brick memory usage by
approximately 3x, and our appetite for TCP ports even more. It also creates
the potential to avoid process/thread thrashing, and to improve QoS by
scheduling more carefully across the bricks, but realizing that potential
will require further work.

Multiplexing is controlled by the "cluster.brick-multiplex" global option.
By default it's off, and bricks are started in separate processes as before.
If multiplexing is enabled, then *compatible* bricks (mostly those with the
same transport options) will be started in the same process.

Backport of:
> Change-Id: I45059454e51d6f4cbb29a4953359c09a408695cb
> BUG: 1385758
> Reviewed-on: https://review.gluster.org/14763

Change-Id: I4bce9080f6c93d50171823298fdf920258317ee8
BUG: 1418091
Signed-off-by: Jeff Darcy <jdarcy@redhat.com>
Reviewed-on: https://review.gluster.org/16496
Smoke: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Shyamsundar Ranganathan <srangana@redhat.com>
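A minimal usage sketch (assuming a running glusterd; the CLI invocation below
is illustrative and not part of the patch itself):

    # Enable brick multiplexing cluster-wide (it is off by default).
    gluster volume set all cluster.brick-multiplex on

    # Turn it back off; bricks (re)started afterwards get one process each.
    gluster volume set all cluster.brick-multiplex off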
Diffstat (limited to 'glusterfsd')
-rw-r--r--  glusterfsd/src/Makefile.am        13
-rw-r--r--  glusterfsd/src/gf_attach.c       247
-rw-r--r--  glusterfsd/src/glusterfsd-mgmt.c 236
-rw-r--r--  glusterfsd/src/glusterfsd.c        9
4 files changed, 467 insertions, 38 deletions
diff --git a/glusterfsd/src/Makefile.am b/glusterfsd/src/Makefile.am
index e8a3f99b7fa..0196204bdd6 100644
--- a/glusterfsd/src/Makefile.am
+++ b/glusterfsd/src/Makefile.am
@@ -1,11 +1,17 @@
-sbin_PROGRAMS = glusterfsd
+sbin_PROGRAMS = glusterfsd gf_attach
glusterfsd_SOURCES = glusterfsd.c glusterfsd-mgmt.c
glusterfsd_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
$(top_builddir)/rpc/rpc-lib/src/libgfrpc.la \
$(top_builddir)/rpc/xdr/src/libgfxdr.la ${GF_LDADD}
-
glusterfsd_LDFLAGS = $(GF_LDFLAGS)
+
+gf_attach_SOURCES = gf_attach.c
+gf_attach_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
+ $(top_builddir)/api/src/libgfapi.la \
+ $(top_builddir)/rpc/rpc-lib/src/libgfrpc.la \
+ $(top_builddir)/rpc/xdr/src/libgfxdr.la
+
noinst_HEADERS = glusterfsd.h glusterfsd-mem-types.h glusterfsd-messages.h
AM_CPPFLAGS = $(GF_CPPFLAGS) \
@@ -15,7 +21,8 @@ AM_CPPFLAGS = $(GF_CPPFLAGS) \
-I$(top_srcdir)/rpc/rpc-lib/src \
-I$(top_srcdir)/rpc/xdr/src \
-I$(top_builddir)/rpc/xdr/src \
- -I$(top_srcdir)/xlators/nfs/server/src
+ -I$(top_srcdir)/xlators/nfs/server/src \
+ -I$(top_srcdir)/api/src
AM_CFLAGS = -Wall $(GF_CFLAGS)
diff --git a/glusterfsd/src/gf_attach.c b/glusterfsd/src/gf_attach.c
new file mode 100644
index 00000000000..0393dc5f42f
--- /dev/null
+++ b/glusterfsd/src/gf_attach.c
@@ -0,0 +1,247 @@
+/*
+ * Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
+ * This file is part of GlusterFS.
+ *
+ * This file is licensed to you under your choice of the GNU Lesser
+ * General Public License, version 3 or any later version (LGPLv3 or
+ * later), or the GNU General Public License, version 2 (GPLv2), in all
+ * cases as published by the Free Software Foundation.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+//#include "config.h"
+#include "glusterfs.h"
+#include "globals.h"
+#include "glfs-internal.h"
+#include "rpc-clnt.h"
+#include "protocol-common.h"
+#include "xdr-generic.h"
+#include "glusterd1-xdr.h"
+
+int done = 0;
+int rpc_status;
+
+struct rpc_clnt_procedure gf_attach_actors[GLUSTERD_BRICK_MAXVALUE] = {
+ [GLUSTERD_BRICK_NULL] = {"NULL", NULL },
+ [GLUSTERD_BRICK_OP] = {"BRICK_OP", NULL },
+};
+
+struct rpc_clnt_program gf_attach_prog = {
+ .progname = "brick operations",
+ .prognum = GD_BRICK_PROGRAM,
+ .progver = GD_BRICK_VERSION,
+ .proctable = gf_attach_actors,
+ .numproc = GLUSTERD_BRICK_MAXVALUE,
+};
+
+/*
+ * In a sane world, the generic RPC layer would be capable of tracking
+ * connection status by itself, with no help from us. It might invoke our
+ * callback if we had registered one, but only to provide information. Sadly,
+ * we don't live in that world. Instead, the callback *must* exist and *must*
+ * call rpc_clnt_{set,unset}_connected, because that's the only way those
+ * fields get set (with RPC both above and below us on the stack). If we don't
+ * do that, then rpc_clnt_submit doesn't think we're connected even when we
+ * are. It calls the socket code to reconnect, but the socket code tracks this
+ * stuff in a sane way so it knows we're connected and returns EINPROGRESS.
+ * Then we're stuck, connected but unable to use the connection. To make it
+ * work, we define and register this trivial callback.
+ */
+int
+my_notify (struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
+{
+ switch (event) {
+ case RPC_CLNT_CONNECT:
+ printf ("connected\n");
+ rpc_clnt_set_connected (&rpc->conn);
+ break;
+ case RPC_CLNT_DISCONNECT:
+ printf ("disconnected\n");
+ rpc_clnt_unset_connected (&rpc->conn);
+ break;
+ default:
+ fprintf (stderr, "unknown RPC event\n");
+ }
+
+ return 0;
+}
+
+int32_t
+my_callback (struct rpc_req *req, struct iovec *iov, int count, void *frame)
+{
+ rpc_status = req->rpc_status;
+ done = 1;
+ return 0;
+}
+
+/* copied from gd_syncop_submit_request */
+int
+send_brick_req (xlator_t *this, struct rpc_clnt *rpc, char *path, int op)
+{
+ int ret = -1;
+ struct iobuf *iobuf = NULL;
+ struct iobref *iobref = NULL;
+ struct iovec iov = {0, };
+ ssize_t req_size = 0;
+ call_frame_t *frame = NULL;
+ gd1_mgmt_brick_op_req brick_req;
+ void *req = &brick_req;
+ int i;
+
+ brick_req.op = op;
+ brick_req.name = path;
+ brick_req.input.input_val = NULL;
+ brick_req.input.input_len = 0;
+
+ req_size = xdr_sizeof ((xdrproc_t)xdr_gd1_mgmt_brick_op_req, req);
+ iobuf = iobuf_get2 (rpc->ctx->iobuf_pool, req_size);
+ if (!iobuf)
+ goto out;
+
+ iobref = iobref_new ();
+ if (!iobref)
+ goto out;
+
+ frame = create_frame (this, this->ctx->pool);
+ if (!frame)
+ goto out;
+
+ iobref_add (iobref, iobuf);
+
+ iov.iov_base = iobuf->ptr;
+ iov.iov_len = iobuf_pagesize (iobuf);
+
+ /* Create the xdr payload */
+ ret = xdr_serialize_generic (iov, req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret == -1)
+ goto out;
+
+ iov.iov_len = ret;
+
+ for (i = 0; i < 60; ++i) {
+ if (rpc->conn.connected) {
+ break;
+ }
+ sleep (1);
+ }
+
+ /* Send the msg */
+ ret = rpc_clnt_submit (rpc, &gf_attach_prog, op,
+ my_callback, &iov, 1, NULL, 0, iobref, frame,
+ NULL, 0, NULL, 0, NULL);
+ if (!ret) {
+ for (i = 0; !done && (i < 120); ++i) {
+ sleep (1);
+ }
+ }
+
+out:
+
+ iobref_unref (iobref);
+ iobuf_unref (iobuf);
+ STACK_DESTROY (frame->root);
+
+ if (rpc_status != 0) {
+ fprintf (stderr, "got error %d on RPC\n", rpc_status);
+ return EXIT_FAILURE;
+ }
+
+ printf ("OK\n");
+ return EXIT_SUCCESS;
+}
+
+int
+usage (char *prog)
+{
+ fprintf (stderr, "Usage: %s uds_path volfile_path (to attach)\n",
+ prog);
+ fprintf (stderr, " %s -d uds_path brick_path (to detach)\n",
+ prog);
+
+ return EXIT_FAILURE;
+}
+
+int
+main (int argc, char *argv[])
+{
+ glfs_t *fs;
+ struct rpc_clnt *rpc;
+ xlator_t that;
+ dict_t *options;
+ int ret;
+ int op = GLUSTERD_BRICK_ATTACH;
+
+ for (;;) {
+ switch (getopt (argc, argv, "d")) {
+ case 'd':
+ op = GLUSTERD_BRICK_TERMINATE;
+ break;
+ case -1:
+ goto done_parsing;
+ default:
+ return usage (argv[0]);
+ }
+ }
+done_parsing:
+ if (optind != (argc - 2)) {
+ return usage (argv[0]);
+ }
+
+ fs = glfs_new ("gf-attach");
+ if (!fs) {
+ fprintf (stderr, "glfs_new failed\n");
+ return EXIT_FAILURE;
+ }
+ that.ctx = fs->ctx;
+
+ (void) glfs_set_logging (fs, "/dev/stderr", 7);
+ /*
+ * This will actually fail because we haven't defined a volume, but
+ * it will do enough initialization to get us going.
+ */
+ (void) glfs_init (fs);
+
+ options = dict_new();
+ if (!options) {
+ return EXIT_FAILURE;
+ }
+ ret = dict_set_str (options, "transport-type", "socket");
+ if (ret != 0) {
+ fprintf (stderr, "failed to set transport type\n");
+ return EXIT_FAILURE;
+ }
+ ret = dict_set_str (options, "transport.address-family", "unix");
+ if (ret != 0) {
+ fprintf (stderr, "failed to set address family\n");
+ return EXIT_FAILURE;
+ }
+ ret = dict_set_str (options, "transport.socket.connect-path",
+ argv[optind]);
+ if (ret != 0) {
+ fprintf (stderr, "failed to set connect path\n");
+ return EXIT_FAILURE;
+ }
+
+ rpc = rpc_clnt_new (options, fs->ctx->master, "gf-attach-rpc", 0);
+ if (!rpc) {
+ fprintf (stderr, "rpc_clnt_new failed\n");
+ return EXIT_FAILURE;
+ }
+
+ if (rpc_clnt_register_notify (rpc, my_notify, NULL) != 0) {
+ fprintf (stderr, "rpc_clnt_register_notify failed\n");
+ return EXIT_FAILURE;
+ }
+
+ if (rpc_clnt_start(rpc) != 0) {
+ fprintf (stderr, "rpc_clnt_start failed\n");
+ return EXIT_FAILURE;
+ }
+
+ return send_brick_req (fs->ctx->master, rpc, argv[optind+1], op);
+}
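Going by the usage() text above, the new gf_attach helper is driven as in the
following sketch (the socket, volfile, and brick paths are hypothetical,
purely for illustration):

    # Attach: ask the brick process listening on a UNIX-domain socket to
    # load another brick graph from the given volfile.
    gf_attach /var/run/gluster/brick1.socket /var/lib/glusterd/vols/myvol/brick1.vol

    # Detach: -d sends GLUSTERD_BRICK_TERMINATE for the named brick path.
    gf_attach -d /var/run/gluster/brick1.socket /bricks/brick1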
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index 92c3343ad21..fa03d23b17b 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -184,12 +184,75 @@ glusterfs_terminate_response_send (rpcsvc_request_t *req, int op_ret)
return ret;
}
+static void
+glusterfs_autoscale_threads (glusterfs_ctx_t *ctx, int incr)
+{
+ struct event_pool *pool = ctx->event_pool;
+
+ pool->auto_thread_count += incr;
+ (void) event_reconfigure_threads (pool, pool->eventthreadcount+incr);
+}
+
int
glusterfs_handle_terminate (rpcsvc_request_t *req)
{
+ gd1_mgmt_brick_op_req xlator_req = {0,};
+ ssize_t ret;
+ xlator_t *top;
+ xlator_t *victim;
+ xlator_list_t **trav_p;
+
+ ret = xdr_to_generic (req->msg[0], &xlator_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret < 0) {
+ req->rpc_err = GARBAGE_ARGS;
+ return -1;
+ }
+
+ /* Find the xlator_list_t that points to our victim. */
+ top = glusterfsd_ctx->active->first;
+ for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
+ victim = (*trav_p)->xlator;
+ if (strcmp (victim->name, xlator_req.name) == 0) {
+ break;
+ }
+ }
+
+ if (!*trav_p) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "can't terminate %s - not found", xlator_req.name);
+ /*
+ * Used to be -ENOENT. However, the caller asked us to make
+ * sure it's down and if it's already down that's good enough.
+ */
+ glusterfs_terminate_response_send (req, 0);
+ goto err;
+ }
glusterfs_terminate_response_send (req, 0);
- cleanup_and_exit (SIGTERM);
+ if ((trav_p == &top->children) && !(*trav_p)->next) {
+ gf_log (THIS->name, GF_LOG_INFO,
+ "terminating after loss of last child %s",
+ xlator_req.name);
+ cleanup_and_exit (SIGTERM);
+ } else {
+ /*
+ * This is terribly unsafe without quiescing or shutting things
+ * down properly (or even locking) but it gets us to the point
+ * where we can test other stuff.
+ *
+ * TBD: finish implementing this "detach" code properly
+ */
+ gf_log (THIS->name, GF_LOG_INFO, "detaching not-only child %s",
+ xlator_req.name);
+ top->notify (top, GF_EVENT_TRANSPORT_CLEANUP, victim);
+ *trav_p = (*trav_p)->next;
+ glusterfs_autoscale_threads (THIS->ctx, -1);
+ }
+
+err:
+ free (xlator_req.name);
+ xlator_req.name = NULL;
return 0;
}
@@ -332,7 +395,7 @@ cont:
active = ctx->active;
any = active->first;
- xlator = xlator_search_by_name (any, xlator_req.name);
+ xlator = get_xlator_by_name (any, xlator_req.name);
if (!xlator) {
snprintf (msg, sizeof (msg), "xlator %s is not loaded",
xlator_req.name);
@@ -756,6 +819,39 @@ out:
}
int
+glusterfs_handle_attach (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gd1_mgmt_brick_op_req xlator_req = {0,};
+ xlator_t *this = NULL;
+
+ GF_ASSERT (req);
+ this = THIS;
+ GF_ASSERT (this);
+
+ ret = xdr_to_generic (req->msg[0], &xlator_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+
+ if (ret < 0) {
+ /*failed to decode msg;*/
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_log (this->name, GF_LOG_INFO, "got attach for %s", xlator_req.name);
+ glusterfs_graph_attach (this->ctx->active, xlator_req.name);
+ glusterfs_autoscale_threads (this->ctx, 1);
+
+out:
+ glusterfs_translator_info_response_send (req, 0, NULL, NULL);
+
+ free (xlator_req.input.input_val);
+ free (xlator_req.name);
+
+ return 0;
+}
+
+int
glusterfs_handle_defrag (rpcsvc_request_t *req)
{
int32_t ret = -1;
@@ -1332,13 +1428,13 @@ glusterfs_handle_barrier (rpcsvc_request_t *req)
gd1_mgmt_brick_op_rsp brick_rsp = {0,};
glusterfs_ctx_t *ctx = NULL;
glusterfs_graph_t *active = NULL;
- xlator_t *any = NULL;
+ xlator_t *top = NULL;
xlator_t *xlator = NULL;
xlator_t *old_THIS = NULL;
dict_t *dict = NULL;
- char name[1024] = {0,};
gf_boolean_t barrier = _gf_true;
gf_boolean_t barrier_err = _gf_false;
+ xlator_list_t *trav;
GF_ASSERT (req);
@@ -1348,15 +1444,22 @@ glusterfs_handle_barrier (rpcsvc_request_t *req)
req->rpc_err = GARBAGE_ARGS;
goto out;
}
- ret = -1;
ctx = glusterfsd_ctx;
- GF_VALIDATE_OR_GOTO (THIS->name, ctx, out);
-
+ GF_ASSERT (ctx);
active = ctx->active;
- GF_VALIDATE_OR_GOTO (THIS->name, active, out);
+ top = active->first;
- any = active->first;
+ for (trav = top->children; trav; trav = trav->next) {
+ if (strcmp (trav->xlator->name, brick_req.name) == 0) {
+ break;
+ }
+ }
+ if (!trav) {
+ ret = -1;
+ goto out;
+ }
+ top = trav->xlator;
dict = dict_new();
if (!dict) {
@@ -1377,12 +1480,11 @@ glusterfs_handle_barrier (rpcsvc_request_t *req)
old_THIS = THIS;
/* Send barrier request to the barrier xlator */
- snprintf (name, sizeof (name), "%s-barrier", brick_req.name);
- xlator = xlator_search_by_name(any, name);
+ xlator = get_xlator_by_type (top, "features/barrier");
if (!xlator) {
ret = -1;
gf_log (THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
- name);
+ "features/barrier");
goto out;
}
@@ -1390,6 +1492,7 @@ glusterfs_handle_barrier (rpcsvc_request_t *req)
// TODO: Extend this to accept return of errnos
ret = xlator->notify (xlator, GF_EVENT_TRANSLATOR_OP, dict);
if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "barrier notify failed");
brick_rsp.op_ret = ret;
brick_rsp.op_errstr = gf_strdup ("Failed to reconfigure "
"barrier.");
@@ -1408,20 +1511,18 @@ glusterfs_handle_barrier (rpcsvc_request_t *req)
THIS = old_THIS;
/* Send barrier request to changelog as well */
-
- memset (name, 0, sizeof (name));
- snprintf (name, sizeof (name), "%s-changelog", brick_req.name);
- xlator = xlator_search_by_name(any, name);
+ xlator = get_xlator_by_type (top, "features/changelog");
if (!xlator) {
ret = -1;
gf_log (THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
- name);
+ "features/changelog");
goto out;
}
THIS = xlator;
ret = xlator->notify (xlator, GF_EVENT_TRANSLATOR_OP, dict);
if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "changelog notify failed");
brick_rsp.op_ret = ret;
brick_rsp.op_errstr = gf_strdup ("changelog notify failed");
goto submit_reply;
@@ -1501,17 +1602,54 @@ rpc_clnt_prog_t clnt_handshake_prog = {
};
rpcsvc_actor_t glusterfs_actors[GLUSTERD_BRICK_MAXVALUE] = {
- [GLUSTERD_BRICK_NULL] = {"NULL", GLUSTERD_BRICK_NULL, glusterfs_handle_rpc_msg, NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_TERMINATE] = {"TERMINATE", GLUSTERD_BRICK_TERMINATE, glusterfs_handle_terminate, NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_XLATOR_INFO] = {"TRANSLATOR INFO", GLUSTERD_BRICK_XLATOR_INFO, glusterfs_handle_translator_info_get, NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_XLATOR_OP] = {"TRANSLATOR OP", GLUSTERD_BRICK_XLATOR_OP, glusterfs_handle_translator_op, NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_STATUS] = {"STATUS", GLUSTERD_BRICK_STATUS, glusterfs_handle_brick_status, NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_XLATOR_DEFRAG] = {"TRANSLATOR DEFRAG", GLUSTERD_BRICK_XLATOR_DEFRAG, glusterfs_handle_defrag, NULL, 0, DRC_NA},
- [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE", GLUSTERD_NODE_PROFILE, glusterfs_handle_nfs_profile, NULL, 0, DRC_NA},
- [GLUSTERD_NODE_STATUS] = {"NFS STATUS", GLUSTERD_NODE_STATUS, glusterfs_handle_node_status, NULL, 0, DRC_NA},
- [GLUSTERD_VOLUME_BARRIER_OP] = {"VOLUME BARRIER OP", GLUSTERD_VOLUME_BARRIER_OP, glusterfs_handle_volume_barrier_op, NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_BARRIER] = {"BARRIER", GLUSTERD_BRICK_BARRIER, glusterfs_handle_barrier, NULL, 0, DRC_NA},
- [GLUSTERD_NODE_BITROT] = {"BITROT", GLUSTERD_NODE_BITROT, glusterfs_handle_bitrot, NULL, 0, DRC_NA},
+ [GLUSTERD_BRICK_NULL] = {"NULL",
+ GLUSTERD_BRICK_NULL,
+ glusterfs_handle_rpc_msg,
+ NULL, 0, DRC_NA},
+ [GLUSTERD_BRICK_TERMINATE] = {"TERMINATE",
+ GLUSTERD_BRICK_TERMINATE,
+ glusterfs_handle_terminate,
+ NULL, 0, DRC_NA},
+ [GLUSTERD_BRICK_XLATOR_INFO] = {"TRANSLATOR INFO",
+ GLUSTERD_BRICK_XLATOR_INFO,
+ glusterfs_handle_translator_info_get,
+ NULL, 0, DRC_NA},
+ [GLUSTERD_BRICK_XLATOR_OP] = {"TRANSLATOR OP",
+ GLUSTERD_BRICK_XLATOR_OP,
+ glusterfs_handle_translator_op,
+ NULL, 0, DRC_NA},
+ [GLUSTERD_BRICK_STATUS] = {"STATUS",
+ GLUSTERD_BRICK_STATUS,
+ glusterfs_handle_brick_status,
+ NULL, 0, DRC_NA},
+ [GLUSTERD_BRICK_XLATOR_DEFRAG] = {"TRANSLATOR DEFRAG",
+ GLUSTERD_BRICK_XLATOR_DEFRAG,
+ glusterfs_handle_defrag,
+ NULL, 0, DRC_NA},
+ [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE",
+ GLUSTERD_NODE_PROFILE,
+ glusterfs_handle_nfs_profile,
+ NULL, 0, DRC_NA},
+ [GLUSTERD_NODE_STATUS] = {"NFS STATUS",
+ GLUSTERD_NODE_STATUS,
+ glusterfs_handle_node_status,
+ NULL, 0, DRC_NA},
+ [GLUSTERD_VOLUME_BARRIER_OP] = {"VOLUME BARRIER OP",
+ GLUSTERD_VOLUME_BARRIER_OP,
+ glusterfs_handle_volume_barrier_op,
+ NULL, 0, DRC_NA},
+ [GLUSTERD_BRICK_BARRIER] = {"BARRIER",
+ GLUSTERD_BRICK_BARRIER,
+ glusterfs_handle_barrier,
+ NULL, 0, DRC_NA},
+ [GLUSTERD_NODE_BITROT] = {"BITROT",
+ GLUSTERD_NODE_BITROT,
+ glusterfs_handle_bitrot,
+ NULL, 0, DRC_NA},
+ [GLUSTERD_BRICK_ATTACH] = {"ATTACH",
+ GLUSTERD_BRICK_ATTACH,
+ glusterfs_handle_attach,
+ NULL, 0, DRC_NA},
};
struct rpcsvc_program glusterfs_mop_prog = {
@@ -1726,8 +1864,8 @@ out:
}
-int
-glusterfs_volfile_fetch (glusterfs_ctx_t *ctx)
+static int
+glusterfs_volfile_fetch_one (glusterfs_ctx_t *ctx, char *volfile_id)
{
cmd_args_t *cmd_args = NULL;
gf_getspec_req req = {0, };
@@ -1736,10 +1874,13 @@ glusterfs_volfile_fetch (glusterfs_ctx_t *ctx)
dict_t *dict = NULL;
cmd_args = &ctx->cmd_args;
+ if (!volfile_id) {
+ volfile_id = ctx->cmd_args.volfile_id;
+ }
frame = create_frame (THIS, ctx->pool);
- req.key = cmd_args->volfile_id;
+ req.key = volfile_id;
req.flags = 0;
dict = dict_new ();
@@ -1794,6 +1935,35 @@ out:
return ret;
}
+
+int
+glusterfs_volfile_fetch (glusterfs_ctx_t *ctx)
+{
+ xlator_t *server_xl = NULL;
+ xlator_list_t *trav;
+ int ret;
+
+ if (ctx->active) {
+ server_xl = ctx->active->first;
+ if (strcmp (server_xl->type, "protocol/server") != 0) {
+ server_xl = NULL;
+ }
+ }
+ if (!server_xl) {
+ /* Startup (ctx->active not set) or non-server. */
+ return glusterfs_volfile_fetch_one (ctx,
+ ctx->cmd_args.volfile_id);
+ }
+
+ ret = 0;
+ for (trav = server_xl->children; trav; trav = trav->next) {
+ ret |= glusterfs_volfile_fetch_one (ctx,
+ trav->xlator->volfile_id);
+ }
+ return ret;
+}
+
+
int32_t
mgmt_event_notify_cbk (struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
@@ -1941,7 +2111,7 @@ mgmt_rpc_notify (struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
}
server = ctx->cmd_args.curr_server;
if (server->list.next == &ctx->cmd_args.volfile_servers) {
- if (!ctx->active)
+ //if (!ctx->active)
need_term = 1;
emval = ENOTCONN;
GF_LOG_OCCASIONALLY (log_ctr2, "glusterfsd-mgmt",
@@ -1959,7 +2129,7 @@ mgmt_rpc_notify (struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
gf_log ("glusterfsd-mgmt", GF_LOG_ERROR,
"failed to set remote-host: %s",
server->volfile_server);
- if (!ctx->active)
+ //if (!ctx->active)
need_term = 1;
emval = ENOTCONN;
break;
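With the GLUSTERD_BRICK_ATTACH actor registered above, several bricks can end
up inside a single glusterfsd. A quick way to observe the effect, sketched
with a hypothetical volume name and the standard gluster CLI:

    # With multiplexing enabled, bricks of compatible volumes report the
    # same PID in volume status, and only one glusterfsd process shows up.
    gluster volume status myvol
    pgrep -a glusterfsd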
diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
index 5f7a4dc6f36..1f7b63e7594 100644
--- a/glusterfsd/src/glusterfsd.c
+++ b/glusterfsd/src/glusterfsd.c
@@ -2317,7 +2317,12 @@ glusterfs_process_volfp (glusterfs_ctx_t *ctx, FILE *fp)
}
}
- ret = glusterfs_graph_prepare (graph, ctx);
+ xlator_t *xl = graph->first;
+ if (strcmp (xl->type, "protocol/server") == 0) {
+ (void) copy_opts_to_child (xl, FIRST_CHILD (xl), "*auth*");
+ }
+
+ ret = glusterfs_graph_prepare (graph, ctx, ctx->cmd_args.volume_name);
if (ret) {
goto out;
}
@@ -2479,7 +2484,7 @@ main (int argc, char *argv[])
goto out;
}
- /* do this _after_ deamonize() */
+ /* do this _after_ daemonize() */
if (cmd->global_timer_wheel) {
ret = glusterfs_global_timer_wheel_init (ctx);
if (ret)