author     Kaleb S. KEITHLEY <kkeithle@redhat.com>    2018-03-02 17:04:49 -0500
committer  Kaleb S. KEITHLEY <kkeithle@redhat.com>    2018-03-05 09:25:17 -0500
commit     2bb17551a597b382d77bb5ebc2671b45565cd542 (patch)
tree       2743fc73f99ec97f40591d850792c54ed97428f9
parent     2347debbaf229707b1d957d67cefbe999cbd52d4 (diff)
build: address linkage issues
We have the following undefined symbol errors from protocol/server.so:

    glusterfs_mgmt_pmap_signout
    glusterfs_autoscale_threads

See https://review.gluster.org/19225 (bz#1532238) and
https://review.gluster.org/19657 (bz#1550895). (Why are there two
different bzs for the same bug?)

IMO this is a cleaner solution, i.e. moving the above two functions to
libgfrpc (.../rpc/rpc-lib/...).

I would also, for (foolish) consistency's sake, like to see
glusterfs_mgmt_pmap_signin() moved from glusterfsd to libgfrpc as well.

This works on f28/rawhide, with its new, more restrictive run-time link
semantics. The smoke and regression tests on earlier Fedora and CentOS
will confirm that it works on those platforms too.

Change-Id: I9cfbd1cc15e7ebd9fc31b56ac791287fa2c584de
BUG: 1550895
Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com>
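The net effect for callers is that the two entry points now live in libgfrpc under new names, rpc_clnt_mgmt_pmap_signout() and rpcsvc_autoscale_threads(). A minimal sketch of how a brick detach/cleanup path would use them after this change (the helper function below is hypothetical, for illustration only; the headers and signatures are the ones added in this diff):

    #include "rpc-clnt.h"   /* rpc_clnt_mgmt_pmap_signout() */
    #include "rpcsvc.h"     /* rpcsvc_autoscale_threads() */

    /* Hypothetical helper: sign a brick out of the glusterd portmapper and
     * scale the event threads back down by one, using the symbols now
     * exported from libgfrpc (see libgfrpc.sym below). */
    static void
    brick_detach_cleanup (glusterfs_ctx_t *ctx, rpcsvc_t *rpc, char *brick_name)
    {
            (void) rpc_clnt_mgmt_pmap_signout (ctx, brick_name);
            rpcsvc_autoscale_threads (ctx, rpc, -1);
    }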
-rw-r--r--  glusterfsd/src/glusterfsd-mgmt.c        94
-rw-r--r--  glusterfsd/src/glusterfsd.c              4
-rw-r--r--  glusterfsd/src/glusterfsd.h              4
-rw-r--r--  rpc/rpc-lib/src/Makefile.am              3
-rw-r--r--  rpc/rpc-lib/src/autoscale-threads.c     23
-rw-r--r--  rpc/rpc-lib/src/libgfrpc.sym             2
-rw-r--r--  rpc/rpc-lib/src/mgmt-pmap.c            138
-rw-r--r--  rpc/rpc-lib/src/rpc-clnt.h               3
-rw-r--r--  rpc/rpc-lib/src/rpcsvc.h                 2
-rw-r--r--  xlators/protocol/server/src/server.c     6
10 files changed, 182 insertions(+), 97 deletions(-)
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index f3a7c75517d..bf55a0770b4 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -185,18 +185,6 @@ glusterfs_terminate_response_send (rpcsvc_request_t *req, int op_ret)
return ret;
}
-void
-glusterfs_autoscale_threads (glusterfs_ctx_t *ctx, int incr, xlator_t *this)
-{
- struct event_pool *pool = ctx->event_pool;
- server_conf_t *conf = this->private;
- int thread_count = pool->eventthreadcount;
-
- pool->auto_thread_count += incr;
- (void) event_reconfigure_threads (pool, thread_count+incr);
- rpcsvc_ownthread_reconf (conf->rpc, pool->eventthreadcount);
-}
-
static int
xlator_mem_free (xlator_t *xl)
{
@@ -315,7 +303,7 @@ glusterfs_handle_terminate (rpcsvc_request_t *req)
gf_log (THIS->name, GF_LOG_INFO,
"terminating after loss of last child %s",
xlator_req.name);
- glusterfs_mgmt_pmap_signout (glusterfsd_ctx, xlator_req.name);
+ rpc_clnt_mgmt_pmap_signout (glusterfsd_ctx, xlator_req.name);
kill (getpid(), SIGTERM);
} else {
/*
@@ -914,7 +902,8 @@ glusterfs_handle_attach (rpcsvc_request_t *req)
xlator_t *nextchild = NULL;
glusterfs_graph_t *newgraph = NULL;
glusterfs_ctx_t *ctx = NULL;
- xlator_t *protocol_server = NULL;
+ xlator_t *srv_xl = NULL;
+ server_conf_t *srv_conf = NULL;
GF_ASSERT (req);
this = THIS;
@@ -955,9 +944,10 @@ glusterfs_handle_attach (rpcsvc_request_t *req)
/* we need a protocol/server xlator as
* nextchild
*/
- protocol_server = this->ctx->active->first;
- glusterfs_autoscale_threads (this->ctx, 1,
- protocol_server);
+ srv_xl = this->ctx->active->first;
+ srv_conf = (server_conf_t *)srv_xl->private;
+ rpcsvc_autoscale_threads (this->ctx,
+ srv_conf->rpc, 1);
}
} else {
gf_log (this->name, GF_LOG_WARNING,
@@ -2844,73 +2834,3 @@ out:
return ret;
}
-
-static int
-mgmt_pmap_signout_cbk (struct rpc_req *req, struct iovec *iov, int count,
- void *myframe)
-{
- pmap_signout_rsp rsp = {0,};
- int ret = 0;
-
- if (-1 == req->rpc_status) {
- rsp.op_ret = -1;
- rsp.op_errno = EINVAL;
- goto out;
- }
-
- ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_pmap_signout_rsp);
- if (ret < 0) {
- gf_log (THIS->name, GF_LOG_ERROR, "XDR decoding failed");
- rsp.op_ret = -1;
- rsp.op_errno = EINVAL;
- goto out;
- }
-
- if (-1 == rsp.op_ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "failed to register the port with glusterd");
- goto out;
- }
-out:
- return 0;
-}
-
-
-int
-glusterfs_mgmt_pmap_signout (glusterfs_ctx_t *ctx, char *brickname)
-{
- int ret = 0;
- pmap_signout_req req = {0, };
- call_frame_t *frame = NULL;
- cmd_args_t *cmd_args = NULL;
- char brick_name[PATH_MAX] = {0,};
-
- frame = create_frame (THIS, ctx->pool);
- cmd_args = &ctx->cmd_args;
-
- if (!cmd_args->brick_port && (!cmd_args->brick_name || !brickname)) {
- gf_log ("fsd-mgmt", GF_LOG_DEBUG,
- "portmapper signout arguments not given");
- goto out;
- }
-
- if (cmd_args->volfile_server_transport &&
- !strcmp(cmd_args->volfile_server_transport, "rdma")) {
- snprintf (brick_name, sizeof(brick_name), "%s.rdma",
- cmd_args->brick_name);
- req.brick = brick_name;
- } else {
- if (brickname)
- req.brick = brickname;
- else
- req.brick = cmd_args->brick_name;
- }
-
- req.port = cmd_args->brick_port;
- req.rdma_port = cmd_args->brick_port2;
- ret = mgmt_submit_request (&req, frame, ctx, &clnt_pmap_prog,
- GF_PMAP_SIGNOUT, mgmt_pmap_signout_cbk,
- (xdrproc_t)xdr_pmap_signout_req);
-out:
- return ret;
-}
diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
index 32cf20eed02..0cc46c78943 100644
--- a/glusterfsd/src/glusterfsd.c
+++ b/glusterfsd/src/glusterfsd.c
@@ -1411,10 +1411,10 @@ cleanup_and_exit (int signum)
for (trav_p = &top->children; *trav_p;
trav_p = &(*trav_p)->next) {
victim = (*trav_p)->xlator;
- glusterfs_mgmt_pmap_signout (ctx, victim->name);
+ rpc_clnt_mgmt_pmap_signout (ctx, victim->name);
}
} else {
- glusterfs_mgmt_pmap_signout (ctx, NULL);
+ rpc_clnt_mgmt_pmap_signout (ctx, NULL);
}
/* below part is a racy code where the rpcsvc object is freed.
diff --git a/glusterfsd/src/glusterfsd.h b/glusterfsd/src/glusterfsd.h
index 1854a7e00d4..4cbad534000 100644
--- a/glusterfsd/src/glusterfsd.h
+++ b/glusterfsd/src/glusterfsd.h
@@ -114,7 +114,6 @@ struct _gfd_vol_top_priv {
};
typedef struct _gfd_vol_top_priv gfd_vol_top_priv_t;
-int glusterfs_mgmt_pmap_signout (glusterfs_ctx_t *ctx, char *brick_name);
int glusterfs_mgmt_pmap_signin (glusterfs_ctx_t *ctx);
int glusterfs_volfile_fetch (glusterfs_ctx_t *ctx);
void cleanup_and_exit (int signum);
@@ -126,9 +125,6 @@ int glusterfs_volume_top_read_perf (uint32_t blk_size, uint32_t blk_count,
char *brick_path, double *throughput,
double *time);
void
-glusterfs_autoscale_threads (glusterfs_ctx_t *ctx, int incr, xlator_t *this);
-
-void
xlator_mem_cleanup (xlator_t *this);
extern glusterfs_ctx_t *glusterfsd_ctx;
diff --git a/rpc/rpc-lib/src/Makefile.am b/rpc/rpc-lib/src/Makefile.am
index 95ce812fe7b..81a96476883 100644
--- a/rpc/rpc-lib/src/Makefile.am
+++ b/rpc/rpc-lib/src/Makefile.am
@@ -2,7 +2,8 @@ lib_LTLIBRARIES = libgfrpc.la
libgfrpc_la_SOURCES = auth-unix.c rpcsvc-auth.c rpcsvc.c auth-null.c \
rpc-transport.c xdr-rpc.c xdr-rpcclnt.c rpc-clnt.c auth-glusterfs.c \
- rpc-drc.c $(CONTRIBDIR)/sunrpc/xdr_sizeof.c rpc-clnt-ping.c
+ rpc-drc.c $(CONTRIBDIR)/sunrpc/xdr_sizeof.c rpc-clnt-ping.c \
+ autoscale-threads.c mgmt-pmap.c
EXTRA_DIST = libgfrpc.sym
diff --git a/rpc/rpc-lib/src/autoscale-threads.c b/rpc/rpc-lib/src/autoscale-threads.c
new file mode 100644
index 00000000000..33ba58c490b
--- /dev/null
+++ b/rpc/rpc-lib/src/autoscale-threads.c
@@ -0,0 +1,23 @@
+/*
+ Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "event.h"
+#include "rpcsvc.h"
+
+void
+rpcsvc_autoscale_threads (glusterfs_ctx_t *ctx, rpcsvc_t *rpc, int incr)
+{
+ struct event_pool *pool = ctx->event_pool;
+ int thread_count = pool->eventthreadcount;
+
+ pool->auto_thread_count += incr;
+ (void) event_reconfigure_threads (pool, thread_count+incr);
+ rpcsvc_ownthread_reconf (rpc, pool->eventthreadcount);
+}
diff --git a/rpc/rpc-lib/src/libgfrpc.sym b/rpc/rpc-lib/src/libgfrpc.sym
index 7d878abfd4d..4fab688c66d 100644
--- a/rpc/rpc-lib/src/libgfrpc.sym
+++ b/rpc/rpc-lib/src/libgfrpc.sym
@@ -66,3 +66,5 @@ rpc_transport_pollin_destroy
rpc_transport_ref
rpc_transport_unix_options_build
rpc_transport_unref
+rpc_clnt_mgmt_pmap_signout
+rpcsvc_autoscale_threads
diff --git a/rpc/rpc-lib/src/mgmt-pmap.c b/rpc/rpc-lib/src/mgmt-pmap.c
new file mode 100644
index 00000000000..fbcc78a7a7e
--- /dev/null
+++ b/rpc/rpc-lib/src/mgmt-pmap.c
@@ -0,0 +1,138 @@
+/*
+ Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "portmap-xdr.h"
+#include "protocol-common.h"
+#include "rpc-clnt.h"
+#include "xdr-generic.h"
+#include "xlator.h"
+
+/* Defining a minimal RPC client program for portmap signout
+ */
+char *clnt_pmap_signout_procs[GF_PMAP_MAXVALUE] = {
+ [GF_PMAP_SIGNOUT] = "SIGNOUT",
+};
+
+
+rpc_clnt_prog_t clnt_pmap_signout_prog = {
+ .progname = "Gluster Portmap",
+ .prognum = GLUSTER_PMAP_PROGRAM,
+ .progver = GLUSTER_PMAP_VERSION,
+ .procnames = clnt_pmap_signout_procs,
+};
+
+static int
+mgmt_pmap_signout_cbk (struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ pmap_signout_rsp rsp = {0,};
+ int ret = 0;
+
+ if (-1 == req->rpc_status) {
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_pmap_signout_rsp);
+ if (ret < 0) {
+ gf_log (THIS->name, GF_LOG_ERROR, "XDR decoding failed");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 == rsp.op_ret) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "failed to register the port with glusterd");
+ goto out;
+ }
+out:
+ return 0;
+}
+
+int
+rpc_clnt_mgmt_pmap_signout (glusterfs_ctx_t *ctx, char *brickname)
+{
+ int ret = 0;
+ pmap_signout_req req = {0, };
+ call_frame_t *frame = NULL;
+ cmd_args_t *cmd_args = NULL;
+ char brick_name[PATH_MAX] = {0,};
+ struct iovec iov = {0, };
+ struct iobuf *iobuf = NULL;
+ struct iobref *iobref = NULL;
+ ssize_t xdr_size = 0;
+
+ frame = create_frame (THIS, ctx->pool);
+ cmd_args = &ctx->cmd_args;
+
+ if (!cmd_args->brick_port && (!cmd_args->brick_name || !brickname)) {
+ gf_log ("fsd-mgmt", GF_LOG_DEBUG,
+ "portmapper signout arguments not given");
+ goto out;
+ }
+
+ if (cmd_args->volfile_server_transport &&
+ !strcmp(cmd_args->volfile_server_transport, "rdma")) {
+ snprintf (brick_name, sizeof(brick_name), "%s.rdma",
+ cmd_args->brick_name);
+ req.brick = brick_name;
+ } else {
+ if (brickname)
+ req.brick = brickname;
+ else
+ req.brick = cmd_args->brick_name;
+ }
+
+ req.port = cmd_args->brick_port;
+ req.rdma_port = cmd_args->brick_port2;
+
+ /* mgmt_submit_request is not available in libglusterfs.
+ * Need to serialize and submit manually.
+ */
+ iobref = iobref_new ();
+ if (!iobref) {
+ goto out;
+ }
+
+ xdr_size = xdr_sizeof ((xdrproc_t)xdr_pmap_signout_req, &req);
+ iobuf = iobuf_get2 (ctx->iobuf_pool, xdr_size);
+ if (!iobuf) {
+ goto out;
+ };
+
+ iobref_add (iobref, iobuf);
+
+ iov.iov_base = iobuf->ptr;
+ iov.iov_len = iobuf_pagesize (iobuf);
+
+ /* Create the xdr payload */
+ ret = xdr_serialize_generic (iov, &req,
+ (xdrproc_t)xdr_pmap_signout_req);
+ if (ret == -1) {
+ gf_log (THIS->name, GF_LOG_WARNING,
+ "failed to create XDR payload");
+ goto out;
+ }
+ iov.iov_len = ret;
+
+ ret = rpc_clnt_submit (ctx->mgmt, &clnt_pmap_signout_prog,
+ GF_PMAP_SIGNOUT, mgmt_pmap_signout_cbk,
+ &iov, 1,
+ NULL, 0, iobref, frame, NULL, 0, NULL, 0, NULL);
+out:
+ if (iobref)
+ iobref_unref (iobref);
+
+ if (iobuf)
+ iobuf_unref (iobuf);
+ return ret;
+}
diff --git a/rpc/rpc-lib/src/rpc-clnt.h b/rpc/rpc-lib/src/rpc-clnt.h
index 867592122cd..ea81b41c180 100644
--- a/rpc/rpc-lib/src/rpc-clnt.h
+++ b/rpc/rpc-lib/src/rpc-clnt.h
@@ -261,4 +261,7 @@ rpc_clnt_disconnect (struct rpc_clnt *rpc);
char
rpc_clnt_is_disabled (struct rpc_clnt *rpc);
+int
+rpc_clnt_mgmt_pmap_signout (glusterfs_ctx_t *ctx, char *brick_name);
+
#endif /* !_RPC_CLNT_H */
diff --git a/rpc/rpc-lib/src/rpcsvc.h b/rpc/rpc-lib/src/rpcsvc.h
index ec76b659965..dfd19845f5e 100644
--- a/rpc/rpc-lib/src/rpcsvc.h
+++ b/rpc/rpc-lib/src/rpcsvc.h
@@ -647,4 +647,6 @@ rpcsvc_get_program_vector_sizer (rpcsvc_t *svc, uint32_t prognum,
uint32_t progver, int procnum);
extern int
rpcsvc_ownthread_reconf (rpcsvc_t *svc, int new_eventthreadcount);
+
+void rpcsvc_autoscale_threads (glusterfs_ctx_t *ctx, rpcsvc_t *rpc, int incr);
#endif
diff --git a/xlators/protocol/server/src/server.c b/xlators/protocol/server/src/server.c
index 758264a638b..cc68df7f51b 100644
--- a/xlators/protocol/server/src/server.c
+++ b/xlators/protocol/server/src/server.c
@@ -23,6 +23,7 @@
#include "event.h"
#include "events.h"
#include "server-messages.h"
+#include "rpc-clnt.h"
#include "glusterfsd.h"
rpcsvc_cbk_program_t server_cbk_prog = {
@@ -1503,10 +1504,9 @@ server_notify (xlator_t *this, int32_t event, void *data, ...)
UNLOCK (&ctx->volfile_lock);
if (victim_found)
(*trav_p) = (*trav_p)->next;
- glusterfs_mgmt_pmap_signout (ctx,
- victim->name);
+ rpc_clnt_mgmt_pmap_signout (ctx, victim->name);
/* we need the protocol/server xlator here as 'this' */
- glusterfs_autoscale_threads (ctx, -1, this);
+ rpcsvc_autoscale_threads (ctx, conf->rpc, -1);
}
break;