author     Gluster Ant <bugzilla-bot@gluster.org>    2018-09-12 17:52:45 +0530
committer  Nigel Babu <nigelb@redhat.com>            2018-09-12 17:52:45 +0530
commit     e16868dede6455cab644805af6fe1ac312775e13 (patch)
tree       15aebdb4fff2d87cf8a72f836816b3aa634da58d /glusterfsd/src
parent     45a71c0548b6fd2c757aa2e7b7671a1411948894 (diff)
Land part 2 of clang-format changes
Change-Id: Ia84cc24c8924e6d22d02ac15f611c10e26db99b4
Signed-off-by: Nigel Babu <nigelb@redhat.com>
Diffstat (limited to 'glusterfsd/src')
-rw-r--r--  glusterfsd/src/gf_attach.c         293
-rw-r--r--  glusterfsd/src/glusterfsd-mgmt.c  4845
-rw-r--r--  glusterfsd/src/glusterfsd.c       4226
3 files changed, 4658 insertions, 4706 deletions
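
The changes below are mechanical whitespace and brace-placement rewrites. As a rough sketch (assuming clang-format is installed and the repository's .clang-format configuration sits at the source-tree root), the same reformatting of the three files listed above could be reproduced with an in-place run:

    clang-format -i glusterfsd/src/gf_attach.c glusterfsd/src/glusterfsd-mgmt.c glusterfsd/src/glusterfsd.c

The -i flag rewrites each file in place, applying the style rules picked up from the nearest .clang-format file; no functional code changes are involved.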
diff --git a/glusterfsd/src/gf_attach.c b/glusterfsd/src/gf_attach.c
index 1c1106c06f6..07ec0ed0686 100644
--- a/glusterfsd/src/gf_attach.c
+++ b/glusterfsd/src/gf_attach.c
@@ -25,189 +25,186 @@ int done = 0;
int rpc_status;
struct rpc_clnt_procedure gf_attach_actors[GLUSTERD_BRICK_MAXVALUE] = {
- [GLUSTERD_BRICK_NULL] = {"NULL", NULL },
- [GLUSTERD_BRICK_OP] = {"BRICK_OP", NULL },
+ [GLUSTERD_BRICK_NULL] = {"NULL", NULL},
+ [GLUSTERD_BRICK_OP] = {"BRICK_OP", NULL},
};
struct rpc_clnt_program gf_attach_prog = {
- .progname = "brick operations",
- .prognum = GD_BRICK_PROGRAM,
- .progver = GD_BRICK_VERSION,
- .proctable = gf_attach_actors,
- .numproc = GLUSTERD_BRICK_MAXVALUE,
+ .progname = "brick operations",
+ .prognum = GD_BRICK_PROGRAM,
+ .progver = GD_BRICK_VERSION,
+ .proctable = gf_attach_actors,
+ .numproc = GLUSTERD_BRICK_MAXVALUE,
};
int32_t
-my_callback (struct rpc_req *req, struct iovec *iov, int count, void *frame)
+my_callback(struct rpc_req *req, struct iovec *iov, int count, void *frame)
{
- rpc_status = req->rpc_status;
- done = 1;
- return 0;
+ rpc_status = req->rpc_status;
+ done = 1;
+ return 0;
}
/* copied from gd_syncop_submit_request */
int
-send_brick_req (xlator_t *this, struct rpc_clnt *rpc, char *path, int op)
+send_brick_req(xlator_t *this, struct rpc_clnt *rpc, char *path, int op)
{
- int ret = -1;
- struct iobuf *iobuf = NULL;
- struct iobref *iobref = NULL;
- struct iovec iov = {0, };
- ssize_t req_size = 0;
- call_frame_t *frame = NULL;
- gd1_mgmt_brick_op_req brick_req;
- void *req = &brick_req;
- int i;
-
- brick_req.op = op;
- brick_req.name = path;
- brick_req.input.input_val = NULL;
- brick_req.input.input_len = 0;
-
- req_size = xdr_sizeof ((xdrproc_t)xdr_gd1_mgmt_brick_op_req, req);
- iobuf = iobuf_get2 (rpc->ctx->iobuf_pool, req_size);
- if (!iobuf)
- goto out;
-
- iobref = iobref_new ();
- if (!iobref)
- goto out;
-
- frame = create_frame (this, this->ctx->pool);
- if (!frame)
- goto out;
-
- iobref_add (iobref, iobuf);
-
- iov.iov_base = iobuf->ptr;
- iov.iov_len = iobuf_pagesize (iobuf);
-
- /* Create the xdr payload */
- ret = xdr_serialize_generic (iov, req,
- (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
- if (ret == -1)
- goto out;
-
- iov.iov_len = ret;
-
- for (i = 0; i < 60; ++i) {
- if (rpc->conn.connected) {
- break;
- }
- sleep (1);
+ int ret = -1;
+ struct iobuf *iobuf = NULL;
+ struct iobref *iobref = NULL;
+ struct iovec iov = {
+ 0,
+ };
+ ssize_t req_size = 0;
+ call_frame_t *frame = NULL;
+ gd1_mgmt_brick_op_req brick_req;
+ void *req = &brick_req;
+ int i;
+
+ brick_req.op = op;
+ brick_req.name = path;
+ brick_req.input.input_val = NULL;
+ brick_req.input.input_len = 0;
+
+ req_size = xdr_sizeof((xdrproc_t)xdr_gd1_mgmt_brick_op_req, req);
+ iobuf = iobuf_get2(rpc->ctx->iobuf_pool, req_size);
+ if (!iobuf)
+ goto out;
+
+ iobref = iobref_new();
+ if (!iobref)
+ goto out;
+
+ frame = create_frame(this, this->ctx->pool);
+ if (!frame)
+ goto out;
+
+ iobref_add(iobref, iobuf);
+
+ iov.iov_base = iobuf->ptr;
+ iov.iov_len = iobuf_pagesize(iobuf);
+
+ /* Create the xdr payload */
+ ret = xdr_serialize_generic(iov, req, (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret == -1)
+ goto out;
+
+ iov.iov_len = ret;
+
+ for (i = 0; i < 60; ++i) {
+ if (rpc->conn.connected) {
+ break;
}
-
- /* Send the msg */
- ret = rpc_clnt_submit (rpc, &gf_attach_prog, op,
- my_callback, &iov, 1, NULL, 0, iobref, frame,
- NULL, 0, NULL, 0, NULL);
- if (!ret) {
- for (i = 0; !done && (i < 120); ++i) {
- sleep (1);
- }
+ sleep(1);
+ }
+
+ /* Send the msg */
+ ret = rpc_clnt_submit(rpc, &gf_attach_prog, op, my_callback, &iov, 1, NULL,
+ 0, iobref, frame, NULL, 0, NULL, 0, NULL);
+ if (!ret) {
+ for (i = 0; !done && (i < 120); ++i) {
+ sleep(1);
}
+ }
out:
- iobref_unref (iobref);
- iobuf_unref (iobuf);
- if (frame)
- STACK_DESTROY (frame->root);
+ iobref_unref(iobref);
+ iobuf_unref(iobuf);
+ if (frame)
+ STACK_DESTROY(frame->root);
- if (rpc_status != 0) {
- fprintf (stderr, "got error %d on RPC\n", rpc_status);
- return EXIT_FAILURE;
- }
+ if (rpc_status != 0) {
+ fprintf(stderr, "got error %d on RPC\n", rpc_status);
+ return EXIT_FAILURE;
+ }
- printf ("OK\n");
- return EXIT_SUCCESS;
+ printf("OK\n");
+ return EXIT_SUCCESS;
}
int
-usage (char *prog)
+usage(char *prog)
{
- fprintf (stderr, "Usage: %s uds_path volfile_path (to attach)\n",
- prog);
- fprintf (stderr, " %s -d uds_path brick_path (to detach)\n",
- prog);
+ fprintf(stderr, "Usage: %s uds_path volfile_path (to attach)\n", prog);
+ fprintf(stderr, " %s -d uds_path brick_path (to detach)\n", prog);
- return EXIT_FAILURE;
+ return EXIT_FAILURE;
}
int
-main (int argc, char *argv[])
+main(int argc, char *argv[])
{
- glfs_t *fs;
- struct rpc_clnt *rpc;
- dict_t *options;
- int ret;
- int op = GLUSTERD_BRICK_ATTACH;
-
- for (;;) {
- switch (getopt (argc, argv, "d")) {
- case 'd':
- op = GLUSTERD_BRICK_TERMINATE;
- break;
- case -1:
- goto done_parsing;
- default:
- return usage (argv[0]);
- }
+ glfs_t *fs;
+ struct rpc_clnt *rpc;
+ dict_t *options;
+ int ret;
+ int op = GLUSTERD_BRICK_ATTACH;
+
+ for (;;) {
+ switch (getopt(argc, argv, "d")) {
+ case 'd':
+ op = GLUSTERD_BRICK_TERMINATE;
+ break;
+ case -1:
+ goto done_parsing;
+ default:
+ return usage(argv[0]);
}
+ }
done_parsing:
- if (optind != (argc - 2)) {
- return usage (argv[0]);
- }
+ if (optind != (argc - 2)) {
+ return usage(argv[0]);
+ }
- fs = glfs_new ("gf-attach");
- if (!fs) {
- fprintf (stderr, "glfs_new failed\n");
- return EXIT_FAILURE;
- }
+ fs = glfs_new("gf-attach");
+ if (!fs) {
+ fprintf(stderr, "glfs_new failed\n");
+ return EXIT_FAILURE;
+ }
- (void) glfs_set_logging (fs, "/dev/stderr", 7);
- /*
- * This will actually fail because we haven't defined a volume, but
- * it will do enough initialization to get us going.
- */
- (void) glfs_init (fs);
+ (void)glfs_set_logging(fs, "/dev/stderr", 7);
+ /*
+ * This will actually fail because we haven't defined a volume, but
+ * it will do enough initialization to get us going.
+ */
+ (void)glfs_init(fs);
- options = dict_new();
- if (!options) {
- return EXIT_FAILURE;
- }
- ret = dict_set_str (options, "transport-type", "socket");
- if (ret != 0) {
- fprintf (stderr, "failed to set transport type\n");
- return EXIT_FAILURE;
- }
- ret = dict_set_str (options, "transport.address-family", "unix");
- if (ret != 0) {
- fprintf (stderr, "failed to set address family\n");
- return EXIT_FAILURE;
- }
- ret = dict_set_str (options, "transport.socket.connect-path",
- argv[optind]);
- if (ret != 0) {
- fprintf (stderr, "failed to set connect path\n");
- return EXIT_FAILURE;
- }
+ options = dict_new();
+ if (!options) {
+ return EXIT_FAILURE;
+ }
+ ret = dict_set_str(options, "transport-type", "socket");
+ if (ret != 0) {
+ fprintf(stderr, "failed to set transport type\n");
+ return EXIT_FAILURE;
+ }
+ ret = dict_set_str(options, "transport.address-family", "unix");
+ if (ret != 0) {
+ fprintf(stderr, "failed to set address family\n");
+ return EXIT_FAILURE;
+ }
+ ret = dict_set_str(options, "transport.socket.connect-path", argv[optind]);
+ if (ret != 0) {
+ fprintf(stderr, "failed to set connect path\n");
+ return EXIT_FAILURE;
+ }
- rpc = rpc_clnt_new (options, fs->ctx->master, "gf-attach-rpc", 0);
- if (!rpc) {
- fprintf (stderr, "rpc_clnt_new failed\n");
- return EXIT_FAILURE;
- }
+ rpc = rpc_clnt_new(options, fs->ctx->master, "gf-attach-rpc", 0);
+ if (!rpc) {
+ fprintf(stderr, "rpc_clnt_new failed\n");
+ return EXIT_FAILURE;
+ }
- if (rpc_clnt_register_notify (rpc, NULL, NULL) != 0) {
- fprintf (stderr, "rpc_clnt_register_notify failed\n");
- return EXIT_FAILURE;
- }
+ if (rpc_clnt_register_notify(rpc, NULL, NULL) != 0) {
+ fprintf(stderr, "rpc_clnt_register_notify failed\n");
+ return EXIT_FAILURE;
+ }
- if (rpc_clnt_start(rpc) != 0) {
- fprintf (stderr, "rpc_clnt_start failed\n");
- return EXIT_FAILURE;
- }
+ if (rpc_clnt_start(rpc) != 0) {
+ fprintf(stderr, "rpc_clnt_start failed\n");
+ return EXIT_FAILURE;
+ }
- return send_brick_req (fs->ctx->master, rpc, argv[optind+1], op);
+ return send_brick_req(fs->ctx->master, rpc, argv[optind + 1], op);
}
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index f7b1f367f97..88c9934e58e 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -38,2812 +38,2825 @@
static gf_boolean_t is_mgmt_rpc_reconnect = _gf_false;
int need_emancipate = 0;
-int glusterfs_mgmt_pmap_signin (glusterfs_ctx_t *ctx);
-int glusterfs_volfile_fetch (glusterfs_ctx_t *ctx);
-int glusterfs_process_volfp (glusterfs_ctx_t *ctx, FILE *fp);
-int glusterfs_graph_unknown_options (glusterfs_graph_t *graph);
-int emancipate(glusterfs_ctx_t *ctx, int ret);
+int
+glusterfs_mgmt_pmap_signin(glusterfs_ctx_t *ctx);
+int
+glusterfs_volfile_fetch(glusterfs_ctx_t *ctx);
+int
+glusterfs_process_volfp(glusterfs_ctx_t *ctx, FILE *fp);
+int
+glusterfs_graph_unknown_options(glusterfs_graph_t *graph);
+int
+emancipate(glusterfs_ctx_t *ctx, int ret);
int
-mgmt_cbk_spec (struct rpc_clnt *rpc, void *mydata, void *data)
+mgmt_cbk_spec(struct rpc_clnt *rpc, void *mydata, void *data)
{
- glusterfs_ctx_t *ctx = NULL;
+ glusterfs_ctx_t *ctx = NULL;
- ctx = glusterfsd_ctx;
- gf_log ("mgmt", GF_LOG_INFO, "Volume file changed");
+ ctx = glusterfsd_ctx;
+ gf_log("mgmt", GF_LOG_INFO, "Volume file changed");
- glusterfs_volfile_fetch (ctx);
- return 0;
+ glusterfs_volfile_fetch(ctx);
+ return 0;
}
-
int
-mgmt_cbk_event (struct rpc_clnt *rpc, void *mydata, void *data)
+mgmt_cbk_event(struct rpc_clnt *rpc, void *mydata, void *data)
{
- return 0;
+ return 0;
}
struct iobuf *
-glusterfs_serialize_reply (rpcsvc_request_t *req, void *arg,
- struct iovec *outmsg, xdrproc_t xdrproc)
+glusterfs_serialize_reply(rpcsvc_request_t *req, void *arg,
+ struct iovec *outmsg, xdrproc_t xdrproc)
{
- struct iobuf *iob = NULL;
- ssize_t retlen = -1;
- ssize_t xdr_size = 0;
-
- /* First, get the io buffer into which the reply in arg will
- * be serialized.
- */
- xdr_size = xdr_sizeof (xdrproc, arg);
- iob = iobuf_get2 (req->svc->ctx->iobuf_pool, xdr_size);
- if (!iob) {
- gf_log (THIS->name, GF_LOG_ERROR, "Failed to get iobuf");
- goto ret;
- }
-
- iobuf_to_iovec (iob, outmsg);
- /* Use the given serializer to translate the give C structure in arg
- * to XDR format which will be written into the buffer in outmsg.
- */
- /* retlen is used to received the error since size_t is unsigned and we
- * need -1 for error notification during encoding.
- */
- retlen = xdr_serialize_generic (*outmsg, arg, xdrproc);
- if (retlen == -1) {
- gf_log (THIS->name, GF_LOG_ERROR, "Failed to encode message");
- goto ret;
- }
-
- outmsg->iov_len = retlen;
+ struct iobuf *iob = NULL;
+ ssize_t retlen = -1;
+ ssize_t xdr_size = 0;
+
+ /* First, get the io buffer into which the reply in arg will
+ * be serialized.
+ */
+ xdr_size = xdr_sizeof(xdrproc, arg);
+ iob = iobuf_get2(req->svc->ctx->iobuf_pool, xdr_size);
+ if (!iob) {
+ gf_log(THIS->name, GF_LOG_ERROR, "Failed to get iobuf");
+ goto ret;
+ }
+
+ iobuf_to_iovec(iob, outmsg);
+ /* Use the given serializer to translate the give C structure in arg
+ * to XDR format which will be written into the buffer in outmsg.
+ */
+ /* retlen is used to received the error since size_t is unsigned and we
+ * need -1 for error notification during encoding.
+ */
+ retlen = xdr_serialize_generic(*outmsg, arg, xdrproc);
+ if (retlen == -1) {
+ gf_log(THIS->name, GF_LOG_ERROR, "Failed to encode message");
+ goto ret;
+ }
+
+ outmsg->iov_len = retlen;
ret:
- if (retlen == -1) {
- iob = NULL;
- }
+ if (retlen == -1) {
+ iob = NULL;
+ }
- return iob;
+ return iob;
}
int
-glusterfs_submit_reply (rpcsvc_request_t *req, void *arg,
- struct iovec *payload, int payloadcount,
- struct iobref *iobref, xdrproc_t xdrproc)
+glusterfs_submit_reply(rpcsvc_request_t *req, void *arg, struct iovec *payload,
+ int payloadcount, struct iobref *iobref,
+ xdrproc_t xdrproc)
{
- struct iobuf *iob = NULL;
- int ret = -1;
- struct iovec rsp = {0,};
- char new_iobref = 0;
-
- if (!req) {
- GF_ASSERT (req);
- goto out;
- }
-
+ struct iobuf *iob = NULL;
+ int ret = -1;
+ struct iovec rsp = {
+ 0,
+ };
+ char new_iobref = 0;
+
+ if (!req) {
+ GF_ASSERT(req);
+ goto out;
+ }
+
+ if (!iobref) {
+ iobref = iobref_new();
if (!iobref) {
- iobref = iobref_new ();
- if (!iobref) {
- gf_log (THIS->name, GF_LOG_ERROR, "out of memory");
- goto out;
- }
-
- new_iobref = 1;
+ gf_log(THIS->name, GF_LOG_ERROR, "out of memory");
+ goto out;
}
- iob = glusterfs_serialize_reply (req, arg, &rsp, xdrproc);
- if (!iob) {
- gf_log_callingfn (THIS->name, GF_LOG_ERROR, "Failed to serialize reply");
- } else {
- iobref_add (iobref, iob);
- }
+ new_iobref = 1;
+ }
- ret = rpcsvc_submit_generic (req, &rsp, 1, payload, payloadcount,
- iobref);
+ iob = glusterfs_serialize_reply(req, arg, &rsp, xdrproc);
+ if (!iob) {
+ gf_log_callingfn(THIS->name, GF_LOG_ERROR, "Failed to serialize reply");
+ } else {
+ iobref_add(iobref, iob);
+ }
- /* Now that we've done our job of handing the message to the RPC layer
- * we can safely unref the iob in the hope that RPC layer must have
- * ref'ed the iob on receiving into the txlist.
- */
- if (ret == -1) {
- gf_log (THIS->name, GF_LOG_ERROR, "Reply submission failed");
- goto out;
- }
+ ret = rpcsvc_submit_generic(req, &rsp, 1, payload, payloadcount, iobref);
- ret = 0;
+ /* Now that we've done our job of handing the message to the RPC layer
+ * we can safely unref the iob in the hope that RPC layer must have
+ * ref'ed the iob on receiving into the txlist.
+ */
+ if (ret == -1) {
+ gf_log(THIS->name, GF_LOG_ERROR, "Reply submission failed");
+ goto out;
+ }
+
+ ret = 0;
out:
- if (iob)
- iobuf_unref (iob);
+ if (iob)
+ iobuf_unref(iob);
- if (new_iobref && iobref)
- iobref_unref (iobref);
+ if (new_iobref && iobref)
+ iobref_unref(iobref);
- return ret;
+ return ret;
}
int
-glusterfs_terminate_response_send (rpcsvc_request_t *req, int op_ret)
+glusterfs_terminate_response_send(rpcsvc_request_t *req, int op_ret)
{
- gd1_mgmt_brick_op_rsp rsp = {0,};
- dict_t *dict = NULL;
- int ret = 0;
-
- rsp.op_ret = op_ret;
- rsp.op_errno = 0;
- rsp.op_errstr = "";
- dict = dict_new ();
-
- if (dict)
- ret = dict_allocate_and_serialize (dict, &rsp.output.output_val,
- &rsp.output.output_len);
-
- if (ret == 0)
- ret = glusterfs_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
+ gd1_mgmt_brick_op_rsp rsp = {
+ 0,
+ };
+ dict_t *dict = NULL;
+ int ret = 0;
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = 0;
+ rsp.op_errstr = "";
+ dict = dict_new();
+
+ if (dict)
+ ret = dict_allocate_and_serialize(dict, &rsp.output.output_val,
+ &rsp.output.output_len);
+
+ if (ret == 0)
+ ret = glusterfs_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
- GF_FREE (rsp.output.output_val);
- if (dict)
- dict_unref (dict);
- return ret;
+ GF_FREE(rsp.output.output_val);
+ if (dict)
+ dict_unref(dict);
+ return ret;
}
int
-glusterfs_handle_terminate (rpcsvc_request_t *req)
+glusterfs_handle_terminate(rpcsvc_request_t *req)
{
- gd1_mgmt_brick_op_req xlator_req = {0,};
- ssize_t ret;
- glusterfs_ctx_t *ctx = NULL;
- xlator_t *top = NULL;
- xlator_t *victim = NULL;
- xlator_t *tvictim = NULL;
- xlator_list_t **trav_p = NULL;
- gf_boolean_t lockflag = _gf_false;
- gf_boolean_t still_bricks_attached = _gf_false;
-
- ret = xdr_to_generic (req->msg[0], &xlator_req,
- (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
- if (ret < 0) {
- req->rpc_err = GARBAGE_ARGS;
- return -1;
- }
- ctx = glusterfsd_ctx;
-
- LOCK (&ctx->volfile_lock);
- {
- /* Find the xlator_list_t that points to our victim. */
- if (glusterfsd_ctx->active) {
- top = glusterfsd_ctx->active->first;
- for (trav_p = &top->children; *trav_p;
- trav_p = &(*trav_p)->next) {
- victim = (*trav_p)->xlator;
- if (!victim->cleanup_starting && strcmp (victim->name, xlator_req.name) == 0) {
- break;
- }
- }
- }
-
- if (!top)
- goto err;
-
- }
- if (!*trav_p) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "can't terminate %s - not found",
- xlator_req.name);
- /*
- * Used to be -ENOENT. However, the caller asked us to
- * make sure it's down and if it's already down that's
- * good enough.
- */
- glusterfs_terminate_response_send (req, 0);
- goto err;
- }
-
- glusterfs_terminate_response_send (req, 0);
- for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
- tvictim = (*trav_p)->xlator;
- if (!tvictim->cleanup_starting &&
- !strcmp (tvictim->name, xlator_req.name)) {
- continue;
- }
- if (!tvictim->cleanup_starting) {
- still_bricks_attached = _gf_true;
- break;
+ gd1_mgmt_brick_op_req xlator_req = {
+ 0,
+ };
+ ssize_t ret;
+ glusterfs_ctx_t *ctx = NULL;
+ xlator_t *top = NULL;
+ xlator_t *victim = NULL;
+ xlator_t *tvictim = NULL;
+ xlator_list_t **trav_p = NULL;
+ gf_boolean_t lockflag = _gf_false;
+ gf_boolean_t still_bricks_attached = _gf_false;
+
+ ret = xdr_to_generic(req->msg[0], &xlator_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret < 0) {
+ req->rpc_err = GARBAGE_ARGS;
+ return -1;
+ }
+ ctx = glusterfsd_ctx;
+
+ LOCK(&ctx->volfile_lock);
+ {
+ /* Find the xlator_list_t that points to our victim. */
+ if (glusterfsd_ctx->active) {
+ top = glusterfsd_ctx->active->first;
+ for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
+ victim = (*trav_p)->xlator;
+ if (!victim->cleanup_starting &&
+ strcmp(victim->name, xlator_req.name) == 0) {
+ break;
}
+ }
}
- if (!still_bricks_attached) {
- gf_log (THIS->name, GF_LOG_INFO,
- "terminating after loss of last child %s",
- xlator_req.name);
- rpc_clnt_mgmt_pmap_signout (glusterfsd_ctx, xlator_req.name);
- kill (getpid(), SIGTERM);
- } else {
- /* TODO cleanup sequence needs to be done properly for
- Quota and Changelog
- */
- if (victim->cleanup_starting)
- goto err;
-
- rpc_clnt_mgmt_pmap_signout (glusterfsd_ctx, xlator_req.name);
- victim->cleanup_starting = 1;
-
- UNLOCK (&ctx->volfile_lock);
- lockflag = _gf_true;
- gf_log (THIS->name, GF_LOG_INFO, "detaching not-only"
- " child %s", xlator_req.name);
- top->notify (top, GF_EVENT_CLEANUP, victim);
-
- }
+ if (!top)
+ goto err;
+ }
+ if (!*trav_p) {
+ gf_log(THIS->name, GF_LOG_ERROR, "can't terminate %s - not found",
+ xlator_req.name);
+ /*
+ * Used to be -ENOENT. However, the caller asked us to
+ * make sure it's down and if it's already down that's
+ * good enough.
+ */
+ glusterfs_terminate_response_send(req, 0);
+ goto err;
+ }
+
+ glusterfs_terminate_response_send(req, 0);
+ for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
+ tvictim = (*trav_p)->xlator;
+ if (!tvictim->cleanup_starting &&
+ !strcmp(tvictim->name, xlator_req.name)) {
+ continue;
+ }
+ if (!tvictim->cleanup_starting) {
+ still_bricks_attached = _gf_true;
+ break;
+ }
+ }
+ if (!still_bricks_attached) {
+ gf_log(THIS->name, GF_LOG_INFO,
+ "terminating after loss of last child %s", xlator_req.name);
+ rpc_clnt_mgmt_pmap_signout(glusterfsd_ctx, xlator_req.name);
+ kill(getpid(), SIGTERM);
+ } else {
+ /* TODO cleanup sequence needs to be done properly for
+ Quota and Changelog
+ */
+ if (victim->cleanup_starting)
+ goto err;
+
+ rpc_clnt_mgmt_pmap_signout(glusterfsd_ctx, xlator_req.name);
+ victim->cleanup_starting = 1;
+
+ UNLOCK(&ctx->volfile_lock);
+ lockflag = _gf_true;
+
+ gf_log(THIS->name, GF_LOG_INFO,
+ "detaching not-only"
+ " child %s",
+ xlator_req.name);
+ top->notify(top, GF_EVENT_CLEANUP, victim);
+ }
err:
- if (!lockflag)
- UNLOCK (&ctx->volfile_lock);
- free (xlator_req.name);
- xlator_req.name = NULL;
- return 0;
+ if (!lockflag)
+ UNLOCK(&ctx->volfile_lock);
+ free(xlator_req.name);
+ xlator_req.name = NULL;
+ return 0;
}
int
-glusterfs_translator_info_response_send (rpcsvc_request_t *req, int ret,
- char *msg, dict_t *output)
+glusterfs_translator_info_response_send(rpcsvc_request_t *req, int ret,
+ char *msg, dict_t *output)
{
- gd1_mgmt_brick_op_rsp rsp = {0,};
- gf_boolean_t free_ptr = _gf_false;
- GF_ASSERT (req);
-
- rsp.op_ret = ret;
- rsp.op_errno = 0;
- if (ret && msg && msg[0])
- rsp.op_errstr = msg;
- else
- rsp.op_errstr = "";
-
- ret = -1;
- if (output) {
- ret = dict_allocate_and_serialize (output,
- &rsp.output.output_val,
- &rsp.output.output_len);
- }
- if (!ret)
- free_ptr = _gf_true;
+ gd1_mgmt_brick_op_rsp rsp = {
+ 0,
+ };
+ gf_boolean_t free_ptr = _gf_false;
+ GF_ASSERT(req);
+
+ rsp.op_ret = ret;
+ rsp.op_errno = 0;
+ if (ret && msg && msg[0])
+ rsp.op_errstr = msg;
+ else
+ rsp.op_errstr = "";
- glusterfs_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
- ret = 0;
- if (free_ptr)
- GF_FREE (rsp.output.output_val);
- return ret;
+ ret = -1;
+ if (output) {
+ ret = dict_allocate_and_serialize(output, &rsp.output.output_val,
+ &rsp.output.output_len);
+ }
+ if (!ret)
+ free_ptr = _gf_true;
+
+ glusterfs_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
+ ret = 0;
+ if (free_ptr)
+ GF_FREE(rsp.output.output_val);
+ return ret;
}
int
-glusterfs_xlator_op_response_send (rpcsvc_request_t *req, int op_ret,
- char *msg, dict_t *output)
+glusterfs_xlator_op_response_send(rpcsvc_request_t *req, int op_ret, char *msg,
+ dict_t *output)
{
- gd1_mgmt_brick_op_rsp rsp = {0,};
- int ret = -1;
- gf_boolean_t free_ptr = _gf_false;
- GF_ASSERT (req);
-
- rsp.op_ret = op_ret;
- rsp.op_errno = 0;
- if (op_ret && msg && msg[0])
- rsp.op_errstr = msg;
- else
- rsp.op_errstr = "";
+ gd1_mgmt_brick_op_rsp rsp = {
+ 0,
+ };
+ int ret = -1;
+ gf_boolean_t free_ptr = _gf_false;
+ GF_ASSERT(req);
+
+ rsp.op_ret = op_ret;
+ rsp.op_errno = 0;
+ if (op_ret && msg && msg[0])
+ rsp.op_errstr = msg;
+ else
+ rsp.op_errstr = "";
- if (output) {
- ret = dict_allocate_and_serialize (output,
- &rsp.output.output_val,
- &rsp.output.output_len);
- }
- if (!ret)
- free_ptr = _gf_true;
+ if (output) {
+ ret = dict_allocate_and_serialize(output, &rsp.output.output_val,
+ &rsp.output.output_len);
+ }
+ if (!ret)
+ free_ptr = _gf_true;
- ret = glusterfs_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
+ ret = glusterfs_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
- if (free_ptr)
- GF_FREE (rsp.output.output_val);
+ if (free_ptr)
+ GF_FREE(rsp.output.output_val);
- return ret;
+ return ret;
}
int
-glusterfs_handle_translator_info_get (rpcsvc_request_t *req)
+glusterfs_handle_translator_info_get(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gd1_mgmt_brick_op_req xlator_req = {0,};
- dict_t *dict = NULL;
- xlator_t *this = NULL;
- gf1_cli_top_op top_op = 0;
- uint32_t blk_size = 0;
- uint32_t blk_count = 0;
- double time = 0;
- double throughput = 0;
- xlator_t *any = NULL;
- xlator_t *xlator = NULL;
- glusterfs_graph_t *active = NULL;
- glusterfs_ctx_t *ctx = NULL;
- char msg[2048] = {0,};
- dict_t *output = NULL;
-
- GF_ASSERT (req);
- this = THIS;
- GF_ASSERT (this);
-
- ret = xdr_to_generic (req->msg[0], &xlator_req,
- (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
- if (ret < 0) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- dict = dict_new ();
- ret = dict_unserialize (xlator_req.input.input_val,
- xlator_req.input.input_len,
- &dict);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "failed to "
- "unserialize req-buffer to dictionary");
- goto out;
- }
+ int32_t ret = -1;
+ gd1_mgmt_brick_op_req xlator_req = {
+ 0,
+ };
+ dict_t *dict = NULL;
+ xlator_t *this = NULL;
+ gf1_cli_top_op top_op = 0;
+ uint32_t blk_size = 0;
+ uint32_t blk_count = 0;
+ double time = 0;
+ double throughput = 0;
+ xlator_t *any = NULL;
+ xlator_t *xlator = NULL;
+ glusterfs_graph_t *active = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ char msg[2048] = {
+ 0,
+ };
+ dict_t *output = NULL;
+
+ GF_ASSERT(req);
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = xdr_to_generic(req->msg[0], &xlator_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ dict = dict_new();
+ ret = dict_unserialize(xlator_req.input.input_val,
+ xlator_req.input.input_len, &dict);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ goto out;
+ }
+
+ ret = dict_get_int32(dict, "top-op", (int32_t *)&top_op);
+ if ((!ret) &&
+ (GF_CLI_TOP_READ_PERF == top_op || GF_CLI_TOP_WRITE_PERF == top_op)) {
+ ret = dict_get_uint32(dict, "blk-size", &blk_size);
+ if (ret)
+ goto cont;
+ ret = dict_get_uint32(dict, "blk-cnt", &blk_count);
+ if (ret)
+ goto cont;
- ret = dict_get_int32 (dict, "top-op", (int32_t *)&top_op);
- if ((!ret) && (GF_CLI_TOP_READ_PERF == top_op ||
- GF_CLI_TOP_WRITE_PERF == top_op)) {
- ret = dict_get_uint32 (dict, "blk-size", &blk_size);
- if (ret)
- goto cont;
- ret = dict_get_uint32 (dict, "blk-cnt", &blk_count);
- if (ret)
- goto cont;
-
- if (GF_CLI_TOP_READ_PERF == top_op) {
- ret = glusterfs_volume_top_read_perf
- (blk_size, blk_count, xlator_req.name,
- &throughput, &time);
- } else if ( GF_CLI_TOP_WRITE_PERF == top_op) {
- ret = glusterfs_volume_top_write_perf
- (blk_size, blk_count, xlator_req.name,
- &throughput, &time);
- }
- ret = dict_set_double (dict, "time", time);
- if (ret)
- goto cont;
- ret = dict_set_double (dict, "throughput", throughput);
- if (ret)
- goto cont;
+ if (GF_CLI_TOP_READ_PERF == top_op) {
+ ret = glusterfs_volume_top_read_perf(
+ blk_size, blk_count, xlator_req.name, &throughput, &time);
+ } else if (GF_CLI_TOP_WRITE_PERF == top_op) {
+ ret = glusterfs_volume_top_write_perf(
+ blk_size, blk_count, xlator_req.name, &throughput, &time);
}
+ ret = dict_set_double(dict, "time", time);
+ if (ret)
+ goto cont;
+ ret = dict_set_double(dict, "throughput", throughput);
+ if (ret)
+ goto cont;
+ }
cont:
- ctx = glusterfsd_ctx;
- GF_ASSERT (ctx);
- active = ctx->active;
- any = active->first;
-
- xlator = get_xlator_by_name (any, xlator_req.name);
- if (!xlator) {
- snprintf (msg, sizeof (msg), "xlator %s is not loaded",
- xlator_req.name);
- goto out;
- }
-
- /*
- * Searching by name will only get us to the decompounder translator,
- * but we really want io-stats. Since we know the exact relationship
- * between these two, it's easy to get from one to the other.
- *
- * TBD: should this even be notify, or something else?
- */
- xlator = FIRST_CHILD(xlator);
-
- output = dict_new ();
- ret = xlator->notify (xlator, GF_EVENT_TRANSLATOR_INFO, dict, output);
+ ctx = glusterfsd_ctx;
+ GF_ASSERT(ctx);
+ active = ctx->active;
+ any = active->first;
+
+ xlator = get_xlator_by_name(any, xlator_req.name);
+ if (!xlator) {
+ snprintf(msg, sizeof(msg), "xlator %s is not loaded", xlator_req.name);
+ goto out;
+ }
+
+ /*
+ * Searching by name will only get us to the decompounder translator,
+ * but we really want io-stats. Since we know the exact relationship
+ * between these two, it's easy to get from one to the other.
+ *
+ * TBD: should this even be notify, or something else?
+ */
+ xlator = FIRST_CHILD(xlator);
+
+ output = dict_new();
+ ret = xlator->notify(xlator, GF_EVENT_TRANSLATOR_INFO, dict, output);
out:
- ret = glusterfs_translator_info_response_send (req, ret, msg, output);
-
- free (xlator_req.name);
- free (xlator_req.input.input_val);
- if (output)
- dict_unref (output);
- if (dict)
- dict_unref (dict);
- return ret;
+ ret = glusterfs_translator_info_response_send(req, ret, msg, output);
+
+ free(xlator_req.name);
+ free(xlator_req.input.input_val);
+ if (output)
+ dict_unref(output);
+ if (dict)
+ dict_unref(dict);
+ return ret;
}
int
-glusterfs_volume_top_write_perf (uint32_t blk_size, uint32_t blk_count,
- char *brick_path, double *throughput,
- double *time)
+glusterfs_volume_top_write_perf(uint32_t blk_size, uint32_t blk_count,
+ char *brick_path, double *throughput,
+ double *time)
{
- int32_t fd = -1;
- int32_t input_fd = -1;
- char export_path[PATH_MAX] = {0,};
- char *buf = NULL;
- int32_t iter = 0;
- int32_t ret = -1;
- uint64_t total_blks = 0;
- struct timeval begin, end = {0,};
-
- GF_ASSERT (brick_path);
- GF_ASSERT (throughput);
- GF_ASSERT (time);
- if (!(blk_size > 0) || ! (blk_count > 0))
- goto out;
-
- snprintf (export_path, sizeof (export_path), "%s/%s",
- brick_path, ".gf-tmp-stats-perf");
-
- fd = open (export_path, O_CREAT|O_RDWR, S_IRWXU);
- if (-1 == fd) {
- ret = -1;
- gf_log ("glusterd", GF_LOG_ERROR, "Could not open tmp file");
- goto out;
- }
-
- buf = GF_MALLOC (blk_size * sizeof(*buf), gf_common_mt_char);
- if (!buf) {
- ret = -1;
- goto out;
- }
+ int32_t fd = -1;
+ int32_t input_fd = -1;
+ char export_path[PATH_MAX] = {
+ 0,
+ };
+ char *buf = NULL;
+ int32_t iter = 0;
+ int32_t ret = -1;
+ uint64_t total_blks = 0;
+ struct timeval begin, end = {
+ 0,
+ };
+
+ GF_ASSERT(brick_path);
+ GF_ASSERT(throughput);
+ GF_ASSERT(time);
+ if (!(blk_size > 0) || !(blk_count > 0))
+ goto out;
+
+ snprintf(export_path, sizeof(export_path), "%s/%s", brick_path,
+ ".gf-tmp-stats-perf");
+
+ fd = open(export_path, O_CREAT | O_RDWR, S_IRWXU);
+ if (-1 == fd) {
+ ret = -1;
+ gf_log("glusterd", GF_LOG_ERROR, "Could not open tmp file");
+ goto out;
+ }
- input_fd = open ("/dev/zero", O_RDONLY);
- if (-1 == input_fd) {
- ret = -1;
- gf_log ("glusterd",GF_LOG_ERROR, "Unable to open input file");
- goto out;
- }
+ buf = GF_MALLOC(blk_size * sizeof(*buf), gf_common_mt_char);
+ if (!buf) {
+ ret = -1;
+ goto out;
+ }
- gettimeofday (&begin, NULL);
- for (iter = 0; iter < blk_count; iter++) {
- ret = sys_read (input_fd, buf, blk_size);
- if (ret != blk_size) {
- ret = -1;
- goto out;
- }
- ret = sys_write (fd, buf, blk_size);
- if (ret != blk_size) {
- ret = -1;
- goto out;
- }
- total_blks += ret;
- }
- ret = 0;
- if (total_blks != ((uint64_t)blk_size * blk_count)) {
- gf_log ("glusterd", GF_LOG_WARNING, "Error in write");
- ret = -1;
- goto out;
- }
+ input_fd = open("/dev/zero", O_RDONLY);
+ if (-1 == input_fd) {
+ ret = -1;
+ gf_log("glusterd", GF_LOG_ERROR, "Unable to open input file");
+ goto out;
+ }
+
+ gettimeofday(&begin, NULL);
+ for (iter = 0; iter < blk_count; iter++) {
+ ret = sys_read(input_fd, buf, blk_size);
+ if (ret != blk_size) {
+ ret = -1;
+ goto out;
+ }
+ ret = sys_write(fd, buf, blk_size);
+ if (ret != blk_size) {
+ ret = -1;
+ goto out;
+ }
+ total_blks += ret;
+ }
+ ret = 0;
+ if (total_blks != ((uint64_t)blk_size * blk_count)) {
+ gf_log("glusterd", GF_LOG_WARNING, "Error in write");
+ ret = -1;
+ goto out;
+ }
- gettimeofday (&end, NULL);
- *time = (end.tv_sec - begin.tv_sec) * 1e6
- + (end.tv_usec - begin.tv_usec);
- *throughput = total_blks / *time;
- gf_log ("glusterd", GF_LOG_INFO, "Throughput %.2f Mbps time %.2f secs "
- "bytes written %"PRId64, *throughput, *time, total_blks);
+ gettimeofday(&end, NULL);
+ *time = (end.tv_sec - begin.tv_sec) * 1e6 + (end.tv_usec - begin.tv_usec);
+ *throughput = total_blks / *time;
+ gf_log("glusterd", GF_LOG_INFO,
+ "Throughput %.2f Mbps time %.2f secs "
+ "bytes written %" PRId64,
+ *throughput, *time, total_blks);
out:
- if (fd >= 0)
- sys_close (fd);
- if (input_fd >= 0)
- sys_close (input_fd);
- GF_FREE (buf);
- sys_unlink (export_path);
-
- return ret;
+ if (fd >= 0)
+ sys_close(fd);
+ if (input_fd >= 0)
+ sys_close(input_fd);
+ GF_FREE(buf);
+ sys_unlink(export_path);
+
+ return ret;
}
int
-glusterfs_volume_top_read_perf (uint32_t blk_size, uint32_t blk_count,
- char *brick_path, double *throughput,
- double *time)
+glusterfs_volume_top_read_perf(uint32_t blk_size, uint32_t blk_count,
+ char *brick_path, double *throughput,
+ double *time)
{
- int32_t fd = -1;
- int32_t input_fd = -1;
- int32_t output_fd = -1;
- char export_path[PATH_MAX] = {0,};
- char *buf = NULL;
- int32_t iter = 0;
- int32_t ret = -1;
- uint64_t total_blks = 0;
- struct timeval begin, end = {0,};
-
- GF_ASSERT (brick_path);
- GF_ASSERT (throughput);
- GF_ASSERT (time);
- if (!(blk_size > 0) || ! (blk_count > 0))
- goto out;
-
- snprintf (export_path, sizeof (export_path), "%s/%s",
- brick_path, ".gf-tmp-stats-perf");
- fd = open (export_path, O_CREAT|O_RDWR, S_IRWXU);
- if (-1 == fd) {
- ret = -1;
- gf_log ("glusterd", GF_LOG_ERROR, "Could not open tmp file");
- goto out;
- }
-
- buf = GF_MALLOC (blk_size * sizeof(*buf), gf_common_mt_char);
- if (!buf) {
- ret = -1;
- gf_log ("glusterd", GF_LOG_ERROR, "Could not allocate memory");
- goto out;
- }
-
- input_fd = open ("/dev/zero", O_RDONLY);
- if (-1 == input_fd) {
- ret = -1;
- gf_log ("glusterd", GF_LOG_ERROR, "Could not open input file");
- goto out;
- }
-
- output_fd = open ("/dev/null", O_RDWR);
- if (-1 == output_fd) {
- ret = -1;
- gf_log ("glusterd", GF_LOG_ERROR, "Could not open output file");
- goto out;
- }
+ int32_t fd = -1;
+ int32_t input_fd = -1;
+ int32_t output_fd = -1;
+ char export_path[PATH_MAX] = {
+ 0,
+ };
+ char *buf = NULL;
+ int32_t iter = 0;
+ int32_t ret = -1;
+ uint64_t total_blks = 0;
+ struct timeval begin, end = {
+ 0,
+ };
+
+ GF_ASSERT(brick_path);
+ GF_ASSERT(throughput);
+ GF_ASSERT(time);
+ if (!(blk_size > 0) || !(blk_count > 0))
+ goto out;
+
+ snprintf(export_path, sizeof(export_path), "%s/%s", brick_path,
+ ".gf-tmp-stats-perf");
+ fd = open(export_path, O_CREAT | O_RDWR, S_IRWXU);
+ if (-1 == fd) {
+ ret = -1;
+ gf_log("glusterd", GF_LOG_ERROR, "Could not open tmp file");
+ goto out;
+ }
- for (iter = 0; iter < blk_count; iter++) {
- ret = sys_read (input_fd, buf, blk_size);
- if (ret != blk_size) {
- ret = -1;
- goto out;
- }
- ret = sys_write (fd, buf, blk_size);
- if (ret != blk_size) {
- ret = -1;
- goto out;
- }
- }
+ buf = GF_MALLOC(blk_size * sizeof(*buf), gf_common_mt_char);
+ if (!buf) {
+ ret = -1;
+ gf_log("glusterd", GF_LOG_ERROR, "Could not allocate memory");
+ goto out;
+ }
- ret = sys_fsync (fd);
- if (ret) {
- gf_log ("glusterd", GF_LOG_ERROR, "could not flush cache");
- goto out;
- }
- ret = sys_lseek (fd, 0L, 0);
- if (ret != 0) {
- gf_log ("glusterd", GF_LOG_ERROR,
- "could not seek back to start");
- ret = -1;
- goto out;
- }
- gettimeofday (&begin, NULL);
- for (iter = 0; iter < blk_count; iter++) {
- ret = sys_read (fd, buf, blk_size);
- if (ret != blk_size) {
- ret = -1;
- goto out;
- }
- ret = sys_write (output_fd, buf, blk_size);
- if (ret != blk_size) {
- ret = -1;
- goto out;
- }
- total_blks += ret;
- }
- ret = 0;
- if (total_blks != ((uint64_t)blk_size * blk_count)) {
- ret = -1;
- gf_log ("glusterd", GF_LOG_WARNING, "Error in read");
- goto out;
- }
+ input_fd = open("/dev/zero", O_RDONLY);
+ if (-1 == input_fd) {
+ ret = -1;
+ gf_log("glusterd", GF_LOG_ERROR, "Could not open input file");
+ goto out;
+ }
- gettimeofday (&end, NULL);
- *time = (end.tv_sec - begin.tv_sec) * 1e6
- + (end.tv_usec - begin.tv_usec);
- *throughput = total_blks / *time;
- gf_log ("glusterd", GF_LOG_INFO, "Throughput %.2f Mbps time %.2f secs "
- "bytes read %"PRId64, *throughput, *time, total_blks);
+ output_fd = open("/dev/null", O_RDWR);
+ if (-1 == output_fd) {
+ ret = -1;
+ gf_log("glusterd", GF_LOG_ERROR, "Could not open output file");
+ goto out;
+ }
+
+ for (iter = 0; iter < blk_count; iter++) {
+ ret = sys_read(input_fd, buf, blk_size);
+ if (ret != blk_size) {
+ ret = -1;
+ goto out;
+ }
+ ret = sys_write(fd, buf, blk_size);
+ if (ret != blk_size) {
+ ret = -1;
+ goto out;
+ }
+ }
+
+ ret = sys_fsync(fd);
+ if (ret) {
+ gf_log("glusterd", GF_LOG_ERROR, "could not flush cache");
+ goto out;
+ }
+ ret = sys_lseek(fd, 0L, 0);
+ if (ret != 0) {
+ gf_log("glusterd", GF_LOG_ERROR, "could not seek back to start");
+ ret = -1;
+ goto out;
+ }
+ gettimeofday(&begin, NULL);
+ for (iter = 0; iter < blk_count; iter++) {
+ ret = sys_read(fd, buf, blk_size);
+ if (ret != blk_size) {
+ ret = -1;
+ goto out;
+ }
+ ret = sys_write(output_fd, buf, blk_size);
+ if (ret != blk_size) {
+ ret = -1;
+ goto out;
+ }
+ total_blks += ret;
+ }
+ ret = 0;
+ if (total_blks != ((uint64_t)blk_size * blk_count)) {
+ ret = -1;
+ gf_log("glusterd", GF_LOG_WARNING, "Error in read");
+ goto out;
+ }
+
+ gettimeofday(&end, NULL);
+ *time = (end.tv_sec - begin.tv_sec) * 1e6 + (end.tv_usec - begin.tv_usec);
+ *throughput = total_blks / *time;
+ gf_log("glusterd", GF_LOG_INFO,
+ "Throughput %.2f Mbps time %.2f secs "
+ "bytes read %" PRId64,
+ *throughput, *time, total_blks);
out:
- if (fd >= 0)
- sys_close (fd);
- if (input_fd >= 0)
- sys_close (input_fd);
- if (output_fd >= 0)
- sys_close (output_fd);
- GF_FREE (buf);
- sys_unlink (export_path);
-
- return ret;
+ if (fd >= 0)
+ sys_close(fd);
+ if (input_fd >= 0)
+ sys_close(input_fd);
+ if (output_fd >= 0)
+ sys_close(output_fd);
+ GF_FREE(buf);
+ sys_unlink(export_path);
+
+ return ret;
}
int
-glusterfs_handle_translator_op (rpcsvc_request_t *req)
+glusterfs_handle_translator_op(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- int32_t op_ret = 0;
- gd1_mgmt_brick_op_req xlator_req = {0,};
- dict_t *input = NULL;
- xlator_t *xlator = NULL;
- xlator_t *any = NULL;
- dict_t *output = NULL;
- char key[2048] = {0};
- char *xname = NULL;
- glusterfs_ctx_t *ctx = NULL;
- glusterfs_graph_t *active = NULL;
- xlator_t *this = NULL;
- int i = 0;
- int count = 0;
-
- GF_ASSERT (req);
- this = THIS;
- GF_ASSERT (this);
-
- ret = xdr_to_generic (req->msg[0], &xlator_req,
- (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
- if (ret < 0) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- ctx = glusterfsd_ctx;
- active = ctx->active;
- if (!active) {
- ret = -1;
- gf_msg (this->name, GF_LOG_ERROR, EAGAIN, glusterfsd_msg_38,
- "Not processing brick-op no. %d since volume graph is "
- "not yet active.", xlator_req.op);
- goto out;
- }
- any = active->first;
- input = dict_new ();
- ret = dict_unserialize (xlator_req.input.input_val,
- xlator_req.input.input_len,
- &input);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "failed to "
- "unserialize req-buffer to dictionary");
- goto out;
- } else {
- input->extra_stdfree = xlator_req.input.input_val;
- }
-
- ret = dict_get_int32 (input, "count", &count);
-
- output = dict_new ();
- if (!output) {
- ret = -1;
- goto out;
- }
+ int32_t ret = -1;
+ int32_t op_ret = 0;
+ gd1_mgmt_brick_op_req xlator_req = {
+ 0,
+ };
+ dict_t *input = NULL;
+ xlator_t *xlator = NULL;
+ xlator_t *any = NULL;
+ dict_t *output = NULL;
+ char key[2048] = {0};
+ char *xname = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ glusterfs_graph_t *active = NULL;
+ xlator_t *this = NULL;
+ int i = 0;
+ int count = 0;
+
+ GF_ASSERT(req);
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = xdr_to_generic(req->msg[0], &xlator_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ ctx = glusterfsd_ctx;
+ active = ctx->active;
+ if (!active) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, EAGAIN, glusterfsd_msg_38,
+ "Not processing brick-op no. %d since volume graph is "
+ "not yet active.",
+ xlator_req.op);
+ goto out;
+ }
+ any = active->first;
+ input = dict_new();
+ ret = dict_unserialize(xlator_req.input.input_val,
+ xlator_req.input.input_len, &input);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ goto out;
+ } else {
+ input->extra_stdfree = xlator_req.input.input_val;
+ }
+
+ ret = dict_get_int32(input, "count", &count);
+
+ output = dict_new();
+ if (!output) {
+ ret = -1;
+ goto out;
+ }
- for (i = 0; i < count; i++) {
- snprintf (key, sizeof (key), "xl-%d", i);
- ret = dict_get_str (input, key, &xname);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Couldn't get "
- "xlator %s ", key);
- goto out;
- }
- xlator = xlator_search_by_name (any, xname);
- if (!xlator) {
- gf_log (this->name, GF_LOG_ERROR, "xlator %s is not "
- "loaded", xname);
- goto out;
- }
- }
- for (i = 0; i < count; i++) {
- snprintf (key, sizeof (key), "xl-%d", i);
- ret = dict_get_str (input, key, &xname);
- xlator = xlator_search_by_name (any, xname);
- XLATOR_NOTIFY (ret, xlator, GF_EVENT_TRANSLATOR_OP, input, output);
- /* If notify fails for an xlator we need to capture it but
- * continue with the loop. */
- if (ret)
- op_ret = -1;
+ for (i = 0; i < count; i++) {
+ snprintf(key, sizeof(key), "xl-%d", i);
+ ret = dict_get_str(input, key, &xname);
+ if (ret) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "Couldn't get "
+ "xlator %s ",
+ key);
+ goto out;
}
- ret = op_ret;
+ xlator = xlator_search_by_name(any, xname);
+ if (!xlator) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "xlator %s is not "
+ "loaded",
+ xname);
+ goto out;
+ }
+ }
+ for (i = 0; i < count; i++) {
+ snprintf(key, sizeof(key), "xl-%d", i);
+ ret = dict_get_str(input, key, &xname);
+ xlator = xlator_search_by_name(any, xname);
+ XLATOR_NOTIFY(ret, xlator, GF_EVENT_TRANSLATOR_OP, input, output);
+ /* If notify fails for an xlator we need to capture it but
+ * continue with the loop. */
+ if (ret)
+ op_ret = -1;
+ }
+ ret = op_ret;
out:
- glusterfs_xlator_op_response_send (req, ret, "", output);
- if (input)
- dict_unref (input);
- if (output)
- dict_unref (output);
- free (xlator_req.name); //malloced by xdr
-
- return 0;
+ glusterfs_xlator_op_response_send(req, ret, "", output);
+ if (input)
+ dict_unref(input);
+ if (output)
+ dict_unref(output);
+ free(xlator_req.name); // malloced by xdr
+
+ return 0;
}
int
-glusterfs_handle_bitrot (rpcsvc_request_t *req)
+glusterfs_handle_bitrot(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gd1_mgmt_brick_op_req xlator_req = {0,};
- dict_t *input = NULL;
- dict_t *output = NULL;
- xlator_t *any = NULL;
- xlator_t *this = NULL;
- xlator_t *xlator = NULL;
- char msg[2048] = {0,};
- char xname[1024] = {0,};
- glusterfs_ctx_t *ctx = NULL;
- glusterfs_graph_t *active = NULL;
- char *scrub_opt = NULL;
-
- GF_ASSERT (req);
- this = THIS;
- GF_ASSERT (this);
-
- ret = xdr_to_generic (req->msg[0], &xlator_req,
- (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
-
- if (ret < 0) {
- /*failed to decode msg;*/
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- ctx = glusterfsd_ctx;
- GF_ASSERT (ctx);
-
- active = ctx->active;
- if (!active) {
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- any = active->first;
-
- input = dict_new ();
- if (!input)
- goto out;
-
- ret = dict_unserialize (xlator_req.input.input_val,
- xlator_req.input.input_len,
- &input);
-
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0, glusterfsd_msg_35,
- "rpc req buffer unserialization failed.");
- goto out;
- }
-
- /* Send scrubber request to bitrot xlator */
- snprintf (xname, sizeof (xname), "%s-bit-rot-0", xlator_req.name);
- xlator = xlator_search_by_name (any, xname);
- if (!xlator) {
- snprintf (msg, sizeof (msg), "xlator %s is not loaded", xname);
- gf_msg (this->name, GF_LOG_ERROR, 0, glusterfsd_msg_36,
- "problem in xlator loading.");
- goto out;
- }
-
- output = dict_new ();
- if (!output) {
- ret = -1;
- goto out;
- }
-
- ret = dict_get_str (input, "scrub-value", &scrub_opt);
- if (ret) {
- snprintf (msg, sizeof (msg), "Failed to get scrub value");
- gf_msg (this->name, GF_LOG_ERROR, 0, glusterfsd_msg_37,
- "failed to get dict value");
- ret = -1;
- goto out;
- }
-
- if (!strncmp (scrub_opt, "status", SLEN ("status"))) {
- ret = xlator->notify (xlator, GF_EVENT_SCRUB_STATUS, input,
- output);
- } else if (!strncmp (scrub_opt, "ondemand", SLEN ("ondemand"))) {
- ret = xlator->notify (xlator, GF_EVENT_SCRUB_ONDEMAND, input,
- output);
- if (ret == -2) {
- snprintf (msg, sizeof (msg), "Scrubber is in "
- "Pause/Inactive/Running state");
- ret = -1;
- goto out;
- }
- }
+ int32_t ret = -1;
+ gd1_mgmt_brick_op_req xlator_req = {
+ 0,
+ };
+ dict_t *input = NULL;
+ dict_t *output = NULL;
+ xlator_t *any = NULL;
+ xlator_t *this = NULL;
+ xlator_t *xlator = NULL;
+ char msg[2048] = {
+ 0,
+ };
+ char xname[1024] = {
+ 0,
+ };
+ glusterfs_ctx_t *ctx = NULL;
+ glusterfs_graph_t *active = NULL;
+ char *scrub_opt = NULL;
+
+ GF_ASSERT(req);
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = xdr_to_generic(req->msg[0], &xlator_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+
+ if (ret < 0) {
+ /*failed to decode msg;*/
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ ctx = glusterfsd_ctx;
+ GF_ASSERT(ctx);
+
+ active = ctx->active;
+ if (!active) {
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ any = active->first;
+
+ input = dict_new();
+ if (!input)
+ goto out;
+
+ ret = dict_unserialize(xlator_req.input.input_val,
+ xlator_req.input.input_len, &input);
+
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_35,
+ "rpc req buffer unserialization failed.");
+ goto out;
+ }
+
+ /* Send scrubber request to bitrot xlator */
+ snprintf(xname, sizeof(xname), "%s-bit-rot-0", xlator_req.name);
+ xlator = xlator_search_by_name(any, xname);
+ if (!xlator) {
+ snprintf(msg, sizeof(msg), "xlator %s is not loaded", xname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_36,
+ "problem in xlator loading.");
+ goto out;
+ }
+
+ output = dict_new();
+ if (!output) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_str(input, "scrub-value", &scrub_opt);
+ if (ret) {
+ snprintf(msg, sizeof(msg), "Failed to get scrub value");
+ gf_msg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_37,
+ "failed to get dict value");
+ ret = -1;
+ goto out;
+ }
+
+ if (!strncmp(scrub_opt, "status", SLEN("status"))) {
+ ret = xlator->notify(xlator, GF_EVENT_SCRUB_STATUS, input, output);
+ } else if (!strncmp(scrub_opt, "ondemand", SLEN("ondemand"))) {
+ ret = xlator->notify(xlator, GF_EVENT_SCRUB_ONDEMAND, input, output);
+ if (ret == -2) {
+ snprintf(msg, sizeof(msg),
+ "Scrubber is in "
+ "Pause/Inactive/Running state");
+ ret = -1;
+ goto out;
+ }
+ }
out:
- glusterfs_translator_info_response_send (req, ret, msg, output);
+ glusterfs_translator_info_response_send(req, ret, msg, output);
- if (input)
- dict_unref (input);
- free (xlator_req.input.input_val); /*malloced by xdr*/
- if (output)
- dict_unref (output);
- free (xlator_req.name);
+ if (input)
+ dict_unref(input);
+ free(xlator_req.input.input_val); /*malloced by xdr*/
+ if (output)
+ dict_unref(output);
+ free(xlator_req.name);
- return 0;
+ return 0;
}
int
-glusterfs_handle_attach (rpcsvc_request_t *req)
+glusterfs_handle_attach(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gd1_mgmt_brick_op_req xlator_req = {0,};
- xlator_t *this = NULL;
- xlator_t *nextchild = NULL;
- glusterfs_graph_t *newgraph = NULL;
- glusterfs_ctx_t *ctx = NULL;
- xlator_t *srv_xl = NULL;
- server_conf_t *srv_conf = NULL;
-
- GF_ASSERT (req);
- this = THIS;
- GF_ASSERT (this);
-
- ctx = this->ctx;
- if (!ctx->cmd_args.volfile_id) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "No volfile-id provided, erroring out");
- return -1;
- }
-
- ret = xdr_to_generic (req->msg[0], &xlator_req,
- (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
-
- if (ret < 0) {
- /*failed to decode msg;*/
- req->rpc_err = GARBAGE_ARGS;
- return -1;
- }
- ret = 0;
-
- LOCK (&ctx->volfile_lock);
- {
- if (this->ctx->active) {
- gf_log (this->name, GF_LOG_INFO,
- "got attach for %s", xlator_req.name);
- ret = glusterfs_graph_attach (this->ctx->active,
- xlator_req.name, &newgraph);
- if (!ret && (newgraph && newgraph->first)) {
- nextchild = newgraph->first;
- ret = xlator_notify (nextchild,
- GF_EVENT_PARENT_UP,
- nextchild);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR,
- 0,
- LG_MSG_EVENT_NOTIFY_FAILED,
- "Parent up notification "
- "failed for %s ",
- nextchild->name);
- goto out;
- }
- /* we need a protocol/server xlator as
- * nextchild
- */
- srv_xl = this->ctx->active->first;
- srv_conf = (server_conf_t *)srv_xl->private;
- rpcsvc_autoscale_threads (this->ctx,
- srv_conf->rpc, 1);
- }
- } else {
- gf_log (this->name, GF_LOG_WARNING,
- "got attach for %s but no active graph",
- xlator_req.name);
- }
+ int32_t ret = -1;
+ gd1_mgmt_brick_op_req xlator_req = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ xlator_t *nextchild = NULL;
+ glusterfs_graph_t *newgraph = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ xlator_t *srv_xl = NULL;
+ server_conf_t *srv_conf = NULL;
+
+ GF_ASSERT(req);
+ this = THIS;
+ GF_ASSERT(this);
+
+ ctx = this->ctx;
+ if (!ctx->cmd_args.volfile_id) {
+ gf_log(THIS->name, GF_LOG_ERROR,
+ "No volfile-id provided, erroring out");
+ return -1;
+ }
+
+ ret = xdr_to_generic(req->msg[0], &xlator_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+
+ if (ret < 0) {
+ /*failed to decode msg;*/
+ req->rpc_err = GARBAGE_ARGS;
+ return -1;
+ }
+ ret = 0;
+
+ LOCK(&ctx->volfile_lock);
+ {
+ if (this->ctx->active) {
+ gf_log(this->name, GF_LOG_INFO, "got attach for %s",
+ xlator_req.name);
+ ret = glusterfs_graph_attach(this->ctx->active, xlator_req.name,
+ &newgraph);
+ if (!ret && (newgraph && newgraph->first)) {
+ nextchild = newgraph->first;
+ ret = xlator_notify(nextchild, GF_EVENT_PARENT_UP, nextchild);
if (ret) {
- ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ LG_MSG_EVENT_NOTIFY_FAILED,
+ "Parent up notification "
+ "failed for %s ",
+ nextchild->name);
+ goto out;
}
+ /* we need a protocol/server xlator as
+ * nextchild
+ */
+ srv_xl = this->ctx->active->first;
+ srv_conf = (server_conf_t *)srv_xl->private;
+ rpcsvc_autoscale_threads(this->ctx, srv_conf->rpc, 1);
+ }
+ } else {
+ gf_log(this->name, GF_LOG_WARNING,
+ "got attach for %s but no active graph", xlator_req.name);
+ }
+ if (ret) {
+ ret = -1;
+ }
- glusterfs_translator_info_response_send (req, ret, NULL, NULL);
+ glusterfs_translator_info_response_send(req, ret, NULL, NULL);
-out:
- UNLOCK (&ctx->volfile_lock);
- }
- free (xlator_req.input.input_val);
- free (xlator_req.name);
+ out:
+ UNLOCK(&ctx->volfile_lock);
+ }
+ free(xlator_req.input.input_val);
+ free(xlator_req.name);
- return ret;
+ return ret;
}
int
-glusterfs_handle_dump_metrics (rpcsvc_request_t *req)
+glusterfs_handle_dump_metrics(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gd1_mgmt_brick_op_req xlator_req = {0,};
- xlator_t *this = NULL;
- glusterfs_ctx_t *ctx = NULL;
- char *filepath = NULL;
- int fd = -1;
- struct stat statbuf = {0,};
- char *msg = NULL;
-
- GF_ASSERT (req);
- this = THIS;
- GF_ASSERT (this);
-
- ret = xdr_to_generic (req->msg[0], &xlator_req,
- (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
-
- if (ret < 0) {
- /*failed to decode msg;*/
- req->rpc_err = GARBAGE_ARGS;
- return -1;
- }
- ret = -1;
- ctx = this->ctx;
-
- /* Infra for monitoring */
- filepath = gf_monitor_metrics (ctx);
- if (!filepath)
- goto out;
-
- fd = sys_open (filepath, O_RDONLY, 0);
- if (fd < 0)
- goto out;
-
- if (sys_fstat (fd, &statbuf) < 0)
- goto out;
-
- if (statbuf.st_size > GF_UNIT_MB) {
- gf_msg (this->name, GF_LOG_WARNING, ENOMEM,
- LG_MSG_NO_MEMORY,
- "Allocated size exceeds expectation: "
- "reconsider logic (%"GF_PRI_SIZET")",
- statbuf.st_size);
- }
- msg = GF_CALLOC (1, (statbuf.st_size + 1), gf_common_mt_char);
- if (!msg)
- goto out;
-
- ret = sys_read (fd, msg, statbuf.st_size);
- if (ret < 0)
- goto out;
-
- /* Send all the data in errstr, instead of dictionary for now */
- glusterfs_translator_info_response_send (req, 0, msg, NULL);
-
- ret = 0;
+ int32_t ret = -1;
+ gd1_mgmt_brick_op_req xlator_req = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ char *filepath = NULL;
+ int fd = -1;
+ struct stat statbuf = {
+ 0,
+ };
+ char *msg = NULL;
+
+ GF_ASSERT(req);
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = xdr_to_generic(req->msg[0], &xlator_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+
+ if (ret < 0) {
+ /*failed to decode msg;*/
+ req->rpc_err = GARBAGE_ARGS;
+ return -1;
+ }
+ ret = -1;
+ ctx = this->ctx;
+
+ /* Infra for monitoring */
+ filepath = gf_monitor_metrics(ctx);
+ if (!filepath)
+ goto out;
+
+ fd = sys_open(filepath, O_RDONLY, 0);
+ if (fd < 0)
+ goto out;
+
+ if (sys_fstat(fd, &statbuf) < 0)
+ goto out;
+
+ if (statbuf.st_size > GF_UNIT_MB) {
+ gf_msg(this->name, GF_LOG_WARNING, ENOMEM, LG_MSG_NO_MEMORY,
+ "Allocated size exceeds expectation: "
+ "reconsider logic (%" GF_PRI_SIZET ")",
+ statbuf.st_size);
+ }
+ msg = GF_CALLOC(1, (statbuf.st_size + 1), gf_common_mt_char);
+ if (!msg)
+ goto out;
+
+ ret = sys_read(fd, msg, statbuf.st_size);
+ if (ret < 0)
+ goto out;
+
+ /* Send all the data in errstr, instead of dictionary for now */
+ glusterfs_translator_info_response_send(req, 0, msg, NULL);
+
+ ret = 0;
out:
- if (fd >= 0)
- sys_close (fd);
+ if (fd >= 0)
+ sys_close(fd);
- GF_FREE (msg);
- GF_FREE (filepath);
+ GF_FREE(msg);
+ GF_FREE(filepath);
- return ret;
+ return ret;
}
int
-glusterfs_handle_defrag (rpcsvc_request_t *req)
+glusterfs_handle_defrag(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gd1_mgmt_brick_op_req xlator_req = {0,};
- dict_t *dict = NULL;
- xlator_t *xlator = NULL;
- xlator_t *any = NULL;
- dict_t *output = NULL;
- char msg[2048] = {0};
- glusterfs_ctx_t *ctx = NULL;
- glusterfs_graph_t *active = NULL;
- xlator_t *this = NULL;
-
- GF_ASSERT (req);
- this = THIS;
- GF_ASSERT (this);
-
- ctx = glusterfsd_ctx;
- GF_ASSERT (ctx);
-
- active = ctx->active;
- if (!active) {
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- any = active->first;
- ret = xdr_to_generic (req->msg[0], &xlator_req,
- (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
- if (ret < 0) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
- dict = dict_new ();
- if (!dict)
- goto out;
-
- ret = dict_unserialize (xlator_req.input.input_val,
- xlator_req.input.input_len,
- &dict);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "failed to "
- "unserialize req-buffer to dictionary");
- goto out;
- }
- xlator = xlator_search_by_name (any, xlator_req.name);
- if (!xlator) {
- snprintf (msg, sizeof (msg), "xlator %s is not loaded",
- xlator_req.name);
- goto out;
- }
-
- output = dict_new ();
- if (!output) {
- ret = -1;
- goto out;
- }
+ int32_t ret = -1;
+ gd1_mgmt_brick_op_req xlator_req = {
+ 0,
+ };
+ dict_t *dict = NULL;
+ xlator_t *xlator = NULL;
+ xlator_t *any = NULL;
+ dict_t *output = NULL;
+ char msg[2048] = {0};
+ glusterfs_ctx_t *ctx = NULL;
+ glusterfs_graph_t *active = NULL;
+ xlator_t *this = NULL;
+
+ GF_ASSERT(req);
+ this = THIS;
+ GF_ASSERT(this);
+
+ ctx = glusterfsd_ctx;
+ GF_ASSERT(ctx);
+
+ active = ctx->active;
+ if (!active) {
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ any = active->first;
+ ret = xdr_to_generic(req->msg[0], &xlator_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+ dict = dict_new();
+ if (!dict)
+ goto out;
+
+ ret = dict_unserialize(xlator_req.input.input_val,
+ xlator_req.input.input_len, &dict);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ goto out;
+ }
+ xlator = xlator_search_by_name(any, xlator_req.name);
+ if (!xlator) {
+ snprintf(msg, sizeof(msg), "xlator %s is not loaded", xlator_req.name);
+ goto out;
+ }
+
+ output = dict_new();
+ if (!output) {
+ ret = -1;
+ goto out;
+ }
- ret = xlator->notify (xlator, GF_EVENT_VOLUME_DEFRAG, dict, output);
+ ret = xlator->notify(xlator, GF_EVENT_VOLUME_DEFRAG, dict, output);
- ret = glusterfs_translator_info_response_send (req, ret,
- msg, output);
+ ret = glusterfs_translator_info_response_send(req, ret, msg, output);
out:
- if (dict)
- dict_unref (dict);
- free (xlator_req.input.input_val); // malloced by xdr
- if (output)
- dict_unref (output);
- free (xlator_req.name); //malloced by xdr
-
- return ret;
-
+ if (dict)
+ dict_unref(dict);
+ free(xlator_req.input.input_val); // malloced by xdr
+ if (output)
+ dict_unref(output);
+ free(xlator_req.name); // malloced by xdr
+
+ return ret;
}
int
-glusterfs_handle_brick_status (rpcsvc_request_t *req)
+glusterfs_handle_brick_status(rpcsvc_request_t *req)
{
- int ret = -1;
- gd1_mgmt_brick_op_req brick_req = {0,};
- gd1_mgmt_brick_op_rsp rsp = {0,};
- glusterfs_ctx_t *ctx = NULL;
- glusterfs_graph_t *active = NULL;
- xlator_t *this = NULL;
- xlator_t *server_xl = NULL;
- xlator_t *brick_xl = NULL;
- dict_t *dict = NULL;
- dict_t *output = NULL;
- char *xname = NULL;
- uint32_t cmd = 0;
- char *msg = NULL;
- char *brickname = NULL;
-
- GF_ASSERT (req);
- this = THIS;
- GF_ASSERT (this);
-
- ret = xdr_to_generic (req->msg[0], &brick_req,
- (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
- if (ret < 0) {
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- dict = dict_new ();
- ret = dict_unserialize (brick_req.input.input_val,
- brick_req.input.input_len, &dict);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to unserialize "
- "req-buffer to dictionary");
- goto out;
- }
-
- ret = dict_get_uint32 (dict, "cmd", &cmd);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Couldn't get status op");
- goto out;
- }
+ int ret = -1;
+ gd1_mgmt_brick_op_req brick_req = {
+ 0,
+ };
+ gd1_mgmt_brick_op_rsp rsp = {
+ 0,
+ };
+ glusterfs_ctx_t *ctx = NULL;
+ glusterfs_graph_t *active = NULL;
+ xlator_t *this = NULL;
+ xlator_t *server_xl = NULL;
+ xlator_t *brick_xl = NULL;
+ dict_t *dict = NULL;
+ dict_t *output = NULL;
+ char *xname = NULL;
+ uint32_t cmd = 0;
+ char *msg = NULL;
+ char *brickname = NULL;
+
+ GF_ASSERT(req);
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = xdr_to_generic(req->msg[0], &brick_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret < 0) {
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ dict = dict_new();
+ ret = dict_unserialize(brick_req.input.input_val, brick_req.input.input_len,
+ &dict);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "Failed to unserialize "
+ "req-buffer to dictionary");
+ goto out;
+ }
+
+ ret = dict_get_uint32(dict, "cmd", &cmd);
+ if (ret) {
+ gf_log(this->name, GF_LOG_ERROR, "Couldn't get status op");
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "brick-name", &brickname);
+ if (ret) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "Couldn't get brickname from"
+ " dict");
+ goto out;
+ }
+
+ ctx = glusterfsd_ctx;
+ if (ctx == NULL) {
+ gf_log(this->name, GF_LOG_ERROR, "ctx returned NULL");
+ ret = -1;
+ goto out;
+ }
+ if (ctx->active == NULL) {
+ gf_log(this->name, GF_LOG_ERROR, "ctx->active returned NULL");
+ ret = -1;
+ goto out;
+ }
+ active = ctx->active;
+ if (ctx->active->first == NULL) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "ctx->active->first "
+ "returned NULL");
+ ret = -1;
+ goto out;
+ }
+ server_xl = active->first;
- ret = dict_get_str (dict, "brick-name", &brickname);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Couldn't get brickname from"
- " dict");
- goto out;
- }
+ brick_xl = get_xlator_by_name(server_xl, brickname);
+ if (!brick_xl) {
+ gf_log(this->name, GF_LOG_ERROR, "xlator %s is not loaded", xname);
+ ret = -1;
+ goto out;
+ }
+
+ output = dict_new();
+ switch (cmd & GF_CLI_STATUS_MASK) {
+ case GF_CLI_STATUS_MEM:
+ ret = 0;
+ gf_proc_dump_mem_info_to_dict(output);
+ gf_proc_dump_mempool_info_to_dict(ctx, output);
+ break;
+
+ case GF_CLI_STATUS_CLIENTS:
+ case GF_CLI_STATUS_CLIENT_LIST:
+ ret = server_xl->dumpops->priv_to_dict(server_xl, output,
+ brickname);
+ break;
+
+ case GF_CLI_STATUS_INODE:
+ ret = server_xl->dumpops->inode_to_dict(brick_xl, output);
+ break;
+
+ case GF_CLI_STATUS_FD:
+ ret = server_xl->dumpops->fd_to_dict(brick_xl, output);
+ break;
+
+ case GF_CLI_STATUS_CALLPOOL:
+ ret = 0;
+ gf_proc_dump_pending_frames_to_dict(ctx->pool, output);
+ break;
- ctx = glusterfsd_ctx;
- if (ctx == NULL) {
- gf_log (this->name, GF_LOG_ERROR, "ctx returned NULL");
- ret = -1;
- goto out;
- }
- if (ctx->active == NULL) {
- gf_log (this->name, GF_LOG_ERROR, "ctx->active returned NULL");
- ret = -1;
- goto out;
- }
- active = ctx->active;
- if (ctx->active->first == NULL) {
- gf_log (this->name, GF_LOG_ERROR, "ctx->active->first "
- "returned NULL");
- ret = -1;
- goto out;
- }
- server_xl = active->first;
+ default:
+ ret = -1;
+ msg = gf_strdup("Unknown status op");
+ break;
+ }
+ rsp.op_ret = ret;
+ rsp.op_errno = 0;
+ if (ret && msg)
+ rsp.op_errstr = msg;
+ else
+ rsp.op_errstr = "";
- brick_xl = get_xlator_by_name (server_xl, brickname);
- if (!brick_xl) {
- gf_log (this->name, GF_LOG_ERROR, "xlator %s is not loaded",
- xname);
- ret = -1;
- goto out;
- }
+ ret = dict_allocate_and_serialize(output, &rsp.output.output_val,
+ &rsp.output.output_len);
+ if (ret) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "Failed to serialize output dict to rsp");
+ goto out;
+ }
- output = dict_new ();
- switch (cmd & GF_CLI_STATUS_MASK) {
- case GF_CLI_STATUS_MEM:
- ret = 0;
- gf_proc_dump_mem_info_to_dict (output);
- gf_proc_dump_mempool_info_to_dict (ctx, output);
- break;
-
- case GF_CLI_STATUS_CLIENTS:
- case GF_CLI_STATUS_CLIENT_LIST:
- ret = server_xl->dumpops->priv_to_dict (server_xl,
- output, brickname);
- break;
-
- case GF_CLI_STATUS_INODE:
- ret = server_xl->dumpops->inode_to_dict (brick_xl,
- output);
- break;
-
- case GF_CLI_STATUS_FD:
- ret = server_xl->dumpops->fd_to_dict (brick_xl, output);
- break;
-
- case GF_CLI_STATUS_CALLPOOL:
- ret = 0;
- gf_proc_dump_pending_frames_to_dict (ctx->pool, output);
- break;
-
- default:
- ret = -1;
- msg = gf_strdup ("Unknown status op");
- break;
- }
- rsp.op_ret = ret;
- rsp.op_errno = 0;
- if (ret && msg)
- rsp.op_errstr = msg;
- else
- rsp.op_errstr = "";
-
- ret = dict_allocate_and_serialize (output, &rsp.output.output_val,
- &rsp.output.output_len);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Failed to serialize output dict to rsp");
- goto out;
- }
-
- glusterfs_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
- ret = 0;
+ glusterfs_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
+ ret = 0;
out:
- if (dict)
- dict_unref (dict);
- if (output)
- dict_unref (output);
- free (brick_req.input.input_val);
- GF_FREE (xname);
- GF_FREE (msg);
- GF_FREE (rsp.output.output_val);
-
- return ret;
+ if (dict)
+ dict_unref(dict);
+ if (output)
+ dict_unref(output);
+ free(brick_req.input.input_val);
+ GF_FREE(xname);
+ GF_FREE(msg);
+ GF_FREE(rsp.output.output_val);
+
+ return ret;
}
-
int
-glusterfs_handle_node_status (rpcsvc_request_t *req)
+glusterfs_handle_node_status(rpcsvc_request_t *req)
{
- int ret = -1;
- gd1_mgmt_brick_op_req node_req = {0,};
- gd1_mgmt_brick_op_rsp rsp = {0,};
- glusterfs_ctx_t *ctx = NULL;
- glusterfs_graph_t *active = NULL;
- xlator_t *any = NULL;
- xlator_t *node = NULL;
- xlator_t *subvol = NULL;
- dict_t *dict = NULL;
- dict_t *output = NULL;
- char *volname = NULL;
- char *node_name = NULL;
- char *subvol_name = NULL;
- uint32_t cmd = 0;
- char *msg = NULL;
-
- GF_ASSERT (req);
-
- ret = xdr_to_generic (req->msg[0], &node_req,
- (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
- if (ret < 0) {
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- dict = dict_new ();
- ret = dict_unserialize (node_req.input.input_val,
- node_req.input.input_len, &dict);
- if (ret < 0) {
- gf_log (THIS->name, GF_LOG_ERROR, "Failed to unserialize "
- "req buffer to dictionary");
- goto out;
- }
-
- ret = dict_get_uint32 (dict, "cmd", &cmd);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "Couldn't get status op");
- goto out;
- }
-
- ret = dict_get_str (dict, "volname", &volname);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "Couldn't get volname");
- goto out;
- }
+ int ret = -1;
+ gd1_mgmt_brick_op_req node_req = {
+ 0,
+ };
+ gd1_mgmt_brick_op_rsp rsp = {
+ 0,
+ };
+ glusterfs_ctx_t *ctx = NULL;
+ glusterfs_graph_t *active = NULL;
+ xlator_t *any = NULL;
+ xlator_t *node = NULL;
+ xlator_t *subvol = NULL;
+ dict_t *dict = NULL;
+ dict_t *output = NULL;
+ char *volname = NULL;
+ char *node_name = NULL;
+ char *subvol_name = NULL;
+ uint32_t cmd = 0;
+ char *msg = NULL;
+
+ GF_ASSERT(req);
+
+ ret = xdr_to_generic(req->msg[0], &node_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret < 0) {
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ dict = dict_new();
+ ret = dict_unserialize(node_req.input.input_val, node_req.input.input_len,
+ &dict);
+ if (ret < 0) {
+ gf_log(THIS->name, GF_LOG_ERROR,
+ "Failed to unserialize "
+ "req buffer to dictionary");
+ goto out;
+ }
+
+ ret = dict_get_uint32(dict, "cmd", &cmd);
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_ERROR, "Couldn't get status op");
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "volname", &volname);
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_ERROR, "Couldn't get volname");
+ goto out;
+ }
+
+ ctx = glusterfsd_ctx;
+ GF_ASSERT(ctx);
+ active = ctx->active;
+ any = active->first;
+
+ if ((cmd & GF_CLI_STATUS_NFS) != 0)
+ ret = gf_asprintf(&node_name, "%s", "nfs-server");
+ else if ((cmd & GF_CLI_STATUS_SHD) != 0)
+ ret = gf_asprintf(&node_name, "%s", "glustershd");
+ else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0)
+ ret = gf_asprintf(&node_name, "%s", "quotad");
+ else if ((cmd & GF_CLI_STATUS_BITD) != 0)
+ ret = gf_asprintf(&node_name, "%s", "bitd");
+ else if ((cmd & GF_CLI_STATUS_SCRUB) != 0)
+ ret = gf_asprintf(&node_name, "%s", "scrubber");
+
+ else {
+ ret = -1;
+ goto out;
+ }
+ if (ret == -1) {
+ gf_log(THIS->name, GF_LOG_ERROR, "Failed to set node xlator name");
+ goto out;
+ }
+
+ node = xlator_search_by_name(any, node_name);
+ if (!node) {
+ ret = -1;
+ gf_log(THIS->name, GF_LOG_ERROR, "%s xlator is not loaded", node_name);
+ goto out;
+ }
+
+ if ((cmd & GF_CLI_STATUS_NFS) != 0)
+ ret = gf_asprintf(&subvol_name, "%s", volname);
+ else if ((cmd & GF_CLI_STATUS_SHD) != 0)
+ ret = gf_asprintf(&subvol_name, "%s-replicate-0", volname);
+ else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0)
+ ret = gf_asprintf(&subvol_name, "%s", volname);
+ else if ((cmd & GF_CLI_STATUS_BITD) != 0)
+ ret = gf_asprintf(&subvol_name, "%s", volname);
+ else if ((cmd & GF_CLI_STATUS_SCRUB) != 0)
+ ret = gf_asprintf(&subvol_name, "%s", volname);
+ else {
+ ret = -1;
+ goto out;
+ }
+ if (ret == -1) {
+ gf_log(THIS->name, GF_LOG_ERROR, "Failed to set node xlator name");
+ goto out;
+ }
+
+ subvol = xlator_search_by_name(node, subvol_name);
+ if (!subvol) {
+ ret = -1;
+ gf_log(THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
+ subvol_name);
+ goto out;
+ }
+
+ output = dict_new();
+ switch (cmd & GF_CLI_STATUS_MASK) {
+ case GF_CLI_STATUS_MEM:
+ ret = 0;
+ gf_proc_dump_mem_info_to_dict(output);
+ gf_proc_dump_mempool_info_to_dict(ctx, output);
+ break;
+
+ case GF_CLI_STATUS_CLIENTS:
+            // clients not available for SHD
+ if ((cmd & GF_CLI_STATUS_SHD) != 0)
+ break;
- ctx = glusterfsd_ctx;
- GF_ASSERT (ctx);
- active = ctx->active;
- any = active->first;
-
- if ((cmd & GF_CLI_STATUS_NFS) != 0)
- ret = gf_asprintf (&node_name, "%s", "nfs-server");
- else if ((cmd & GF_CLI_STATUS_SHD) != 0)
- ret = gf_asprintf (&node_name, "%s", "glustershd");
- else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0)
- ret = gf_asprintf (&node_name, "%s", "quotad");
- else if ((cmd & GF_CLI_STATUS_BITD) != 0)
- ret = gf_asprintf (&node_name, "%s", "bitd");
- else if ((cmd & GF_CLI_STATUS_SCRUB) != 0)
- ret = gf_asprintf (&node_name, "%s", "scrubber");
-
- else {
- ret = -1;
- goto out;
- }
- if (ret == -1) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Failed to set node xlator name");
+ ret = dict_set_str(output, "volname", volname);
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_ERROR,
+ "Error setting volname to dict");
goto out;
- }
+ }
+ ret = node->dumpops->priv_to_dict(node, output, NULL);
+ break;
- node = xlator_search_by_name (any, node_name);
- if (!node) {
- ret = -1;
- gf_log (THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
- node_name);
- goto out;
- }
+ case GF_CLI_STATUS_INODE:
+ ret = 0;
+ inode_table_dump_to_dict(subvol->itable, "conn0", output);
+ ret = dict_set_int32(output, "conncount", 1);
+ break;
- if ((cmd & GF_CLI_STATUS_NFS) != 0)
- ret = gf_asprintf (&subvol_name, "%s", volname);
- else if ((cmd & GF_CLI_STATUS_SHD) != 0)
- ret = gf_asprintf (&subvol_name, "%s-replicate-0", volname);
- else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0)
- ret = gf_asprintf (&subvol_name, "%s", volname);
- else if ((cmd & GF_CLI_STATUS_BITD) != 0)
- ret = gf_asprintf (&subvol_name, "%s", volname);
- else if ((cmd & GF_CLI_STATUS_SCRUB) != 0)
- ret = gf_asprintf (&subvol_name, "%s", volname);
- else {
- ret = -1;
- goto out;
- }
- if (ret == -1) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Failed to set node xlator name");
- goto out;
- }
+ case GF_CLI_STATUS_FD:
+ // cannot find fd-tables in nfs-server graph
+ // TODO: finish once found
+ break;
- subvol = xlator_search_by_name (node, subvol_name);
- if (!subvol) {
- ret = -1;
- gf_log (THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
- subvol_name);
- goto out;
- }
+ case GF_CLI_STATUS_CALLPOOL:
+ ret = 0;
+ gf_proc_dump_pending_frames_to_dict(ctx->pool, output);
+ break;
- output = dict_new ();
- switch (cmd & GF_CLI_STATUS_MASK) {
- case GF_CLI_STATUS_MEM:
- ret = 0;
- gf_proc_dump_mem_info_to_dict (output);
- gf_proc_dump_mempool_info_to_dict (ctx, output);
- break;
-
- case GF_CLI_STATUS_CLIENTS:
- // clients not availbale for SHD
- if ((cmd & GF_CLI_STATUS_SHD) != 0)
- break;
-
- ret = dict_set_str (output, "volname", volname);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Error setting volname to dict");
- goto out;
- }
- ret = node->dumpops->priv_to_dict (node, output, NULL);
- break;
-
- case GF_CLI_STATUS_INODE:
- ret = 0;
- inode_table_dump_to_dict (subvol->itable, "conn0",
- output);
- ret = dict_set_int32 (output, "conncount", 1);
- break;
-
- case GF_CLI_STATUS_FD:
- // cannot find fd-tables in nfs-server graph
- // TODO: finish once found
- break;
-
- case GF_CLI_STATUS_CALLPOOL:
- ret = 0;
- gf_proc_dump_pending_frames_to_dict (ctx->pool, output);
- break;
-
- default:
- ret = -1;
- msg = gf_strdup ("Unknown status op");
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
- break;
- }
- rsp.op_ret = ret;
- rsp.op_errno = 0;
- if (ret && msg)
- rsp.op_errstr = msg;
- else
- rsp.op_errstr = "";
+ default:
+ ret = -1;
+ msg = gf_strdup("Unknown status op");
+ gf_log(THIS->name, GF_LOG_ERROR, "%s", msg);
+ break;
+ }
+ rsp.op_ret = ret;
+ rsp.op_errno = 0;
+ if (ret && msg)
+ rsp.op_errstr = msg;
+ else
+ rsp.op_errstr = "";
- ret = dict_allocate_and_serialize (output, &rsp.output.output_val,
- &rsp.output.output_len);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Failed to serialize output dict to rsp");
- goto out;
- }
+ ret = dict_allocate_and_serialize(output, &rsp.output.output_val,
+ &rsp.output.output_len);
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_ERROR,
+ "Failed to serialize output dict to rsp");
+ goto out;
+ }
- glusterfs_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
- ret = 0;
+ glusterfs_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
+ ret = 0;
out:
- if (dict)
- dict_unref (dict);
- free (node_req.input.input_val);
- GF_FREE (msg);
- GF_FREE (rsp.output.output_val);
- GF_FREE (node_name);
- GF_FREE (subvol_name);
-
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
- return ret;
+ if (dict)
+ dict_unref(dict);
+ free(node_req.input.input_val);
+ GF_FREE(msg);
+ GF_FREE(rsp.output.output_val);
+ GF_FREE(node_name);
+ GF_FREE(subvol_name);
+
+ gf_log(THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
}
int
-glusterfs_handle_nfs_profile (rpcsvc_request_t *req)
+glusterfs_handle_nfs_profile(rpcsvc_request_t *req)
{
- int ret = -1;
- gd1_mgmt_brick_op_req nfs_req = {0,};
- gd1_mgmt_brick_op_rsp rsp = {0,};
- dict_t *dict = NULL;
- glusterfs_ctx_t *ctx = NULL;
- glusterfs_graph_t *active = NULL;
- xlator_t *any = NULL;
- xlator_t *nfs = NULL;
- xlator_t *subvol = NULL;
- char *volname = NULL;
- dict_t *output = NULL;
-
- GF_ASSERT (req);
-
- ret = xdr_to_generic (req->msg[0], &nfs_req,
- (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
- if (ret < 0) {
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- dict = dict_new ();
- ret = dict_unserialize (nfs_req.input.input_val,
- nfs_req.input.input_len, &dict);
- if (ret < 0) {
- gf_log (THIS->name, GF_LOG_ERROR, "Failed to "
- "unserialize req-buffer to dict");
- goto out;
- }
-
- ret = dict_get_str (dict, "volname", &volname);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "Couldn't get volname");
- goto out;
- }
-
- ctx = glusterfsd_ctx;
- GF_ASSERT (ctx);
-
- active = ctx->active;
- any = active->first;
-
- // is this needed?
- // are problems possible by searching for subvol directly from "any"?
- nfs = xlator_search_by_name (any, "nfs-server");
- if (!nfs) {
- ret = -1;
- gf_log (THIS->name, GF_LOG_ERROR, "xlator nfs-server is "
- "not loaded");
- goto out;
- }
-
- subvol = xlator_search_by_name (nfs, volname);
- if (!subvol) {
- ret = -1;
- gf_log (THIS->name, GF_LOG_ERROR, "xlator %s is no loaded",
- volname);
- goto out;
- }
+ int ret = -1;
+ gd1_mgmt_brick_op_req nfs_req = {
+ 0,
+ };
+ gd1_mgmt_brick_op_rsp rsp = {
+ 0,
+ };
+ dict_t *dict = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ glusterfs_graph_t *active = NULL;
+ xlator_t *any = NULL;
+ xlator_t *nfs = NULL;
+ xlator_t *subvol = NULL;
+ char *volname = NULL;
+ dict_t *output = NULL;
+
+ GF_ASSERT(req);
+
+ ret = xdr_to_generic(req->msg[0], &nfs_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret < 0) {
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ dict = dict_new();
+ ret = dict_unserialize(nfs_req.input.input_val, nfs_req.input.input_len,
+ &dict);
+ if (ret < 0) {
+ gf_log(THIS->name, GF_LOG_ERROR,
+ "Failed to "
+ "unserialize req-buffer to dict");
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "volname", &volname);
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_ERROR, "Couldn't get volname");
+ goto out;
+ }
+
+ ctx = glusterfsd_ctx;
+ GF_ASSERT(ctx);
+
+ active = ctx->active;
+ any = active->first;
+
+ // is this needed?
+ // are problems possible by searching for subvol directly from "any"?
+ nfs = xlator_search_by_name(any, "nfs-server");
+ if (!nfs) {
+ ret = -1;
+ gf_log(THIS->name, GF_LOG_ERROR,
+ "xlator nfs-server is "
+ "not loaded");
+ goto out;
+ }
+
+ subvol = xlator_search_by_name(nfs, volname);
+ if (!subvol) {
+ ret = -1;
+        gf_log(THIS->name, GF_LOG_ERROR, "xlator %s is not loaded", volname);
+ goto out;
+ }
- output = dict_new ();
- ret = subvol->notify (subvol, GF_EVENT_TRANSLATOR_INFO, dict, output);
+ output = dict_new();
+ ret = subvol->notify(subvol, GF_EVENT_TRANSLATOR_INFO, dict, output);
- rsp.op_ret = ret;
- rsp.op_errno = 0;
- rsp.op_errstr = "";
+ rsp.op_ret = ret;
+ rsp.op_errno = 0;
+ rsp.op_errstr = "";
- ret = dict_allocate_and_serialize (output, &rsp.output.output_val,
- &rsp.output.output_len);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Failed to serialize output dict to rsp");
- goto out;
- }
+ ret = dict_allocate_and_serialize(output, &rsp.output.output_val,
+ &rsp.output.output_len);
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_ERROR,
+ "Failed to serialize output dict to rsp");
+ goto out;
+ }
- glusterfs_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
- ret = 0;
+ glusterfs_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
+ ret = 0;
out:
- free (nfs_req.input.input_val);
- if (dict)
- dict_unref (dict);
- if (output)
- dict_unref (output);
- GF_FREE (rsp.output.output_val);
-
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
- return ret;
+ free(nfs_req.input.input_val);
+ if (dict)
+ dict_unref(dict);
+ if (output)
+ dict_unref(output);
+ GF_FREE(rsp.output.output_val);
+
+ gf_log(THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
}
int
-glusterfs_handle_volume_barrier_op (rpcsvc_request_t *req)
+glusterfs_handle_volume_barrier_op(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gd1_mgmt_brick_op_req xlator_req = {0,};
- dict_t *dict = NULL;
- xlator_t *xlator = NULL;
- xlator_t *any = NULL;
- dict_t *output = NULL;
- char msg[2048] = {0};
- glusterfs_ctx_t *ctx = NULL;
- glusterfs_graph_t *active = NULL;
- xlator_t *this = NULL;
-
- GF_ASSERT (req);
- this = THIS;
- GF_ASSERT (this);
-
- ctx = glusterfsd_ctx;
- GF_ASSERT (ctx);
-
- active = ctx->active;
- if (!active) {
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- any = active->first;
- ret = xdr_to_generic (req->msg[0], &xlator_req,
- (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
- if (ret < 0) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
- dict = dict_new ();
- if (!dict)
- goto out;
-
- ret = dict_unserialize (xlator_req.input.input_val,
- xlator_req.input.input_len,
- &dict);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "failed to "
- "unserialize req-buffer to dictionary");
- goto out;
- }
- xlator = xlator_search_by_name (any, xlator_req.name);
- if (!xlator) {
- snprintf (msg, sizeof (msg), "xlator %s is not loaded",
- xlator_req.name);
- goto out;
- }
-
- output = dict_new ();
- if (!output) {
- ret = -1;
- goto out;
- }
+ int32_t ret = -1;
+ gd1_mgmt_brick_op_req xlator_req = {
+ 0,
+ };
+ dict_t *dict = NULL;
+ xlator_t *xlator = NULL;
+ xlator_t *any = NULL;
+ dict_t *output = NULL;
+ char msg[2048] = {0};
+ glusterfs_ctx_t *ctx = NULL;
+ glusterfs_graph_t *active = NULL;
+ xlator_t *this = NULL;
+
+ GF_ASSERT(req);
+ this = THIS;
+ GF_ASSERT(this);
+
+ ctx = glusterfsd_ctx;
+ GF_ASSERT(ctx);
+
+ active = ctx->active;
+ if (!active) {
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ any = active->first;
+ ret = xdr_to_generic(req->msg[0], &xlator_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+ dict = dict_new();
+ if (!dict)
+ goto out;
+
+ ret = dict_unserialize(xlator_req.input.input_val,
+ xlator_req.input.input_len, &dict);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ goto out;
+ }
+ xlator = xlator_search_by_name(any, xlator_req.name);
+ if (!xlator) {
+ snprintf(msg, sizeof(msg), "xlator %s is not loaded", xlator_req.name);
+ goto out;
+ }
+
+ output = dict_new();
+ if (!output) {
+ ret = -1;
+ goto out;
+ }
- ret = xlator->notify (xlator, GF_EVENT_VOLUME_BARRIER_OP,
- dict, output);
+ ret = xlator->notify(xlator, GF_EVENT_VOLUME_BARRIER_OP, dict, output);
- ret = glusterfs_translator_info_response_send (req, ret,
- msg, output);
+ ret = glusterfs_translator_info_response_send(req, ret, msg, output);
out:
- if (dict)
- dict_unref (dict);
- free (xlator_req.input.input_val); // malloced by xdr
- if (output)
- dict_unref (output);
- free (xlator_req.name); //malloced by xdr
-
- return ret;
-
+ if (dict)
+ dict_unref(dict);
+ free(xlator_req.input.input_val); // malloced by xdr
+ if (output)
+ dict_unref(output);
+ free(xlator_req.name); // malloced by xdr
+
+ return ret;
}
int
-glusterfs_handle_barrier (rpcsvc_request_t *req)
+glusterfs_handle_barrier(rpcsvc_request_t *req)
{
- int ret = -1;
- gd1_mgmt_brick_op_req brick_req = {0,};
- gd1_mgmt_brick_op_rsp brick_rsp = {0,};
- glusterfs_ctx_t *ctx = NULL;
- glusterfs_graph_t *active = NULL;
- xlator_t *top = NULL;
- xlator_t *xlator = NULL;
- xlator_t *old_THIS = NULL;
- dict_t *dict = NULL;
- gf_boolean_t barrier = _gf_true;
- gf_boolean_t barrier_err = _gf_false;
- xlator_list_t *trav;
-
- GF_ASSERT (req);
-
- ret = xdr_to_generic(req->msg[0], &brick_req,
- (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
- if (ret < 0) {
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- ctx = glusterfsd_ctx;
- GF_ASSERT (ctx);
- active = ctx->active;
- top = active->first;
-
- for (trav = top->children; trav; trav = trav->next) {
- if (strcmp (trav->xlator->name, brick_req.name) == 0) {
- break;
- }
- }
- if (!trav) {
- ret = -1;
- goto out;
- }
- top = trav->xlator;
-
- dict = dict_new();
- if (!dict) {
- ret = -1;
- goto out;
- }
-
- ret = dict_unserialize(brick_req.input.input_val,
- brick_req.input.input_len, &dict);
- if (ret < 0) {
- gf_log (THIS->name, GF_LOG_ERROR, "Failed to unserialize "
- "request dictionary");
- goto out;
- }
-
- brick_rsp.op_ret = 0;
- brick_rsp.op_errstr = ""; // initing to prevent serilaztion failures
- old_THIS = THIS;
-
- /* Send barrier request to the barrier xlator */
- xlator = get_xlator_by_type (top, "features/barrier");
- if (!xlator) {
- ret = -1;
- gf_log (THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
- "features/barrier");
- goto out;
- }
-
- THIS = xlator;
- // TODO: Extend this to accept return of errnos
- ret = xlator->notify (xlator, GF_EVENT_TRANSLATOR_OP, dict);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "barrier notify failed");
- brick_rsp.op_ret = ret;
- brick_rsp.op_errstr = gf_strdup ("Failed to reconfigure "
- "barrier.");
- /* This is to invoke changelog-barrier disable if barrier
- * disable fails and don't invoke if barrier enable fails.
- */
- barrier = dict_get_str_boolean (dict, "barrier", _gf_true);
- if (barrier)
- goto submit_reply;
- else
- barrier_err = _gf_true;
- }
+ int ret = -1;
+ gd1_mgmt_brick_op_req brick_req = {
+ 0,
+ };
+ gd1_mgmt_brick_op_rsp brick_rsp = {
+ 0,
+ };
+ glusterfs_ctx_t *ctx = NULL;
+ glusterfs_graph_t *active = NULL;
+ xlator_t *top = NULL;
+ xlator_t *xlator = NULL;
+ xlator_t *old_THIS = NULL;
+ dict_t *dict = NULL;
+ gf_boolean_t barrier = _gf_true;
+ gf_boolean_t barrier_err = _gf_false;
+ xlator_list_t *trav;
+
+ GF_ASSERT(req);
+
+ ret = xdr_to_generic(req->msg[0], &brick_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret < 0) {
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ ctx = glusterfsd_ctx;
+ GF_ASSERT(ctx);
+ active = ctx->active;
+ top = active->first;
+
+ for (trav = top->children; trav; trav = trav->next) {
+ if (strcmp(trav->xlator->name, brick_req.name) == 0) {
+ break;
+ }
+ }
+ if (!trav) {
+ ret = -1;
+ goto out;
+ }
+ top = trav->xlator;
- /* Reset THIS so that we have it correct in case of an error below
+ dict = dict_new();
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize(brick_req.input.input_val, brick_req.input.input_len,
+ &dict);
+ if (ret < 0) {
+ gf_log(THIS->name, GF_LOG_ERROR,
+ "Failed to unserialize "
+ "request dictionary");
+ goto out;
+ }
+
+ brick_rsp.op_ret = 0;
+    brick_rsp.op_errstr = ""; // initialize to prevent serialization failures
+ old_THIS = THIS;
+
+ /* Send barrier request to the barrier xlator */
+ xlator = get_xlator_by_type(top, "features/barrier");
+ if (!xlator) {
+ ret = -1;
+ gf_log(THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
+ "features/barrier");
+ goto out;
+ }
+
+ THIS = xlator;
+ // TODO: Extend this to accept return of errnos
+ ret = xlator->notify(xlator, GF_EVENT_TRANSLATOR_OP, dict);
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_ERROR, "barrier notify failed");
+ brick_rsp.op_ret = ret;
+ brick_rsp.op_errstr = gf_strdup(
+ "Failed to reconfigure "
+ "barrier.");
+        /* This is to invoke changelog-barrier disable if barrier
+         * disable fails, and not to invoke it if barrier enable fails.
*/
- THIS = old_THIS;
-
- /* Send barrier request to changelog as well */
- xlator = get_xlator_by_type (top, "features/changelog");
- if (!xlator) {
- ret = -1;
- gf_log (THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
- "features/changelog");
- goto out;
- }
+ barrier = dict_get_str_boolean(dict, "barrier", _gf_true);
+ if (barrier)
+ goto submit_reply;
+ else
+ barrier_err = _gf_true;
+ }
- THIS = xlator;
- ret = xlator->notify (xlator, GF_EVENT_TRANSLATOR_OP, dict);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "changelog notify failed");
- brick_rsp.op_ret = ret;
- brick_rsp.op_errstr = gf_strdup ("changelog notify failed");
- goto submit_reply;
- }
+ /* Reset THIS so that we have it correct in case of an error below
+ */
+ THIS = old_THIS;
- if (barrier_err)
- ret = -1;
+ /* Send barrier request to changelog as well */
+ xlator = get_xlator_by_type(top, "features/changelog");
+ if (!xlator) {
+ ret = -1;
+ gf_log(THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
+ "features/changelog");
+ goto out;
+ }
+
+ THIS = xlator;
+ ret = xlator->notify(xlator, GF_EVENT_TRANSLATOR_OP, dict);
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_ERROR, "changelog notify failed");
+ brick_rsp.op_ret = ret;
+ brick_rsp.op_errstr = gf_strdup("changelog notify failed");
+ goto submit_reply;
+ }
+
+ if (barrier_err)
+ ret = -1;
submit_reply:
- THIS = old_THIS;
+ THIS = old_THIS;
- ret = glusterfs_submit_reply (req, &brick_rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
+ ret = glusterfs_submit_reply(req, &brick_rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
out:
- if (dict)
- dict_unref (dict);
- free (brick_req.input.input_val);
+ if (dict)
+ dict_unref(dict);
+ free(brick_req.input.input_val);
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
- return ret;
+ gf_log(THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
}
int
-glusterfs_handle_rpc_msg (rpcsvc_request_t *req)
+glusterfs_handle_rpc_msg(rpcsvc_request_t *req)
{
- int ret = -1;
- /* for now, nothing */
- return ret;
+ int ret = -1;
+ /* for now, nothing */
+ return ret;
}
rpcclnt_cb_actor_t mgmt_cbk_actors[GF_CBK_MAXVALUE] = {
- [GF_CBK_FETCHSPEC] = {"FETCHSPEC", GF_CBK_FETCHSPEC, mgmt_cbk_spec },
- [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", GF_CBK_EVENT_NOTIFY,
- mgmt_cbk_event},
- [GF_CBK_STATEDUMP] = {"STATEDUMP", GF_CBK_STATEDUMP, mgmt_cbk_event},
+ [GF_CBK_FETCHSPEC] = {"FETCHSPEC", GF_CBK_FETCHSPEC, mgmt_cbk_spec},
+ [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", GF_CBK_EVENT_NOTIFY,
+ mgmt_cbk_event},
+ [GF_CBK_STATEDUMP] = {"STATEDUMP", GF_CBK_STATEDUMP, mgmt_cbk_event},
};
-
struct rpcclnt_cb_program mgmt_cbk_prog = {
- .progname = "GlusterFS Callback",
- .prognum = GLUSTER_CBK_PROGRAM,
- .progver = GLUSTER_CBK_VERSION,
- .actors = mgmt_cbk_actors,
- .numactors = GF_CBK_MAXVALUE,
+ .progname = "GlusterFS Callback",
+ .prognum = GLUSTER_CBK_PROGRAM,
+ .progver = GLUSTER_CBK_VERSION,
+ .actors = mgmt_cbk_actors,
+ .numactors = GF_CBK_MAXVALUE,
};
char *clnt_pmap_procs[GF_PMAP_MAXVALUE] = {
- [GF_PMAP_NULL] = "NULL",
- [GF_PMAP_PORTBYBRICK] = "PORTBYBRICK",
- [GF_PMAP_BRICKBYPORT] = "BRICKBYPORT",
- [GF_PMAP_SIGNIN] = "SIGNIN",
- [GF_PMAP_SIGNOUT] = "SIGNOUT",
- [GF_PMAP_SIGNUP] = "SIGNUP", /* DEPRECATED - DON'T USE! */
+ [GF_PMAP_NULL] = "NULL",
+ [GF_PMAP_PORTBYBRICK] = "PORTBYBRICK",
+ [GF_PMAP_BRICKBYPORT] = "BRICKBYPORT",
+ [GF_PMAP_SIGNIN] = "SIGNIN",
+ [GF_PMAP_SIGNOUT] = "SIGNOUT",
+ [GF_PMAP_SIGNUP] = "SIGNUP", /* DEPRECATED - DON'T USE! */
};
-
rpc_clnt_prog_t clnt_pmap_prog = {
- .progname = "Gluster Portmap",
- .prognum = GLUSTER_PMAP_PROGRAM,
- .progver = GLUSTER_PMAP_VERSION,
- .procnames = clnt_pmap_procs,
+ .progname = "Gluster Portmap",
+ .prognum = GLUSTER_PMAP_PROGRAM,
+ .progver = GLUSTER_PMAP_VERSION,
+ .procnames = clnt_pmap_procs,
};
char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
- [GF_HNDSK_NULL] = "NULL",
- [GF_HNDSK_SETVOLUME] = "SETVOLUME",
- [GF_HNDSK_GETSPEC] = "GETSPEC",
- [GF_HNDSK_PING] = "PING",
- [GF_HNDSK_EVENT_NOTIFY] = "EVENTNOTIFY",
+ [GF_HNDSK_NULL] = "NULL",
+ [GF_HNDSK_SETVOLUME] = "SETVOLUME",
+ [GF_HNDSK_GETSPEC] = "GETSPEC",
+ [GF_HNDSK_PING] = "PING",
+ [GF_HNDSK_EVENT_NOTIFY] = "EVENTNOTIFY",
};
rpc_clnt_prog_t clnt_handshake_prog = {
- .progname = "GlusterFS Handshake",
- .prognum = GLUSTER_HNDSK_PROGRAM,
- .progver = GLUSTER_HNDSK_VERSION,
- .procnames = clnt_handshake_procs,
+ .progname = "GlusterFS Handshake",
+ .prognum = GLUSTER_HNDSK_PROGRAM,
+ .progver = GLUSTER_HNDSK_VERSION,
+ .procnames = clnt_handshake_procs,
};
rpcsvc_actor_t glusterfs_actors[GLUSTERD_BRICK_MAXVALUE] = {
- [GLUSTERD_BRICK_NULL] = {"NULL",
- GLUSTERD_BRICK_NULL,
- glusterfs_handle_rpc_msg,
- NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_TERMINATE] = {"TERMINATE",
- GLUSTERD_BRICK_TERMINATE,
- glusterfs_handle_terminate,
- NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_XLATOR_INFO] = {"TRANSLATOR INFO",
- GLUSTERD_BRICK_XLATOR_INFO,
- glusterfs_handle_translator_info_get,
- NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_XLATOR_OP] = {"TRANSLATOR OP",
- GLUSTERD_BRICK_XLATOR_OP,
- glusterfs_handle_translator_op,
- NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_STATUS] = {"STATUS",
- GLUSTERD_BRICK_STATUS,
- glusterfs_handle_brick_status,
- NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_XLATOR_DEFRAG] = {"TRANSLATOR DEFRAG",
- GLUSTERD_BRICK_XLATOR_DEFRAG,
- glusterfs_handle_defrag,
- NULL, 0, DRC_NA},
- [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE",
- GLUSTERD_NODE_PROFILE,
- glusterfs_handle_nfs_profile,
- NULL, 0, DRC_NA},
- [GLUSTERD_NODE_STATUS] = {"NFS STATUS",
- GLUSTERD_NODE_STATUS,
- glusterfs_handle_node_status,
- NULL, 0, DRC_NA},
- [GLUSTERD_VOLUME_BARRIER_OP] = {"VOLUME BARRIER OP",
- GLUSTERD_VOLUME_BARRIER_OP,
- glusterfs_handle_volume_barrier_op,
- NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_BARRIER] = {"BARRIER",
- GLUSTERD_BRICK_BARRIER,
- glusterfs_handle_barrier,
- NULL, 0, DRC_NA},
- [GLUSTERD_NODE_BITROT] = {"BITROT",
- GLUSTERD_NODE_BITROT,
- glusterfs_handle_bitrot,
- NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_ATTACH] = {"ATTACH",
- GLUSTERD_BRICK_ATTACH,
- glusterfs_handle_attach,
- NULL, 0, DRC_NA},
-
- [GLUSTERD_DUMP_METRICS] = {"DUMP METRICS",
- GLUSTERD_DUMP_METRICS,
- glusterfs_handle_dump_metrics,
- NULL, 0, DRC_NA},
+ [GLUSTERD_BRICK_NULL] = {"NULL", GLUSTERD_BRICK_NULL,
+ glusterfs_handle_rpc_msg, NULL, 0, DRC_NA},
+ [GLUSTERD_BRICK_TERMINATE] = {"TERMINATE", GLUSTERD_BRICK_TERMINATE,
+ glusterfs_handle_terminate, NULL, 0, DRC_NA},
+ [GLUSTERD_BRICK_XLATOR_INFO] = {"TRANSLATOR INFO",
+ GLUSTERD_BRICK_XLATOR_INFO,
+ glusterfs_handle_translator_info_get, NULL,
+ 0, DRC_NA},
+ [GLUSTERD_BRICK_XLATOR_OP] = {"TRANSLATOR OP", GLUSTERD_BRICK_XLATOR_OP,
+ glusterfs_handle_translator_op, NULL, 0,
+ DRC_NA},
+ [GLUSTERD_BRICK_STATUS] = {"STATUS", GLUSTERD_BRICK_STATUS,
+ glusterfs_handle_brick_status, NULL, 0, DRC_NA},
+ [GLUSTERD_BRICK_XLATOR_DEFRAG] = {"TRANSLATOR DEFRAG",
+ GLUSTERD_BRICK_XLATOR_DEFRAG,
+ glusterfs_handle_defrag, NULL, 0, DRC_NA},
+ [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE", GLUSTERD_NODE_PROFILE,
+ glusterfs_handle_nfs_profile, NULL, 0, DRC_NA},
+ [GLUSTERD_NODE_STATUS] = {"NFS STATUS", GLUSTERD_NODE_STATUS,
+ glusterfs_handle_node_status, NULL, 0, DRC_NA},
+ [GLUSTERD_VOLUME_BARRIER_OP] = {"VOLUME BARRIER OP",
+ GLUSTERD_VOLUME_BARRIER_OP,
+ glusterfs_handle_volume_barrier_op, NULL, 0,
+ DRC_NA},
+ [GLUSTERD_BRICK_BARRIER] = {"BARRIER", GLUSTERD_BRICK_BARRIER,
+ glusterfs_handle_barrier, NULL, 0, DRC_NA},
+ [GLUSTERD_NODE_BITROT] = {"BITROT", GLUSTERD_NODE_BITROT,
+ glusterfs_handle_bitrot, NULL, 0, DRC_NA},
+ [GLUSTERD_BRICK_ATTACH] = {"ATTACH", GLUSTERD_BRICK_ATTACH,
+ glusterfs_handle_attach, NULL, 0, DRC_NA},
+
+ [GLUSTERD_DUMP_METRICS] = {"DUMP METRICS", GLUSTERD_DUMP_METRICS,
+ glusterfs_handle_dump_metrics, NULL, 0, DRC_NA},
};
struct rpcsvc_program glusterfs_mop_prog = {
- .progname = "Gluster Brick operations",
- .prognum = GD_BRICK_PROGRAM,
- .progver = GD_BRICK_VERSION,
- .actors = glusterfs_actors,
- .numactors = GLUSTERD_BRICK_MAXVALUE,
- .synctask = _gf_true,
+ .progname = "Gluster Brick operations",
+ .prognum = GD_BRICK_PROGRAM,
+ .progver = GD_BRICK_VERSION,
+ .actors = glusterfs_actors,
+ .numactors = GLUSTERD_BRICK_MAXVALUE,
+ .synctask = _gf_true,
};
int
-mgmt_submit_request (void *req, call_frame_t *frame,
- glusterfs_ctx_t *ctx,
- rpc_clnt_prog_t *prog, int procnum,
- fop_cbk_fn_t cbkfn, xdrproc_t xdrproc)
+mgmt_submit_request(void *req, call_frame_t *frame, glusterfs_ctx_t *ctx,
+ rpc_clnt_prog_t *prog, int procnum, fop_cbk_fn_t cbkfn,
+ xdrproc_t xdrproc)
{
- int ret = -1;
- int count = 0;
- struct iovec iov = {0, };
- struct iobuf *iobuf = NULL;
- struct iobref *iobref = NULL;
- ssize_t xdr_size = 0;
-
- iobref = iobref_new ();
- if (!iobref) {
- goto out;
- }
-
- if (req) {
- xdr_size = xdr_sizeof (xdrproc, req);
-
- iobuf = iobuf_get2 (ctx->iobuf_pool, xdr_size);
- if (!iobuf) {
- goto out;
- };
-
- iobref_add (iobref, iobuf);
-
- iov.iov_base = iobuf->ptr;
- iov.iov_len = iobuf_pagesize (iobuf);
-
- /* Create the xdr payload */
- ret = xdr_serialize_generic (iov, req, xdrproc);
- if (ret == -1) {
- gf_log (THIS->name, GF_LOG_WARNING, "failed to create XDR payload");
- goto out;
- }
- iov.iov_len = ret;
- count = 1;
+ int ret = -1;
+ int count = 0;
+ struct iovec iov = {
+ 0,
+ };
+ struct iobuf *iobuf = NULL;
+ struct iobref *iobref = NULL;
+ ssize_t xdr_size = 0;
+
+ iobref = iobref_new();
+ if (!iobref) {
+ goto out;
+ }
+
+ if (req) {
+ xdr_size = xdr_sizeof(xdrproc, req);
+
+ iobuf = iobuf_get2(ctx->iobuf_pool, xdr_size);
+ if (!iobuf) {
+ goto out;
+ };
+
+ iobref_add(iobref, iobuf);
+
+ iov.iov_base = iobuf->ptr;
+ iov.iov_len = iobuf_pagesize(iobuf);
+
+ /* Create the xdr payload */
+ ret = xdr_serialize_generic(iov, req, xdrproc);
+ if (ret == -1) {
+ gf_log(THIS->name, GF_LOG_WARNING, "failed to create XDR payload");
+ goto out;
}
+ iov.iov_len = ret;
+ count = 1;
+ }
- /* Send the msg */
- ret = rpc_clnt_submit (ctx->mgmt, prog, procnum, cbkfn,
- &iov, count,
- NULL, 0, iobref, frame, NULL, 0, NULL, 0, NULL);
+ /* Send the msg */
+ ret = rpc_clnt_submit(ctx->mgmt, prog, procnum, cbkfn, &iov, count, NULL, 0,
+ iobref, frame, NULL, 0, NULL, 0, NULL);
out:
- if (iobref)
- iobref_unref (iobref);
+ if (iobref)
+ iobref_unref(iobref);
- if (iobuf)
- iobuf_unref (iobuf);
- return ret;
+ if (iobuf)
+ iobuf_unref(iobuf);
+ return ret;
}
int
-mgmt_getspec_cbk (struct rpc_req *req, struct iovec *iov, int count,
- void *myframe)
+mgmt_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
{
- gf_getspec_rsp rsp = {0,};
- call_frame_t *frame = NULL;
- glusterfs_ctx_t *ctx = NULL;
- int ret = 0, locked = 0;
- ssize_t size = 0;
- FILE *tmpfp = NULL;
- char *volfile_id = NULL;
- gf_volfile_t *volfile_obj = NULL;
- gf_volfile_t *volfile_tmp = NULL;
- char sha256_hash[SHA256_DIGEST_LENGTH] = {0, };
- dict_t *dict = NULL;
- char *servers_list = NULL;
- int tmp_fd = -1;
- char template[] = "/tmp/glfs.volfile.XXXXXX";
-
- frame = myframe;
- ctx = frame->this->ctx;
-
- if (-1 == req->rpc_status) {
- ret = -1;
- goto out;
- }
-
- ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_getspec_rsp);
- if (ret < 0) {
- gf_log (frame->this->name, GF_LOG_ERROR, "XDR decoding error");
- ret = -1;
- goto out;
- }
-
- if (-1 == rsp.op_ret) {
- gf_log (frame->this->name, GF_LOG_ERROR,
- "failed to get the 'volume file' from server");
- ret = rsp.op_errno;
- goto out;
- }
-
- if (!rsp.xdata.xdata_len) {
- goto volfile;
- }
-
- dict = dict_new ();
- if (!dict) {
- ret = -1;
- errno = ENOMEM;
- goto out;
- }
-
- ret = dict_unserialize (rsp.xdata.xdata_val, rsp.xdata.xdata_len,
- &dict);
- if (ret) {
- gf_log (frame->this->name, GF_LOG_ERROR,
- "failed to unserialize xdata to dictionary");
- goto out;
- }
- dict->extra_stdfree = rsp.xdata.xdata_val;
-
- /* glusterd2 only */
- ret = dict_get_str (dict, "servers-list", &servers_list);
- if (ret) {
- goto volfile;
- }
-
- gf_log (frame->this->name, GF_LOG_INFO,
- "Received list of available volfile servers: %s",
- servers_list);
+ gf_getspec_rsp rsp = {
+ 0,
+ };
+ call_frame_t *frame = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ int ret = 0, locked = 0;
+ ssize_t size = 0;
+ FILE *tmpfp = NULL;
+ char *volfile_id = NULL;
+ gf_volfile_t *volfile_obj = NULL;
+ gf_volfile_t *volfile_tmp = NULL;
+ char sha256_hash[SHA256_DIGEST_LENGTH] = {
+ 0,
+ };
+ dict_t *dict = NULL;
+ char *servers_list = NULL;
+ int tmp_fd = -1;
+ char template[] = "/tmp/glfs.volfile.XXXXXX";
+
+ frame = myframe;
+ ctx = frame->this->ctx;
+
+ if (-1 == req->rpc_status) {
+ ret = -1;
+ goto out;
+ }
- ret = gf_process_getspec_servers_list(&ctx->cmd_args, servers_list);
- if (ret) {
- gf_log (frame->this->name, GF_LOG_ERROR,
- "Failed (%s) to process servers list: %s",
- strerror (errno), servers_list);
- }
+ ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_getspec_rsp);
+ if (ret < 0) {
+ gf_log(frame->this->name, GF_LOG_ERROR, "XDR decoding error");
+ ret = -1;
+ goto out;
+ }
+
+ if (-1 == rsp.op_ret) {
+ gf_log(frame->this->name, GF_LOG_ERROR,
+ "failed to get the 'volume file' from server");
+ ret = rsp.op_errno;
+ goto out;
+ }
+
+ if (!rsp.xdata.xdata_len) {
+ goto volfile;
+ }
+
+ dict = dict_new();
+ if (!dict) {
+ ret = -1;
+ errno = ENOMEM;
+ goto out;
+ }
+
+ ret = dict_unserialize(rsp.xdata.xdata_val, rsp.xdata.xdata_len, &dict);
+ if (ret) {
+ gf_log(frame->this->name, GF_LOG_ERROR,
+ "failed to unserialize xdata to dictionary");
+ goto out;
+ }
+ dict->extra_stdfree = rsp.xdata.xdata_val;
+
+ /* glusterd2 only */
+ ret = dict_get_str(dict, "servers-list", &servers_list);
+ if (ret) {
+ goto volfile;
+ }
+
+ gf_log(frame->this->name, GF_LOG_INFO,
+ "Received list of available volfile servers: %s", servers_list);
+
+ ret = gf_process_getspec_servers_list(&ctx->cmd_args, servers_list);
+ if (ret) {
+ gf_log(frame->this->name, GF_LOG_ERROR,
+ "Failed (%s) to process servers list: %s", strerror(errno),
+ servers_list);
+ }
volfile:
- ret = 0;
- size = rsp.op_ret;
-
- glusterfs_compute_sha256 ((const unsigned char *) rsp.spec, size,
- sha256_hash);
-
- volfile_id = frame->local;
-
- LOCK (&ctx->volfile_lock);
- {
- locked = 1;
-
- list_for_each_entry (volfile_obj, &ctx->volfile_list,
- volfile_list) {
- if (!strcmp (volfile_id, volfile_obj->vol_id)) {
- if (!memcmp (sha256_hash,
- volfile_obj->volfile_checksum,
- sizeof (volfile_obj->volfile_checksum))) {
- gf_log (frame->this->name, GF_LOG_INFO,
- "No change in volfile,"
- "continuing");
- goto out;
- }
- volfile_tmp = volfile_obj;
- break;
- }
- }
+ ret = 0;
+ size = rsp.op_ret;
- /* coverity[secure_temp] mkstemp uses 0600 as the mode */
- tmp_fd = mkstemp (template);
- if (-1 == tmp_fd) {
- gf_msg (frame->this->name, GF_LOG_ERROR, 0,
- glusterfsd_msg_39,
- "Unable to create temporary file: %s",
- template);
- ret = -1;
- goto out;
- }
+ glusterfs_compute_sha256((const unsigned char *)rsp.spec, size,
+ sha256_hash);
- /* Calling unlink so that when the file is closed or program
- * terminates the temporary file is deleted.
- */
- ret = sys_unlink (template);
- if (ret < 0) {
- gf_msg (frame->this->name, GF_LOG_INFO, 0,
- glusterfsd_msg_39,
- "Unable to delete temporary file: %s",
- template);
- ret = 0;
- }
+ volfile_id = frame->local;
- tmpfp = fdopen (tmp_fd, "w+b");
- if (!tmpfp) {
- ret = -1;
- goto out;
- }
+ LOCK(&ctx->volfile_lock);
+ {
+ locked = 1;
- fwrite (rsp.spec, size, 1, tmpfp);
- fflush (tmpfp);
- if (ferror (tmpfp)) {
- ret = -1;
- goto out;
- }
-
- /* Check if only options have changed. No need to reload the
- * volfile if topology hasn't changed.
- * glusterfs_volfile_reconfigure returns 3 possible return states
- * return 0 =======> reconfiguration of options has succeeded
- * return 1 =======> the graph has to be reconstructed and all the xlators should be inited
- * return -1(or -ve) =======> Some Internal Error occurred during the operation
- */
-
- ret = glusterfs_volfile_reconfigure (tmpfp, ctx);
- if (ret == 0) {
- gf_log ("glusterfsd-mgmt", GF_LOG_DEBUG,
- "No need to re-load volfile, reconfigure done");
- if (!volfile_tmp) {
- ret = -1;
- gf_log ("mgmt", GF_LOG_ERROR, "Graph "
- "reconfigure succeeded with out having "
- "checksum.");
- goto out;
- }
- memcpy (volfile_tmp->volfile_checksum, sha256_hash,
- sizeof (volfile_tmp->volfile_checksum));
- goto out;
+ list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list)
+ {
+ if (!strcmp(volfile_id, volfile_obj->vol_id)) {
+ if (!memcmp(sha256_hash, volfile_obj->volfile_checksum,
+ sizeof(volfile_obj->volfile_checksum))) {
+ gf_log(frame->this->name, GF_LOG_INFO,
+                       "No change in volfile, "
+ "continuing");
+ goto out;
}
+ volfile_tmp = volfile_obj;
+ break;
+ }
+ }
- if (ret < 0) {
- gf_log ("glusterfsd-mgmt",
- GF_LOG_DEBUG, "Reconfigure failed !!");
- goto out;
- }
+ /* coverity[secure_temp] mkstemp uses 0600 as the mode */
+ tmp_fd = mkstemp(template);
+ if (-1 == tmp_fd) {
+ gf_msg(frame->this->name, GF_LOG_ERROR, 0, glusterfsd_msg_39,
+ "Unable to create temporary file: %s", template);
+ ret = -1;
+ goto out;
+ }
- ret = glusterfs_process_volfp (ctx, tmpfp);
- /* tmpfp closed */
- tmpfp = NULL;
- tmp_fd = -1;
- if (ret)
- goto out;
-
- if (!volfile_tmp) {
- volfile_tmp = GF_CALLOC (1, sizeof (gf_volfile_t),
- gf_common_volfile_t);
- if (!volfile_tmp) {
- ret = -1;
- goto out;
- }
-
- INIT_LIST_HEAD (&volfile_tmp->volfile_list);
- list_add (&volfile_tmp->volfile_list,
- &ctx->volfile_list);
- snprintf (volfile_tmp->vol_id,
- sizeof (volfile_tmp->vol_id), "%s",
- volfile_id);
- }
- memcpy (volfile_tmp->volfile_checksum, sha256_hash,
- sizeof (volfile_tmp->volfile_checksum));
+ /* Calling unlink so that when the file is closed or program
+ * terminates the temporary file is deleted.
+ */
+ ret = sys_unlink(template);
+ if (ret < 0) {
+ gf_msg(frame->this->name, GF_LOG_INFO, 0, glusterfsd_msg_39,
+ "Unable to delete temporary file: %s", template);
+ ret = 0;
}
- UNLOCK (&ctx->volfile_lock);
- locked = 0;
+ tmpfp = fdopen(tmp_fd, "w+b");
+ if (!tmpfp) {
+ ret = -1;
+ goto out;
+ }
- if (!is_mgmt_rpc_reconnect) {
- need_emancipate = 1;
- glusterfs_mgmt_pmap_signin (ctx);
- is_mgmt_rpc_reconnect = _gf_true;
+ fwrite(rsp.spec, size, 1, tmpfp);
+ fflush(tmpfp);
+ if (ferror(tmpfp)) {
+ ret = -1;
+ goto out;
}
-out:
+ /* Check if only options have changed. No need to reload the
+ * volfile if topology hasn't changed.
+ * glusterfs_volfile_reconfigure returns 3 possible return states
+ * return 0 =======> reconfiguration of options has succeeded
+     * return 1 =======> the graph has to be reconstructed and all the
+     * xlators should be inited
+     * return -1 (or -ve) =======> some internal error occurred during
+     * the operation
+ */
- if (locked)
- UNLOCK (&ctx->volfile_lock);
+ ret = glusterfs_volfile_reconfigure(tmpfp, ctx);
+ if (ret == 0) {
+ gf_log("glusterfsd-mgmt", GF_LOG_DEBUG,
+ "No need to re-load volfile, reconfigure done");
+ if (!volfile_tmp) {
+ ret = -1;
+ gf_log("mgmt", GF_LOG_ERROR,
+ "Graph "
+                       "reconfigure succeeded without having "
+ "checksum.");
+ goto out;
+ }
+ memcpy(volfile_tmp->volfile_checksum, sha256_hash,
+ sizeof(volfile_tmp->volfile_checksum));
+ goto out;
+ }
- GF_FREE (frame->local);
- frame->local = NULL;
- STACK_DESTROY (frame->root);
+ if (ret < 0) {
+ gf_log("glusterfsd-mgmt", GF_LOG_DEBUG, "Reconfigure failed !!");
+ goto out;
+ }
- free (rsp.spec);
+ ret = glusterfs_process_volfp(ctx, tmpfp);
+ /* tmpfp closed */
+ tmpfp = NULL;
+ tmp_fd = -1;
+ if (ret)
+ goto out;
- if (dict)
- dict_unref (dict);
+ if (!volfile_tmp) {
+ volfile_tmp = GF_CALLOC(1, sizeof(gf_volfile_t),
+ gf_common_volfile_t);
+ if (!volfile_tmp) {
+ ret = -1;
+ goto out;
+ }
- // Stop if server is running at an unsupported op-version
- if (ENOTSUP == ret) {
- gf_log ("mgmt", GF_LOG_ERROR, "Server is operating at an "
- "op-version which is not supported");
- cleanup_and_exit (0);
+ INIT_LIST_HEAD(&volfile_tmp->volfile_list);
+ list_add(&volfile_tmp->volfile_list, &ctx->volfile_list);
+ snprintf(volfile_tmp->vol_id, sizeof(volfile_tmp->vol_id), "%s",
+ volfile_id);
}
+ memcpy(volfile_tmp->volfile_checksum, sha256_hash,
+ sizeof(volfile_tmp->volfile_checksum));
+ }
+ UNLOCK(&ctx->volfile_lock);
- if (ret && ctx && !ctx->active) {
- /* Do it only for the first time */
- /* Failed to get the volume file, something wrong,
- restart the process */
- gf_log ("mgmt", GF_LOG_ERROR,
- "failed to fetch volume file (key:%s)",
- ctx->cmd_args.volfile_id);
- cleanup_and_exit (0);
- }
+ locked = 0;
+ if (!is_mgmt_rpc_reconnect) {
+ need_emancipate = 1;
+ glusterfs_mgmt_pmap_signin(ctx);
+ is_mgmt_rpc_reconnect = _gf_true;
+ }
- if (tmpfp)
- fclose (tmpfp);
- else if (tmp_fd != -1)
- sys_close (tmp_fd);
+out:
- return 0;
+ if (locked)
+ UNLOCK(&ctx->volfile_lock);
+
+ GF_FREE(frame->local);
+ frame->local = NULL;
+ STACK_DESTROY(frame->root);
+
+ free(rsp.spec);
+
+ if (dict)
+ dict_unref(dict);
+
+ // Stop if server is running at an unsupported op-version
+ if (ENOTSUP == ret) {
+ gf_log("mgmt", GF_LOG_ERROR,
+ "Server is operating at an "
+ "op-version which is not supported");
+ cleanup_and_exit(0);
+ }
+
+ if (ret && ctx && !ctx->active) {
+ /* Do it only for the first time */
+ /* Failed to get the volume file, something wrong,
+ restart the process */
+ gf_log("mgmt", GF_LOG_ERROR, "failed to fetch volume file (key:%s)",
+ ctx->cmd_args.volfile_id);
+ cleanup_and_exit(0);
+ }
+
+ if (tmpfp)
+ fclose(tmpfp);
+ else if (tmp_fd != -1)
+ sys_close(tmp_fd);
+
+ return 0;
}
-
static int
-glusterfs_volfile_fetch_one (glusterfs_ctx_t *ctx, char *volfile_id)
+glusterfs_volfile_fetch_one(glusterfs_ctx_t *ctx, char *volfile_id)
{
- cmd_args_t *cmd_args = NULL;
- gf_getspec_req req = {0, };
- int ret = 0;
- call_frame_t *frame = NULL;
- dict_t *dict = NULL;
-
- cmd_args = &ctx->cmd_args;
+ cmd_args_t *cmd_args = NULL;
+ gf_getspec_req req = {
+ 0,
+ };
+ int ret = 0;
+ call_frame_t *frame = NULL;
+ dict_t *dict = NULL;
+
+ cmd_args = &ctx->cmd_args;
+ if (!volfile_id) {
+ volfile_id = ctx->cmd_args.volfile_id;
if (!volfile_id) {
- volfile_id = ctx->cmd_args.volfile_id;
- if (!volfile_id) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "No volfile-id provided, erroring out");
- return -1;
- }
- }
-
- frame = create_frame (THIS, ctx->pool);
-
- req.key = volfile_id;
- req.flags = 0;
- /*
- * We are only storing one variable in local, hence using the same
- * variable. If multiple local variable is required, create a struct.
- */
- frame->local = gf_strdup (volfile_id);
- if (!frame->local) {
- ret = -1;
- goto out;
- }
-
- dict = dict_new ();
- if (!dict) {
- ret = -1;
- goto out;
- }
-
- // Set the supported min and max op-versions, so glusterd can make a
- // decision
- ret = dict_set_int32 (dict, "min-op-version", GD_OP_VERSION_MIN);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "Failed to set min-op-version"
- " in request dict");
- goto out;
- }
+ gf_log(THIS->name, GF_LOG_ERROR,
+ "No volfile-id provided, erroring out");
+ return -1;
+ }
+ }
+
+ frame = create_frame(THIS, ctx->pool);
+
+ req.key = volfile_id;
+ req.flags = 0;
+ /*
+ * We are only storing one variable in local, hence using the same
+     * variable. If multiple local variables are required, create a struct.
+ */
+ frame->local = gf_strdup(volfile_id);
+ if (!frame->local) {
+ ret = -1;
+ goto out;
+ }
- ret = dict_set_int32 (dict, "max-op-version", GD_OP_VERSION_MAX);
+ dict = dict_new();
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+
+ // Set the supported min and max op-versions, so glusterd can make a
+ // decision
+ ret = dict_set_int32(dict, "min-op-version", GD_OP_VERSION_MIN);
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_ERROR,
+ "Failed to set min-op-version"
+ " in request dict");
+ goto out;
+ }
+
+ ret = dict_set_int32(dict, "max-op-version", GD_OP_VERSION_MAX);
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_ERROR,
+ "Failed to set max-op-version"
+ " in request dict");
+ goto out;
+ }
+
+ /* Ask for a list of volfile (glusterd2 only) servers */
+ if (GF_CLIENT_PROCESS == ctx->process_mode) {
+ req.flags = req.flags | GF_GETSPEC_FLAG_SERVERS_LIST;
+ }
+
+ if (cmd_args->brick_name) {
+ ret = dict_set_dynstr_with_alloc(dict, "brick_name",
+ cmd_args->brick_name);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "Failed to set max-op-version"
- " in request dict");
- goto out;
+ gf_log(THIS->name, GF_LOG_ERROR,
+ "Failed to set brick_name in request dict");
+ goto out;
}
+ }
- /* Ask for a list of volfile (glusterd2 only) servers */
- if (GF_CLIENT_PROCESS == ctx->process_mode) {
- req.flags = req.flags | GF_GETSPEC_FLAG_SERVERS_LIST;
- }
-
- if (cmd_args->brick_name) {
- ret = dict_set_dynstr_with_alloc (dict, "brick_name",
- cmd_args->brick_name);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Failed to set brick_name in request dict");
- goto out;
- }
- }
-
- ret = dict_allocate_and_serialize (dict, &req.xdata.xdata_val,
- &req.xdata.xdata_len);
- if (ret < 0) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Failed to serialize dictionary");
- goto out;
- }
+ ret = dict_allocate_and_serialize(dict, &req.xdata.xdata_val,
+ &req.xdata.xdata_len);
+ if (ret < 0) {
+ gf_log(THIS->name, GF_LOG_ERROR, "Failed to serialize dictionary");
+ goto out;
+ }
- ret = mgmt_submit_request (&req, frame, ctx, &clnt_handshake_prog,
- GF_HNDSK_GETSPEC, mgmt_getspec_cbk,
- (xdrproc_t)xdr_gf_getspec_req);
+ ret = mgmt_submit_request(&req, frame, ctx, &clnt_handshake_prog,
+ GF_HNDSK_GETSPEC, mgmt_getspec_cbk,
+ (xdrproc_t)xdr_gf_getspec_req);
out:
- GF_FREE (req.xdata.xdata_val);
- if (dict)
- dict_unref (dict);
- if (ret && frame) {
- /* Free the frame->local fast, because we have not used memget
- */
- GF_FREE (frame->local);
- frame->local = NULL;
- STACK_DESTROY (frame->root);
- }
+ GF_FREE(req.xdata.xdata_val);
+ if (dict)
+ dict_unref(dict);
+ if (ret && frame) {
+ /* Free the frame->local fast, because we have not used memget
+ */
+ GF_FREE(frame->local);
+ frame->local = NULL;
+ STACK_DESTROY(frame->root);
+ }
- return ret;
+ return ret;
}
-
int
-glusterfs_volfile_fetch (glusterfs_ctx_t *ctx)
+glusterfs_volfile_fetch(glusterfs_ctx_t *ctx)
{
- xlator_t *server_xl = NULL;
- xlator_list_t *trav;
- int ret;
+ xlator_t *server_xl = NULL;
+ xlator_list_t *trav;
+ int ret;
- LOCK (&ctx->volfile_lock);
- {
- if (ctx->active) {
- server_xl = ctx->active->first;
- if (strcmp (server_xl->type, "protocol/server") != 0) {
- server_xl = NULL;
- }
- }
- if (!server_xl) {
- /* Startup (ctx->active not set) or non-server. */
- UNLOCK (&ctx->volfile_lock);
- return glusterfs_volfile_fetch_one
- (ctx, ctx->cmd_args.volfile_id);
- }
+ LOCK(&ctx->volfile_lock);
+ {
+ if (ctx->active) {
+ server_xl = ctx->active->first;
+ if (strcmp(server_xl->type, "protocol/server") != 0) {
+ server_xl = NULL;
+ }
+ }
+ if (!server_xl) {
+ /* Startup (ctx->active not set) or non-server. */
+ UNLOCK(&ctx->volfile_lock);
+ return glusterfs_volfile_fetch_one(ctx, ctx->cmd_args.volfile_id);
+ }
- ret = 0;
- for (trav = server_xl->children; trav; trav = trav->next) {
- ret |= glusterfs_volfile_fetch_one
- (ctx, trav->xlator->volfile_id);
- }
+ ret = 0;
+ for (trav = server_xl->children; trav; trav = trav->next) {
+ ret |= glusterfs_volfile_fetch_one(ctx, trav->xlator->volfile_id);
}
- UNLOCK (&ctx->volfile_lock);
- return ret;
+ }
+ UNLOCK(&ctx->volfile_lock);
+ return ret;
}
-
int32_t
-mgmt_event_notify_cbk (struct rpc_req *req, struct iovec *iov, int count,
- void *myframe)
+mgmt_event_notify_cbk(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
{
- gf_event_notify_rsp rsp = {0,};
- call_frame_t *frame = NULL;
- int ret = 0;
+ gf_event_notify_rsp rsp = {
+ 0,
+ };
+ call_frame_t *frame = NULL;
+ int ret = 0;
- frame = myframe;
+ frame = myframe;
- if (-1 == req->rpc_status) {
- ret = -1;
- goto out;
- }
+ if (-1 == req->rpc_status) {
+ ret = -1;
+ goto out;
+ }
- ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_event_notify_rsp);
- if (ret < 0) {
- gf_log (frame->this->name, GF_LOG_ERROR, "XDR decoding error");
- ret = -1;
- goto out;
- }
+ ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_event_notify_rsp);
+ if (ret < 0) {
+ gf_log(frame->this->name, GF_LOG_ERROR, "XDR decoding error");
+ ret = -1;
+ goto out;
+ }
- if (-1 == rsp.op_ret) {
- gf_log (frame->this->name, GF_LOG_ERROR,
- "failed to get the rsp from server");
- ret = -1;
- goto out;
- }
+ if (-1 == rsp.op_ret) {
+ gf_log(frame->this->name, GF_LOG_ERROR,
+ "failed to get the rsp from server");
+ ret = -1;
+ goto out;
+ }
out:
- free (rsp.dict.dict_val); //malloced by xdr
- return ret;
-
+ free(rsp.dict.dict_val); // malloced by xdr
+ return ret;
}
int32_t
-glusterfs_rebalance_event_notify_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+glusterfs_rebalance_event_notify_cbk(struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
- gf_event_notify_rsp rsp = {0,};
- call_frame_t *frame = NULL;
- int ret = 0;
+ gf_event_notify_rsp rsp = {
+ 0,
+ };
+ call_frame_t *frame = NULL;
+ int ret = 0;
- frame = myframe;
+ frame = myframe;
- if (-1 == req->rpc_status) {
- gf_log (frame->this->name, GF_LOG_ERROR,
- "failed to get the rsp from server");
- ret = -1;
- goto out;
- }
+ if (-1 == req->rpc_status) {
+ gf_log(frame->this->name, GF_LOG_ERROR,
+ "failed to get the rsp from server");
+ ret = -1;
+ goto out;
+ }
- ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_event_notify_rsp);
- if (ret < 0) {
- gf_log (frame->this->name, GF_LOG_ERROR, "XDR decoding error");
- ret = -1;
- goto out;
- }
+ ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_event_notify_rsp);
+ if (ret < 0) {
+ gf_log(frame->this->name, GF_LOG_ERROR, "XDR decoding error");
+ ret = -1;
+ goto out;
+ }
- if (-1 == rsp.op_ret) {
- gf_log (frame->this->name, GF_LOG_ERROR,
- "Received error (%s) from server",
- strerror (rsp.op_errno));
- ret = -1;
- goto out;
- }
+ if (-1 == rsp.op_ret) {
+ gf_log(frame->this->name, GF_LOG_ERROR,
+ "Received error (%s) from server", strerror(rsp.op_errno));
+ ret = -1;
+ goto out;
+ }
out:
- free (rsp.dict.dict_val); //malloced by xdr
+ free(rsp.dict.dict_val); // malloced by xdr
- if (frame) {
- STACK_DESTROY (frame->root);
- }
-
- return ret;
+ if (frame) {
+ STACK_DESTROY(frame->root);
+ }
+ return ret;
}
int32_t
-glusterfs_rebalance_event_notify (dict_t *dict)
+glusterfs_rebalance_event_notify(dict_t *dict)
{
- glusterfs_ctx_t *ctx = NULL;
- gf_event_notify_req req = {0,};
- int32_t ret = -1;
- cmd_args_t *cmd_args = NULL;
- call_frame_t *frame = NULL;
-
- ctx = glusterfsd_ctx;
- cmd_args = &ctx->cmd_args;
+ glusterfs_ctx_t *ctx = NULL;
+ gf_event_notify_req req = {
+ 0,
+ };
+ int32_t ret = -1;
+ cmd_args_t *cmd_args = NULL;
+ call_frame_t *frame = NULL;
- frame = create_frame (THIS, ctx->pool);
+ ctx = glusterfsd_ctx;
+ cmd_args = &ctx->cmd_args;
- req.op = GF_EN_DEFRAG_STATUS;
+ frame = create_frame(THIS, ctx->pool);
- if (dict) {
- ret = dict_set_str (dict, "volname", cmd_args->volfile_id);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "failed to set volname");
- }
- ret = dict_allocate_and_serialize (dict, &req.dict.dict_val,
- &req.dict.dict_len);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "failed to serialize dict");
- }
+ req.op = GF_EN_DEFRAG_STATUS;
+ if (dict) {
+ ret = dict_set_str(dict, "volname", cmd_args->volfile_id);
+ if (ret) {
+ gf_log("", GF_LOG_ERROR, "failed to set volname");
+ }
+ ret = dict_allocate_and_serialize(dict, &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret) {
+ gf_log("", GF_LOG_ERROR, "failed to serialize dict");
}
+ }
- ret = mgmt_submit_request (&req, frame, ctx, &clnt_handshake_prog,
- GF_HNDSK_EVENT_NOTIFY,
- glusterfs_rebalance_event_notify_cbk,
- (xdrproc_t)xdr_gf_event_notify_req);
+ ret = mgmt_submit_request(&req, frame, ctx, &clnt_handshake_prog,
+ GF_HNDSK_EVENT_NOTIFY,
+ glusterfs_rebalance_event_notify_cbk,
+ (xdrproc_t)xdr_gf_event_notify_req);
- GF_FREE (req.dict.dict_val);
- return ret;
+ GF_FREE(req.dict.dict_val);
+ return ret;
}
static int
-mgmt_rpc_notify (struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
- void *data)
+mgmt_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
+ void *data)
{
- xlator_t *this = NULL;
- glusterfs_ctx_t *ctx = NULL;
- int ret = 0;
- server_cmdline_t *server = NULL;
- rpc_transport_t *rpc_trans = NULL;
- int need_term = 0;
- int emval = 0;
- static int log_ctr1;
- static int log_ctr2;
- struct dnscache6 *dnscache = NULL;
-
- this = mydata;
- rpc_trans = rpc->conn.trans;
- ctx = this->ctx;
-
- switch (event) {
+ xlator_t *this = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ int ret = 0;
+ server_cmdline_t *server = NULL;
+ rpc_transport_t *rpc_trans = NULL;
+ int need_term = 0;
+ int emval = 0;
+ static int log_ctr1;
+ static int log_ctr2;
+ struct dnscache6 *dnscache = NULL;
+
+ this = mydata;
+ rpc_trans = rpc->conn.trans;
+ ctx = this->ctx;
+
+ switch (event) {
case RPC_CLNT_DISCONNECT:
- if (rpc_trans->connect_failed) {
- GF_LOG_OCCASIONALLY (log_ctr1, "glusterfsd-mgmt",
- GF_LOG_ERROR,
- "failed to connect to remote-"
- "host: %s",
- ctx->cmd_args.volfile_server);
- } else {
- GF_LOG_OCCASIONALLY (log_ctr1, "glusterfsd-mgmt",
- GF_LOG_INFO,
- "disconnected from remote-"
- "host: %s",
- ctx->cmd_args.volfile_server);
- }
-
- if (!rpc->disabled) {
- /*
- * Check if dnscache is exhausted for current server
- * and continue until cache is exhausted
- */
- dnscache = rpc_trans->dnscache;
- if (dnscache && dnscache->next) {
- break;
- }
+ if (rpc_trans->connect_failed) {
+ GF_LOG_OCCASIONALLY(log_ctr1, "glusterfsd-mgmt", GF_LOG_ERROR,
+ "failed to connect to remote-"
+ "host: %s",
+ ctx->cmd_args.volfile_server);
+ } else {
+ GF_LOG_OCCASIONALLY(log_ctr1, "glusterfsd-mgmt", GF_LOG_INFO,
+ "disconnected from remote-"
+ "host: %s",
+ ctx->cmd_args.volfile_server);
+ }
+
+ if (!rpc->disabled) {
+ /*
+ * Check if dnscache is exhausted for current server
+ * and continue until cache is exhausted
+ */
+ dnscache = rpc_trans->dnscache;
+ if (dnscache && dnscache->next) {
+ break;
}
- server = ctx->cmd_args.curr_server;
- if (server->list.next == &ctx->cmd_args.volfile_servers) {
- if (!ctx->active) {
- need_term = 1;
- }
- emval = ENOTCONN;
- GF_LOG_OCCASIONALLY (log_ctr2, "glusterfsd-mgmt",
- GF_LOG_INFO,
- "Exhausted all volfile servers");
- break;
+ }
+ server = ctx->cmd_args.curr_server;
+ if (server->list.next == &ctx->cmd_args.volfile_servers) {
+ if (!ctx->active) {
+ need_term = 1;
}
- server = list_entry (server->list.next, typeof(*server), list);
- ctx->cmd_args.curr_server = server;
- ctx->cmd_args.volfile_server = server->volfile_server;
-
- ret = dict_set_str (rpc_trans->options, "remote-host",
- server->volfile_server);
- if (ret != 0) {
- gf_log ("glusterfsd-mgmt", GF_LOG_ERROR,
- "failed to set remote-host: %s",
- server->volfile_server);
- if (!ctx->active) {
- need_term = 1;
- }
- emval = ENOTCONN;
- break;
+ emval = ENOTCONN;
+ GF_LOG_OCCASIONALLY(log_ctr2, "glusterfsd-mgmt", GF_LOG_INFO,
+ "Exhausted all volfile servers");
+ break;
+ }
+ server = list_entry(server->list.next, typeof(*server), list);
+ ctx->cmd_args.curr_server = server;
+ ctx->cmd_args.volfile_server = server->volfile_server;
+
+ ret = dict_set_str(rpc_trans->options, "remote-host",
+ server->volfile_server);
+ if (ret != 0) {
+ gf_log("glusterfsd-mgmt", GF_LOG_ERROR,
+ "failed to set remote-host: %s", server->volfile_server);
+ if (!ctx->active) {
+ need_term = 1;
}
- gf_log ("glusterfsd-mgmt", GF_LOG_INFO,
- "connecting to next volfile server %s",
- server->volfile_server);
+ emval = ENOTCONN;
break;
+ }
+ gf_log("glusterfsd-mgmt", GF_LOG_INFO,
+ "connecting to next volfile server %s",
+ server->volfile_server);
+ break;
case RPC_CLNT_CONNECT:
- ret = glusterfs_volfile_fetch (ctx);
- if (ret) {
- emval = ret;
- if (!ctx->active) {
- need_term = 1;
- gf_log ("glusterfsd-mgmt", GF_LOG_ERROR,
- "failed to fetch volume file (key:%s)",
- ctx->cmd_args.volfile_id);
- break;
-
- }
+ ret = glusterfs_volfile_fetch(ctx);
+ if (ret) {
+ emval = ret;
+ if (!ctx->active) {
+ need_term = 1;
+ gf_log("glusterfsd-mgmt", GF_LOG_ERROR,
+ "failed to fetch volume file (key:%s)",
+ ctx->cmd_args.volfile_id);
+ break;
}
+ }
- if (is_mgmt_rpc_reconnect)
- glusterfs_mgmt_pmap_signin (ctx);
+ if (is_mgmt_rpc_reconnect)
+ glusterfs_mgmt_pmap_signin(ctx);
- break;
+ break;
default:
- break;
- }
+ break;
+ }
- if (need_term) {
- emancipate (ctx, emval);
- cleanup_and_exit (1);
- }
+ if (need_term) {
+ emancipate(ctx, emval);
+ cleanup_and_exit(1);
+ }
- return 0;
+ return 0;
}
int
-glusterfs_rpcsvc_notify (rpcsvc_t *rpc, void *xl, rpcsvc_event_t event,
- void *data)
+glusterfs_rpcsvc_notify(rpcsvc_t *rpc, void *xl, rpcsvc_event_t event,
+ void *data)
{
- if (!xl || !data) {
- goto out;
- }
+ if (!xl || !data) {
+ goto out;
+ }
- switch (event) {
- case RPCSVC_EVENT_ACCEPT:
- {
- break;
+ switch (event) {
+ case RPCSVC_EVENT_ACCEPT: {
+ break;
}
- case RPCSVC_EVENT_DISCONNECT:
- {
- break;
+ case RPCSVC_EVENT_DISCONNECT: {
+ break;
}
default:
- break;
- }
+ break;
+ }
out:
- return 0;
+ return 0;
}
int
-glusterfs_listener_init (glusterfs_ctx_t *ctx)
+glusterfs_listener_init(glusterfs_ctx_t *ctx)
{
- cmd_args_t *cmd_args = NULL;
- rpcsvc_t *rpc = NULL;
- dict_t *options = NULL;
- int ret = -1;
+ cmd_args_t *cmd_args = NULL;
+ rpcsvc_t *rpc = NULL;
+ dict_t *options = NULL;
+ int ret = -1;
- cmd_args = &ctx->cmd_args;
+ cmd_args = &ctx->cmd_args;
- if (ctx->listener)
- return 0;
+ if (ctx->listener)
+ return 0;
- if (!cmd_args->sock_file)
- return 0;
+ if (!cmd_args->sock_file)
+ return 0;
- ret = rpcsvc_transport_unix_options_build (&options,
- cmd_args->sock_file);
- if (ret)
- goto out;
+ ret = rpcsvc_transport_unix_options_build(&options, cmd_args->sock_file);
+ if (ret)
+ goto out;
- rpc = rpcsvc_init (THIS, ctx, options, 8);
- if (rpc == NULL) {
- goto out;
- }
+ rpc = rpcsvc_init(THIS, ctx, options, 8);
+ if (rpc == NULL) {
+ goto out;
+ }
- ret = rpcsvc_register_notify (rpc, glusterfs_rpcsvc_notify, THIS);
- if (ret) {
- goto out;
- }
+ ret = rpcsvc_register_notify(rpc, glusterfs_rpcsvc_notify, THIS);
+ if (ret) {
+ goto out;
+ }
- ret = rpcsvc_create_listeners (rpc, options, "glusterfsd");
- if (ret < 1) {
- goto out;
- }
+ ret = rpcsvc_create_listeners(rpc, options, "glusterfsd");
+ if (ret < 1) {
+ goto out;
+ }
- ret = rpcsvc_program_register (rpc, &glusterfs_mop_prog, _gf_false);
- if (ret) {
- goto out;
- }
+ ret = rpcsvc_program_register(rpc, &glusterfs_mop_prog, _gf_false);
+ if (ret) {
+ goto out;
+ }
- ctx->listener = rpc;
+ ctx->listener = rpc;
out:
- return ret;
+ return ret;
}
int
-glusterfs_listener_stop (glusterfs_ctx_t *ctx)
+glusterfs_listener_stop(glusterfs_ctx_t *ctx)
{
- cmd_args_t *cmd_args = NULL;
- rpcsvc_t *rpc = NULL;
- rpcsvc_listener_t *listener = NULL;
- rpcsvc_listener_t *next = NULL;
- int ret = 0;
- xlator_t *this = NULL;
+ cmd_args_t *cmd_args = NULL;
+ rpcsvc_t *rpc = NULL;
+ rpcsvc_listener_t *listener = NULL;
+ rpcsvc_listener_t *next = NULL;
+ int ret = 0;
+ xlator_t *this = NULL;
- GF_ASSERT (ctx);
+ GF_ASSERT(ctx);
- rpc = ctx->listener;
- ctx->listener = NULL;
+ rpc = ctx->listener;
+ ctx->listener = NULL;
- (void) rpcsvc_program_unregister(rpc, &glusterfs_mop_prog);
+ (void)rpcsvc_program_unregister(rpc, &glusterfs_mop_prog);
- list_for_each_entry_safe (listener, next, &rpc->listeners, list) {
- rpcsvc_listener_destroy (listener);
- }
+ list_for_each_entry_safe(listener, next, &rpc->listeners, list)
+ {
+ rpcsvc_listener_destroy(listener);
+ }
- (void) rpcsvc_unregister_notify (rpc, glusterfs_rpcsvc_notify, THIS);
+ (void)rpcsvc_unregister_notify(rpc, glusterfs_rpcsvc_notify, THIS);
- GF_FREE (rpc);
+ GF_FREE(rpc);
- cmd_args = &ctx->cmd_args;
- if (cmd_args->sock_file) {
- ret = sys_unlink (cmd_args->sock_file);
- if (ret && (ENOENT == errno)) {
- ret = 0;
- }
+ cmd_args = &ctx->cmd_args;
+ if (cmd_args->sock_file) {
+ ret = sys_unlink(cmd_args->sock_file);
+ if (ret && (ENOENT == errno)) {
+ ret = 0;
}
+ }
- if (ret) {
- this = THIS;
- gf_log (this->name, GF_LOG_ERROR, "Failed to unlink listener "
- "socket %s, error: %s", cmd_args->sock_file,
- strerror (errno));
- }
- return ret;
+ if (ret) {
+ this = THIS;
+ gf_log(this->name, GF_LOG_ERROR,
+ "Failed to unlink listener "
+ "socket %s, error: %s",
+ cmd_args->sock_file, strerror(errno));
+ }
+ return ret;
}
int
-glusterfs_mgmt_notify (int32_t op, void *data, ...)
+glusterfs_mgmt_notify(int32_t op, void *data, ...)
{
- int ret = 0;
- switch (op)
- {
- case GF_EN_DEFRAG_STATUS:
- ret = glusterfs_rebalance_event_notify ((dict_t*) data);
- break;
+ int ret = 0;
+ switch (op) {
+ case GF_EN_DEFRAG_STATUS:
+ ret = glusterfs_rebalance_event_notify((dict_t *)data);
+ break;
- default:
- gf_log ("", GF_LOG_ERROR, "Invalid op");
- break;
- }
+ default:
+ gf_log("", GF_LOG_ERROR, "Invalid op");
+ break;
+ }
- return ret;
+ return ret;
}
int
-glusterfs_mgmt_init (glusterfs_ctx_t *ctx)
+glusterfs_mgmt_init(glusterfs_ctx_t *ctx)
{
- cmd_args_t *cmd_args = NULL;
- struct rpc_clnt *rpc = NULL;
- dict_t *options = NULL;
- int ret = -1;
- int port = GF_DEFAULT_BASE_PORT;
- char *host = NULL;
+ cmd_args_t *cmd_args = NULL;
+ struct rpc_clnt *rpc = NULL;
+ dict_t *options = NULL;
+ int ret = -1;
+ int port = GF_DEFAULT_BASE_PORT;
+ char *host = NULL;
- cmd_args = &ctx->cmd_args;
- GF_VALIDATE_OR_GOTO (THIS->name, cmd_args->volfile_server, out);
+ cmd_args = &ctx->cmd_args;
+ GF_VALIDATE_OR_GOTO(THIS->name, cmd_args->volfile_server, out);
- if (ctx->mgmt)
- return 0;
-
- LOCK_INIT (&ctx->volfile_lock);
-
- if (cmd_args->volfile_server_port)
- port = cmd_args->volfile_server_port;
-
- host = cmd_args->volfile_server;
+ if (ctx->mgmt)
+ return 0;
- if (cmd_args->volfile_server_transport &&
- !strcmp (cmd_args->volfile_server_transport, "unix")) {
- ret = rpc_transport_unix_options_build (&options, host, 0);
- } else {
- ret = rpc_transport_inet_options_build (&options, host, port);
- }
- if (ret)
- goto out;
+ LOCK_INIT(&ctx->volfile_lock);
- /* Explicitly turn on encrypted transport. */
- if (ctx->secure_mgmt) {
- ret = dict_set_dynstr_with_alloc
- (options, "transport.socket.ssl-enabled", "yes");
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "failed to set 'transport.socket.ssl-enabled' "
- "in options dict");
- goto out;
+ if (cmd_args->volfile_server_port)
+ port = cmd_args->volfile_server_port;
- }
+ host = cmd_args->volfile_server;
- ctx->ssl_cert_depth = glusterfs_read_secure_access_file ();
- }
+ if (cmd_args->volfile_server_transport &&
+ !strcmp(cmd_args->volfile_server_transport, "unix")) {
+ ret = rpc_transport_unix_options_build(&options, host, 0);
+ } else {
+ ret = rpc_transport_inet_options_build(&options, host, port);
+ }
+ if (ret)
+ goto out;
- rpc = rpc_clnt_new (options, THIS, THIS->name, 8);
- if (!rpc) {
- ret = -1;
- gf_log (THIS->name, GF_LOG_WARNING, "failed to create rpc clnt");
- goto out;
- }
-
- ret = rpc_clnt_register_notify (rpc, mgmt_rpc_notify, THIS);
+ /* Explicitly turn on encrypted transport. */
+ if (ctx->secure_mgmt) {
+ ret = dict_set_dynstr_with_alloc(options,
+ "transport.socket.ssl-enabled", "yes");
if (ret) {
- gf_log (THIS->name, GF_LOG_WARNING,
- "failed to register notify function");
- goto out;
+ gf_log(THIS->name, GF_LOG_ERROR,
+ "failed to set 'transport.socket.ssl-enabled' "
+ "in options dict");
+ goto out;
}
- ret = rpcclnt_cbk_program_register (rpc, &mgmt_cbk_prog, THIS);
- if (ret) {
- gf_log (THIS->name, GF_LOG_WARNING,
- "failed to register callback function");
- goto out;
- }
-
- ctx->notify = glusterfs_mgmt_notify;
-
- /* This value should be set before doing the 'rpc_clnt_start()' as
- the notify function uses this variable */
- ctx->mgmt = rpc;
+ ctx->ssl_cert_depth = glusterfs_read_secure_access_file();
+ }
- ret = rpc_clnt_start (rpc);
+ rpc = rpc_clnt_new(options, THIS, THIS->name, 8);
+ if (!rpc) {
+ ret = -1;
+ gf_log(THIS->name, GF_LOG_WARNING, "failed to create rpc clnt");
+ goto out;
+ }
+
+ ret = rpc_clnt_register_notify(rpc, mgmt_rpc_notify, THIS);
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_WARNING,
+ "failed to register notify function");
+ goto out;
+ }
+
+ ret = rpcclnt_cbk_program_register(rpc, &mgmt_cbk_prog, THIS);
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_WARNING,
+ "failed to register callback function");
+ goto out;
+ }
+
+ ctx->notify = glusterfs_mgmt_notify;
+
+ /* This value should be set before doing the 'rpc_clnt_start()' as
+ the notify function uses this variable */
+ ctx->mgmt = rpc;
+
+ ret = rpc_clnt_start(rpc);
out:
- return ret;
+ return ret;
}
static int
-mgmt_pmap_signin2_cbk (struct rpc_req *req, struct iovec *iov, int count,
- void *myframe)
+mgmt_pmap_signin2_cbk(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
{
- pmap_signin_rsp rsp = {0,};
- glusterfs_ctx_t *ctx = NULL;
- call_frame_t *frame = NULL;
- int ret = 0;
+ pmap_signin_rsp rsp = {
+ 0,
+ };
+ glusterfs_ctx_t *ctx = NULL;
+ call_frame_t *frame = NULL;
+ int ret = 0;
- ctx = glusterfsd_ctx;
- frame = myframe;
+ ctx = glusterfsd_ctx;
+ frame = myframe;
- if (-1 == req->rpc_status) {
- ret = -1;
- rsp.op_ret = -1;
- rsp.op_errno = EINVAL;
- goto out;
- }
-
- ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_pmap_signin_rsp);
- if (ret < 0) {
- gf_log (frame->this->name, GF_LOG_ERROR, "XDR decode error");
- rsp.op_ret = -1;
- rsp.op_errno = EINVAL;
- goto out;
- }
-
- if (-1 == rsp.op_ret) {
- gf_log (frame->this->name, GF_LOG_ERROR,
- "failed to register the port with glusterd");
- ret = -1;
- goto out;
- }
+ if (-1 == req->rpc_status) {
+ ret = -1;
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_pmap_signin_rsp);
+ if (ret < 0) {
+ gf_log(frame->this->name, GF_LOG_ERROR, "XDR decode error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 == rsp.op_ret) {
+ gf_log(frame->this->name, GF_LOG_ERROR,
+ "failed to register the port with glusterd");
+ ret = -1;
+ goto out;
+ }
- ret = 0;
+ ret = 0;
out:
- if (need_emancipate)
- emancipate (ctx, ret);
-
- STACK_DESTROY (frame->root);
- return 0;
+ if (need_emancipate)
+ emancipate(ctx, ret);
+ STACK_DESTROY(frame->root);
+ return 0;
}
static int
-mgmt_pmap_signin_cbk (struct rpc_req *req, struct iovec *iov, int count,
- void *myframe)
+mgmt_pmap_signin_cbk(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
{
- pmap_signin_rsp rsp = {0,};
- call_frame_t *frame = NULL;
- int ret = 0;
- int emancipate_ret = -1;
- pmap_signin_req pmap_req = {0, };
- cmd_args_t *cmd_args = NULL;
- glusterfs_ctx_t *ctx = NULL;
- char brick_name[PATH_MAX] = {0,};
-
- frame = myframe;
- ctx = glusterfsd_ctx;
- cmd_args = &ctx->cmd_args;
-
-
- if (-1 == req->rpc_status) {
- ret = -1;
- rsp.op_ret = -1;
- rsp.op_errno = EINVAL;
- goto out;
- }
-
- ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_pmap_signin_rsp);
- if (ret < 0) {
- gf_log (frame->this->name, GF_LOG_ERROR, "XDR decode error");
- rsp.op_ret = -1;
- rsp.op_errno = EINVAL;
- goto out;
- }
-
- if (-1 == rsp.op_ret) {
- gf_log (frame->this->name, GF_LOG_ERROR,
- "failed to register the port with glusterd");
- ret = -1;
- goto out;
- }
+ pmap_signin_rsp rsp = {
+ 0,
+ };
+ call_frame_t *frame = NULL;
+ int ret = 0;
+ int emancipate_ret = -1;
+ pmap_signin_req pmap_req = {
+ 0,
+ };
+ cmd_args_t *cmd_args = NULL;
+ glusterfs_ctx_t *ctx = NULL;
+ char brick_name[PATH_MAX] = {
+ 0,
+ };
+
+ frame = myframe;
+ ctx = glusterfsd_ctx;
+ cmd_args = &ctx->cmd_args;
+
+ if (-1 == req->rpc_status) {
+ ret = -1;
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_pmap_signin_rsp);
+ if (ret < 0) {
+ gf_log(frame->this->name, GF_LOG_ERROR, "XDR decode error");
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ if (-1 == rsp.op_ret) {
+ gf_log(frame->this->name, GF_LOG_ERROR,
+ "failed to register the port with glusterd");
+ ret = -1;
+ goto out;
+ }
- if (!cmd_args->brick_port2) {
- /* We are done with signin process */
- emancipate_ret = 0;
- goto out;
- }
+ if (!cmd_args->brick_port2) {
+ /* We are done with signin process */
+ emancipate_ret = 0;
+ goto out;
+ }
- snprintf (brick_name, PATH_MAX, "%s.rdma", cmd_args->brick_name);
- pmap_req.port = cmd_args->brick_port2;
- pmap_req.brick = brick_name;
+ snprintf(brick_name, PATH_MAX, "%s.rdma", cmd_args->brick_name);
+ pmap_req.port = cmd_args->brick_port2;
+ pmap_req.brick = brick_name;
- ret = mgmt_submit_request (&pmap_req, frame, ctx, &clnt_pmap_prog,
- GF_PMAP_SIGNIN, mgmt_pmap_signin2_cbk,
- (xdrproc_t)xdr_pmap_signin_req);
- if (ret)
- goto out;
+ ret = mgmt_submit_request(&pmap_req, frame, ctx, &clnt_pmap_prog,
+ GF_PMAP_SIGNIN, mgmt_pmap_signin2_cbk,
+ (xdrproc_t)xdr_pmap_signin_req);
+ if (ret)
+ goto out;
- return 0;
+ return 0;
out:
- if (need_emancipate && (ret < 0 || !cmd_args->brick_port2))
- emancipate (ctx, emancipate_ret);
+ if (need_emancipate && (ret < 0 || !cmd_args->brick_port2))
+ emancipate(ctx, emancipate_ret);
- STACK_DESTROY (frame->root);
- return 0;
+ STACK_DESTROY(frame->root);
+ return 0;
}
int
-glusterfs_mgmt_pmap_signin (glusterfs_ctx_t *ctx)
+glusterfs_mgmt_pmap_signin(glusterfs_ctx_t *ctx)
{
- call_frame_t *frame = NULL;
- pmap_signin_req req = {0, };
- int ret = -1;
- int emancipate_ret = -1;
- cmd_args_t *cmd_args = NULL;
- char brick_name[PATH_MAX] = {0,};
-
- frame = create_frame (THIS, ctx->pool);
- cmd_args = &ctx->cmd_args;
-
- if (!cmd_args->brick_port || !cmd_args->brick_name) {
- gf_log ("fsd-mgmt", GF_LOG_DEBUG,
- "portmapper signin arguments not given");
- emancipate_ret = 0;
- goto out;
- }
-
- if (cmd_args->volfile_server_transport &&
- !strcmp(cmd_args->volfile_server_transport, "rdma")) {
- snprintf (brick_name, sizeof(brick_name), "%s.rdma",
- cmd_args->brick_name);
- req.brick = brick_name;
- } else
- req.brick = cmd_args->brick_name;
-
- req.port = cmd_args->brick_port;
-
- ret = mgmt_submit_request (&req, frame, ctx, &clnt_pmap_prog,
- GF_PMAP_SIGNIN, mgmt_pmap_signin_cbk,
- (xdrproc_t)xdr_pmap_signin_req);
+ call_frame_t *frame = NULL;
+ pmap_signin_req req = {
+ 0,
+ };
+ int ret = -1;
+ int emancipate_ret = -1;
+ cmd_args_t *cmd_args = NULL;
+ char brick_name[PATH_MAX] = {
+ 0,
+ };
+
+ frame = create_frame(THIS, ctx->pool);
+ cmd_args = &ctx->cmd_args;
+
+ if (!cmd_args->brick_port || !cmd_args->brick_name) {
+ gf_log("fsd-mgmt", GF_LOG_DEBUG,
+ "portmapper signin arguments not given");
+ emancipate_ret = 0;
+ goto out;
+ }
+
+ if (cmd_args->volfile_server_transport &&
+ !strcmp(cmd_args->volfile_server_transport, "rdma")) {
+ snprintf(brick_name, sizeof(brick_name), "%s.rdma",
+ cmd_args->brick_name);
+ req.brick = brick_name;
+ } else
+ req.brick = cmd_args->brick_name;
+
+ req.port = cmd_args->brick_port;
+
+ ret = mgmt_submit_request(&req, frame, ctx, &clnt_pmap_prog, GF_PMAP_SIGNIN,
+ mgmt_pmap_signin_cbk,
+ (xdrproc_t)xdr_pmap_signin_req);
out:
- if (need_emancipate && ret < 0)
- emancipate (ctx, emancipate_ret);
- return ret;
+ if (need_emancipate && ret < 0)
+ emancipate(ctx, emancipate_ret);
+ return ret;
}
-
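Editorial note (not part of the patch): the mgmt_rpc_notify() hunk above shows the reformatted failover path, where each RPC_CLNT_DISCONNECT advances ctx->cmd_args.curr_server to the next entry and the daemon gives up ("Exhausted all volfile servers") once the list wraps back to its head. The stand-alone C sketch below illustrates that wrap-around check with a hypothetical server_node type and next_volfile_server() helper; it is a simplified stand-in for the server_cmdline_t list handling, not code from glusterfsd-mgmt.c.

/* Hedged sketch: advance through a circular list of volfile servers and
 * stop once the walk returns to the head, mirroring the disconnect
 * handling shown above. server_node and next_volfile_server() are
 * hypothetical names introduced only for this illustration. */
#include <stddef.h>
#include <stdio.h>

struct server_node {
    const char *host;
    struct server_node *next;   /* circular: last node points back to head */
};

/* Returns the next server to try, or NULL when every entry has been tried. */
static struct server_node *
next_volfile_server(struct server_node *head, struct server_node *curr)
{
    if (!head || !curr || curr->next == head)
        return NULL;            /* exhausted all volfile servers */
    return curr->next;
}

int
main(void)
{
    struct server_node c = {"server3", NULL};
    struct server_node b = {"server2", &c};
    struct server_node a = {"server1", &b};
    struct server_node *curr = &a;

    c.next = &a;                /* close the ring; &a is the list head */

    while ((curr = next_volfile_server(&a, curr)) != NULL)
        printf("connecting to next volfile server %s\n", curr->host);

    return 0;
}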
diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
index f374a2af22f..6e61a66c6d1 100644
--- a/glusterfsd/src/glusterfsd.c
+++ b/glusterfsd/src/glusterfsd.c
@@ -37,9 +37,9 @@
#include <linux/oom.h>
#else
#define OOM_SCORE_ADJ_MIN (-1000)
-#define OOM_SCORE_ADJ_MAX 1000
-#define OOM_DISABLE (-17)
-#define OOM_ADJUST_MAX 15
+#define OOM_SCORE_ADJ_MAX 1000
+#define OOM_DISABLE (-17)
+#define OOM_ADJUST_MAX 15
#endif
#endif
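Editorial note (not part of the patch): the hunk above only realigns the fallback OOM_SCORE_ADJ_MIN/MAX, OOM_DISABLE and OOM_ADJUST_MAX definitions used when <linux/oom.h> is unavailable. The sketch below shows one plausible way such limits are consumed when honouring an --oom-score-adj style option; apply_oom_score_adj() is a hypothetical helper written for illustration, not a function taken from glusterfsd.c.

/* Hedged sketch: clamp a requested OOM score adjustment to the fallback
 * limits and write it to /proc/self/oom_score_adj. Assumes a Linux-style
 * procfs; error handling is minimal on purpose. */
#include <stdio.h>

#ifndef OOM_SCORE_ADJ_MIN
#define OOM_SCORE_ADJ_MIN (-1000)
#endif
#ifndef OOM_SCORE_ADJ_MAX
#define OOM_SCORE_ADJ_MAX 1000
#endif

static int
apply_oom_score_adj(int score)
{
    FILE *fp = NULL;

    /* Keep the requested value inside the range the kernel accepts. */
    if (score < OOM_SCORE_ADJ_MIN)
        score = OOM_SCORE_ADJ_MIN;
    if (score > OOM_SCORE_ADJ_MAX)
        score = OOM_SCORE_ADJ_MAX;

    fp = fopen("/proc/self/oom_score_adj", "w");
    if (!fp)
        return -1;

    fprintf(fp, "%d", score);
    fclose(fp);
    return 0;
}

int
main(void)
{
    /* Example: make this process a preferred OOM-killer target. */
    return apply_oom_score_adj(500) ? 1 : 0;
}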
@@ -80,1485 +80,1445 @@
#include "daemon.h"
-
/* using argp for command line parsing */
static char gf_doc[] = "";
-static char argp_doc[] = "--volfile-server=SERVER [MOUNT-POINT]\n" \
- "--volfile=VOLFILE [MOUNT-POINT]";
-const char *argp_program_version = "" \
- PACKAGE_NAME" "PACKAGE_VERSION \
- "\nRepository revision: " GLUSTERFS_REPOSITORY_REVISION "\n" \
- "Copyright (c) 2006-2016 Red Hat, Inc. " \
- "<https://www.gluster.org/>\n" \
- "GlusterFS comes with ABSOLUTELY NO WARRANTY.\n" \
- "It is licensed to you under your choice of the GNU Lesser\n" \
- "General Public License, version 3 or any later version (LGPLv3\n" \
- "or later), or the GNU General Public License, version 2 (GPLv2),\n" \
- "in all cases as published by the Free Software Foundation.";
+static char argp_doc[] =
+ "--volfile-server=SERVER [MOUNT-POINT]\n"
+ "--volfile=VOLFILE [MOUNT-POINT]";
+const char *argp_program_version =
+ "" PACKAGE_NAME " " PACKAGE_VERSION
+ "\nRepository revision: " GLUSTERFS_REPOSITORY_REVISION
+ "\n"
+ "Copyright (c) 2006-2016 Red Hat, Inc. "
+ "<https://www.gluster.org/>\n"
+ "GlusterFS comes with ABSOLUTELY NO WARRANTY.\n"
+ "It is licensed to you under your choice of the GNU Lesser\n"
+ "General Public License, version 3 or any later version (LGPLv3\n"
+ "or later), or the GNU General Public License, version 2 (GPLv2),\n"
+ "in all cases as published by the Free Software Foundation.";
const char *argp_program_bug_address = "<" PACKAGE_BUGREPORT ">";
-static error_t parse_opts (int32_t key, char *arg, struct argp_state *_state);
+static error_t
+parse_opts(int32_t key, char *arg, struct argp_state *_state);
static struct argp_option gf_options[] = {
- {0, 0, 0, 0, "Basic options:"},
- {"volfile-server", ARGP_VOLFILE_SERVER_KEY, "SERVER", 0,
- "Server to get the volume file from. Unix domain socket path when "
- "transport type 'unix'. This option overrides --volfile option"},
- {"volfile", ARGP_VOLUME_FILE_KEY, "VOLFILE", 0,
- "File to use as VOLUME_FILE"},
- {"spec-file", ARGP_VOLUME_FILE_KEY, "VOLFILE", OPTION_HIDDEN,
- "File to use as VOLUME FILE"},
-
- {"log-level", ARGP_LOG_LEVEL_KEY, "LOGLEVEL", 0,
- "Logging severity. Valid options are DEBUG, INFO, WARNING, ERROR, "
- "CRITICAL, TRACE and NONE [default: INFO]"},
- {"log-file", ARGP_LOG_FILE_KEY, "LOGFILE", 0,
- "File to use for logging [default: "
- DEFAULT_LOG_FILE_DIRECTORY "/" PACKAGE_NAME ".log" "]"},
- {"logger", ARGP_LOGGER, "LOGGER", 0, "Set which logging sub-system to "
- "log to, valid options are: gluster-log and syslog, "
- "[default: \"gluster-log\"]"},
- {"log-format", ARGP_LOG_FORMAT, "LOG-FORMAT", 0, "Set log format, valid"
- " options are: no-msg-id and with-msg-id, [default: \"with-msg-id\"]"},
- {"log-buf-size", ARGP_LOG_BUF_SIZE, "LOG-BUF-SIZE", 0, "Set logging "
- "buffer size, [default: 5]"},
- {"log-flush-timeout", ARGP_LOG_FLUSH_TIMEOUT, "LOG-FLUSH-TIMEOUT", 0,
- "Set log flush timeout, [default: 2 minutes]"},
-
- {0, 0, 0, 0, "Advanced Options:"},
- {"volfile-server-port", ARGP_VOLFILE_SERVER_PORT_KEY, "PORT", 0,
- "Listening port number of volfile server"},
- {"volfile-server-transport", ARGP_VOLFILE_SERVER_TRANSPORT_KEY,
- "TRANSPORT", 0,
- "Transport type to get volfile from server [default: socket]"},
- {"volfile-id", ARGP_VOLFILE_ID_KEY, "KEY", 0,
- "'key' of the volfile to be fetched from server"},
- {"pid-file", ARGP_PID_FILE_KEY, "PIDFILE", 0,
- "File to use as pid file"},
- {"socket-file", ARGP_SOCK_FILE_KEY, "SOCKFILE", 0,
- "File to use as unix-socket"},
- {"no-daemon", ARGP_NO_DAEMON_KEY, 0, 0,
- "Run in foreground"},
- {"run-id", ARGP_RUN_ID_KEY, "RUN-ID", OPTION_HIDDEN,
- "Run ID for the process, used by scripts to keep track of process "
- "they started, defaults to none"},
- {"debug", ARGP_DEBUG_KEY, 0, 0,
- "Run in debug mode. This option sets --no-daemon, --log-level "
- "to DEBUG and --log-file to console"},
- {"volume-name", ARGP_VOLUME_NAME_KEY, "XLATOR-NAME", 0,
- "Translator name to be used for MOUNT-POINT [default: top most volume "
- "definition in VOLFILE]"},
- {"xlator-option", ARGP_XLATOR_OPTION_KEY,"XLATOR-NAME.OPTION=VALUE", 0,
- "Add/override an option for a translator in volume file with specified"
- " value"},
- {"read-only", ARGP_READ_ONLY_KEY, 0, 0,
- "Mount the filesystem in 'read-only' mode"},
- {"acl", ARGP_ACL_KEY, 0, 0,
- "Mount the filesystem with POSIX ACL support"},
- {"selinux", ARGP_SELINUX_KEY, 0, 0,
- "Enable SELinux label (extended attributes) support on inodes"},
- {"capability", ARGP_CAPABILITY_KEY, 0, 0,
- "Enable Capability (extended attributes) support on inodes"},
- {"subdir-mount", ARGP_SUBDIR_MOUNT_KEY, "SUBDIR-PATH", 0,
- "Mount subdirectory given [default: NULL]"},
-
- {"print-netgroups", ARGP_PRINT_NETGROUPS, "NETGROUP-FILE", 0,
- "Validate the netgroups file and print it out"},
- {"print-exports", ARGP_PRINT_EXPORTS, "EXPORTS-FILE", 0,
- "Validate the exports file and print it out"},
- {"print-xlatordir", ARGP_PRINT_XLATORDIR_KEY, 0, OPTION_ARG_OPTIONAL,
- "Print xlator directory path"},
- {"print-statedumpdir", ARGP_PRINT_STATEDUMPDIR_KEY, 0,
- OPTION_ARG_OPTIONAL,
- "Print directory path in which statedumps shall be generated"},
- {"print-logdir", ARGP_PRINT_LOGDIR_KEY, 0, OPTION_ARG_OPTIONAL,
- "Print path of default log directory"},
- {"print-libexecdir", ARGP_PRINT_LIBEXECDIR_KEY, 0, OPTION_ARG_OPTIONAL,
- "Print path of default libexec directory"},
-
- {"volfile-max-fetch-attempts", ARGP_VOLFILE_MAX_FETCH_ATTEMPTS, "0",
- OPTION_HIDDEN, "Maximum number of attempts to fetch the volfile"},
- {"aux-gfid-mount", ARGP_AUX_GFID_MOUNT_KEY, 0, 0,
- "Enable access to filesystem through gfid directly"},
- {"enable-ino32", ARGP_INODE32_KEY, "BOOL", OPTION_ARG_OPTIONAL,
- "Use 32-bit inodes when mounting to workaround broken applications"
- "that don't support 64-bit inodes"},
- {"worm", ARGP_WORM_KEY, 0, 0,
- "Mount the filesystem in 'worm' mode"},
- {"mac-compat", ARGP_MAC_COMPAT_KEY, "BOOL", OPTION_ARG_OPTIONAL,
- "Provide stubs for attributes needed for seamless operation on Macs "
+ {0, 0, 0, 0, "Basic options:"},
+ {"volfile-server", ARGP_VOLFILE_SERVER_KEY, "SERVER", 0,
+ "Server to get the volume file from. Unix domain socket path when "
+ "transport type 'unix'. This option overrides --volfile option"},
+ {"volfile", ARGP_VOLUME_FILE_KEY, "VOLFILE", 0,
+ "File to use as VOLUME_FILE"},
+ {"spec-file", ARGP_VOLUME_FILE_KEY, "VOLFILE", OPTION_HIDDEN,
+ "File to use as VOLUME FILE"},
+
+ {"log-level", ARGP_LOG_LEVEL_KEY, "LOGLEVEL", 0,
+ "Logging severity. Valid options are DEBUG, INFO, WARNING, ERROR, "
+ "CRITICAL, TRACE and NONE [default: INFO]"},
+ {"log-file", ARGP_LOG_FILE_KEY, "LOGFILE", 0,
+ "File to use for logging [default: " DEFAULT_LOG_FILE_DIRECTORY
+ "/" PACKAGE_NAME ".log"
+ "]"},
+ {"logger", ARGP_LOGGER, "LOGGER", 0,
+ "Set which logging sub-system to "
+ "log to, valid options are: gluster-log and syslog, "
+ "[default: \"gluster-log\"]"},
+ {"log-format", ARGP_LOG_FORMAT, "LOG-FORMAT", 0,
+ "Set log format, valid"
+ " options are: no-msg-id and with-msg-id, [default: \"with-msg-id\"]"},
+ {"log-buf-size", ARGP_LOG_BUF_SIZE, "LOG-BUF-SIZE", 0,
+ "Set logging "
+ "buffer size, [default: 5]"},
+ {"log-flush-timeout", ARGP_LOG_FLUSH_TIMEOUT, "LOG-FLUSH-TIMEOUT", 0,
+ "Set log flush timeout, [default: 2 minutes]"},
+
+ {0, 0, 0, 0, "Advanced Options:"},
+ {"volfile-server-port", ARGP_VOLFILE_SERVER_PORT_KEY, "PORT", 0,
+ "Listening port number of volfile server"},
+ {"volfile-server-transport", ARGP_VOLFILE_SERVER_TRANSPORT_KEY, "TRANSPORT",
+ 0, "Transport type to get volfile from server [default: socket]"},
+ {"volfile-id", ARGP_VOLFILE_ID_KEY, "KEY", 0,
+ "'key' of the volfile to be fetched from server"},
+ {"pid-file", ARGP_PID_FILE_KEY, "PIDFILE", 0, "File to use as pid file"},
+ {"socket-file", ARGP_SOCK_FILE_KEY, "SOCKFILE", 0,
+ "File to use as unix-socket"},
+ {"no-daemon", ARGP_NO_DAEMON_KEY, 0, 0, "Run in foreground"},
+ {"run-id", ARGP_RUN_ID_KEY, "RUN-ID", OPTION_HIDDEN,
+ "Run ID for the process, used by scripts to keep track of process "
+ "they started, defaults to none"},
+ {"debug", ARGP_DEBUG_KEY, 0, 0,
+ "Run in debug mode. This option sets --no-daemon, --log-level "
+ "to DEBUG and --log-file to console"},
+ {"volume-name", ARGP_VOLUME_NAME_KEY, "XLATOR-NAME", 0,
+ "Translator name to be used for MOUNT-POINT [default: top most volume "
+ "definition in VOLFILE]"},
+ {"xlator-option", ARGP_XLATOR_OPTION_KEY, "XLATOR-NAME.OPTION=VALUE", 0,
+ "Add/override an option for a translator in volume file with specified"
+ " value"},
+ {"read-only", ARGP_READ_ONLY_KEY, 0, 0,
+ "Mount the filesystem in 'read-only' mode"},
+ {"acl", ARGP_ACL_KEY, 0, 0, "Mount the filesystem with POSIX ACL support"},
+ {"selinux", ARGP_SELINUX_KEY, 0, 0,
+ "Enable SELinux label (extended attributes) support on inodes"},
+ {"capability", ARGP_CAPABILITY_KEY, 0, 0,
+ "Enable Capability (extended attributes) support on inodes"},
+ {"subdir-mount", ARGP_SUBDIR_MOUNT_KEY, "SUBDIR-PATH", 0,
+ "Mount subdirectory given [default: NULL]"},
+
+ {"print-netgroups", ARGP_PRINT_NETGROUPS, "NETGROUP-FILE", 0,
+ "Validate the netgroups file and print it out"},
+ {"print-exports", ARGP_PRINT_EXPORTS, "EXPORTS-FILE", 0,
+ "Validate the exports file and print it out"},
+ {"print-xlatordir", ARGP_PRINT_XLATORDIR_KEY, 0, OPTION_ARG_OPTIONAL,
+ "Print xlator directory path"},
+ {"print-statedumpdir", ARGP_PRINT_STATEDUMPDIR_KEY, 0, OPTION_ARG_OPTIONAL,
+ "Print directory path in which statedumps shall be generated"},
+ {"print-logdir", ARGP_PRINT_LOGDIR_KEY, 0, OPTION_ARG_OPTIONAL,
+ "Print path of default log directory"},
+ {"print-libexecdir", ARGP_PRINT_LIBEXECDIR_KEY, 0, OPTION_ARG_OPTIONAL,
+ "Print path of default libexec directory"},
+
+ {"volfile-max-fetch-attempts", ARGP_VOLFILE_MAX_FETCH_ATTEMPTS, "0",
+ OPTION_HIDDEN, "Maximum number of attempts to fetch the volfile"},
+ {"aux-gfid-mount", ARGP_AUX_GFID_MOUNT_KEY, 0, 0,
+ "Enable access to filesystem through gfid directly"},
+ {"enable-ino32", ARGP_INODE32_KEY, "BOOL", OPTION_ARG_OPTIONAL,
+ "Use 32-bit inodes when mounting to workaround broken applications"
+ "that don't support 64-bit inodes"},
+ {"worm", ARGP_WORM_KEY, 0, 0, "Mount the filesystem in 'worm' mode"},
+ {"mac-compat", ARGP_MAC_COMPAT_KEY, "BOOL", OPTION_ARG_OPTIONAL,
+ "Provide stubs for attributes needed for seamless operation on Macs "
#ifdef GF_DARWIN_HOST_OS
- "[default: \"on\" on client side, else \"off\"]"
+ "[default: \"on\" on client side, else \"off\"]"
#else
- "[default: \"off\"]"
+ "[default: \"off\"]"
#endif
- },
- {"brick-name", ARGP_BRICK_NAME_KEY, "BRICK-NAME", OPTION_HIDDEN,
- "Brick name to be registered with Gluster portmapper" },
- {"brick-port", ARGP_BRICK_PORT_KEY, "BRICK-PORT", OPTION_HIDDEN,
- "Brick Port to be registered with Gluster portmapper" },
- {"fopen-keep-cache", ARGP_FOPEN_KEEP_CACHE_KEY, "BOOL", OPTION_ARG_OPTIONAL,
- "Do not purge the cache on file open"},
- {"global-timer-wheel", ARGP_GLOBAL_TIMER_WHEEL, "BOOL",
- OPTION_ARG_OPTIONAL, "Instantiate process global timer-wheel"},
- {"thin-client", ARGP_THIN_CLIENT_KEY, 0, 0,
- "Enables thin mount and connects via gfproxyd daemon"},
-
- {0, 0, 0, 0, "Fuse options:"},
- {"direct-io-mode", ARGP_DIRECT_IO_MODE_KEY, "BOOL|auto", OPTION_ARG_OPTIONAL,
- "Specify direct I/O strategy [default: \"auto\"]"},
- {"entry-timeout", ARGP_ENTRY_TIMEOUT_KEY, "SECONDS", 0,
- "Set entry timeout to SECONDS in fuse kernel module [default: 1]"},
- {"negative-timeout", ARGP_NEGATIVE_TIMEOUT_KEY, "SECONDS", 0,
- "Set negative timeout to SECONDS in fuse kernel module [default: 0]"},
- {"attribute-timeout", ARGP_ATTRIBUTE_TIMEOUT_KEY, "SECONDS", 0,
- "Set attribute timeout to SECONDS for inodes in fuse kernel module "
- "[default: 1]"},
- {"gid-timeout", ARGP_GID_TIMEOUT_KEY, "SECONDS", 0,
- "Set auxiliary group list timeout to SECONDS for fuse translator "
- "[default: 300]"},
- {"resolve-gids", ARGP_RESOLVE_GIDS_KEY, 0, 0,
- "Resolve all auxiliary groups in fuse translator (max 32 otherwise)"},
- {"background-qlen", ARGP_FUSE_BACKGROUND_QLEN_KEY, "N", 0,
- "Set fuse module's background queue length to N "
- "[default: 64]"},
- {"congestion-threshold", ARGP_FUSE_CONGESTION_THRESHOLD_KEY, "N", 0,
- "Set fuse module's congestion threshold to N "
- "[default: 48]"},
+ },
+ {"brick-name", ARGP_BRICK_NAME_KEY, "BRICK-NAME", OPTION_HIDDEN,
+ "Brick name to be registered with Gluster portmapper"},
+ {"brick-port", ARGP_BRICK_PORT_KEY, "BRICK-PORT", OPTION_HIDDEN,
+ "Brick Port to be registered with Gluster portmapper"},
+ {"fopen-keep-cache", ARGP_FOPEN_KEEP_CACHE_KEY, "BOOL", OPTION_ARG_OPTIONAL,
+ "Do not purge the cache on file open"},
+ {"global-timer-wheel", ARGP_GLOBAL_TIMER_WHEEL, "BOOL", OPTION_ARG_OPTIONAL,
+ "Instantiate process global timer-wheel"},
+ {"thin-client", ARGP_THIN_CLIENT_KEY, 0, 0,
+ "Enables thin mount and connects via gfproxyd daemon"},
+
+ {0, 0, 0, 0, "Fuse options:"},
+ {"direct-io-mode", ARGP_DIRECT_IO_MODE_KEY, "BOOL|auto",
+ OPTION_ARG_OPTIONAL, "Specify direct I/O strategy [default: \"auto\"]"},
+ {"entry-timeout", ARGP_ENTRY_TIMEOUT_KEY, "SECONDS", 0,
+ "Set entry timeout to SECONDS in fuse kernel module [default: 1]"},
+ {"negative-timeout", ARGP_NEGATIVE_TIMEOUT_KEY, "SECONDS", 0,
+ "Set negative timeout to SECONDS in fuse kernel module [default: 0]"},
+ {"attribute-timeout", ARGP_ATTRIBUTE_TIMEOUT_KEY, "SECONDS", 0,
+ "Set attribute timeout to SECONDS for inodes in fuse kernel module "
+ "[default: 1]"},
+ {"gid-timeout", ARGP_GID_TIMEOUT_KEY, "SECONDS", 0,
+ "Set auxiliary group list timeout to SECONDS for fuse translator "
+ "[default: 300]"},
+ {"resolve-gids", ARGP_RESOLVE_GIDS_KEY, 0, 0,
+ "Resolve all auxiliary groups in fuse translator (max 32 otherwise)"},
+ {"background-qlen", ARGP_FUSE_BACKGROUND_QLEN_KEY, "N", 0,
+ "Set fuse module's background queue length to N "
+ "[default: 64]"},
+ {"congestion-threshold", ARGP_FUSE_CONGESTION_THRESHOLD_KEY, "N", 0,
+ "Set fuse module's congestion threshold to N "
+ "[default: 48]"},
#ifdef GF_LINUX_HOST_OS
- {"oom-score-adj", ARGP_OOM_SCORE_ADJ_KEY, "INTEGER", 0,
- "Set oom_score_adj value for process"
- "[default: 0]"},
+ {"oom-score-adj", ARGP_OOM_SCORE_ADJ_KEY, "INTEGER", 0,
+ "Set oom_score_adj value for process"
+ "[default: 0]"},
#endif
- {"client-pid", ARGP_CLIENT_PID_KEY, "PID", OPTION_HIDDEN,
- "client will authenticate itself with process id PID to server"},
- {"no-root-squash", ARGP_FUSE_NO_ROOT_SQUASH_KEY, "BOOL",
- OPTION_ARG_OPTIONAL, "disable/enable root squashing for the trusted "
- "client"},
- {"user-map-root", ARGP_USER_MAP_ROOT_KEY, "USER", OPTION_HIDDEN,
- "replace USER with root in messages"},
- {"dump-fuse", ARGP_DUMP_FUSE_KEY, "PATH", 0,
- "Dump fuse traffic to PATH"},
- {"volfile-check", ARGP_VOLFILE_CHECK_KEY, 0, 0,
- "Enable strict volume file checking"},
- {"no-mem-accounting", ARGP_MEM_ACCOUNTING_KEY, 0, OPTION_HIDDEN,
- "disable internal memory accounting"},
- {"fuse-mountopts", ARGP_FUSE_MOUNTOPTS_KEY, "OPTIONS", OPTION_HIDDEN,
- "Extra mount options to pass to FUSE"},
- {"use-readdirp", ARGP_FUSE_USE_READDIRP_KEY, "BOOL", OPTION_ARG_OPTIONAL,
- "Use readdirp mode in fuse kernel module"
- " [default: \"yes\"]"},
- {"secure-mgmt", ARGP_SECURE_MGMT_KEY, "BOOL", OPTION_ARG_OPTIONAL,
- "Override default for secure (SSL) management connections"},
- {"localtime-logging", ARGP_LOCALTIME_LOGGING_KEY, 0, 0,
- "Enable localtime logging"},
- {"process-name", ARGP_PROCESS_NAME_KEY, "PROCESS-NAME", OPTION_HIDDEN,
- "option to specify the process type" },
- {"event-history", ARGP_FUSE_EVENT_HISTORY_KEY, "BOOL",
- OPTION_ARG_OPTIONAL, "disable/enable fuse event-history"},
- {"reader-thread-count", ARGP_READER_THREAD_COUNT_KEY, "INTEGER",
- OPTION_ARG_OPTIONAL, "set fuse reader thread count"},
- {"kernel-writeback-cache", ARGP_KERNEL_WRITEBACK_CACHE_KEY, "BOOL",
- OPTION_ARG_OPTIONAL, "enable fuse in-kernel writeback cache"},
- {"attr-times-granularity", ARGP_ATTR_TIMES_GRANULARITY_KEY, "NS",
- OPTION_ARG_OPTIONAL, "declare supported granularity of file attribute"
- " times in nanoseconds"},
- {0, 0, 0, 0, "Miscellaneous Options:"},
- {0, }
-};
-
-
-static struct argp argp = { gf_options, parse_opts, argp_doc, gf_doc };
-
-
-int glusterfs_pidfile_cleanup (glusterfs_ctx_t *ctx);
-int glusterfs_volumes_init (glusterfs_ctx_t *ctx);
-int glusterfs_mgmt_init (glusterfs_ctx_t *ctx);
-int glusterfs_listener_init (glusterfs_ctx_t *ctx);
-int glusterfs_listener_stop (glusterfs_ctx_t *ctx);
+ {"client-pid", ARGP_CLIENT_PID_KEY, "PID", OPTION_HIDDEN,
+ "client will authenticate itself with process id PID to server"},
+ {"no-root-squash", ARGP_FUSE_NO_ROOT_SQUASH_KEY, "BOOL",
+ OPTION_ARG_OPTIONAL,
+ "disable/enable root squashing for the trusted "
+ "client"},
+ {"user-map-root", ARGP_USER_MAP_ROOT_KEY, "USER", OPTION_HIDDEN,
+ "replace USER with root in messages"},
+ {"dump-fuse", ARGP_DUMP_FUSE_KEY, "PATH", 0, "Dump fuse traffic to PATH"},
+ {"volfile-check", ARGP_VOLFILE_CHECK_KEY, 0, 0,
+ "Enable strict volume file checking"},
+ {"no-mem-accounting", ARGP_MEM_ACCOUNTING_KEY, 0, OPTION_HIDDEN,
+ "disable internal memory accounting"},
+ {"fuse-mountopts", ARGP_FUSE_MOUNTOPTS_KEY, "OPTIONS", OPTION_HIDDEN,
+ "Extra mount options to pass to FUSE"},
+ {"use-readdirp", ARGP_FUSE_USE_READDIRP_KEY, "BOOL", OPTION_ARG_OPTIONAL,
+ "Use readdirp mode in fuse kernel module"
+ " [default: \"yes\"]"},
+ {"secure-mgmt", ARGP_SECURE_MGMT_KEY, "BOOL", OPTION_ARG_OPTIONAL,
+ "Override default for secure (SSL) management connections"},
+ {"localtime-logging", ARGP_LOCALTIME_LOGGING_KEY, 0, 0,
+ "Enable localtime logging"},
+ {"process-name", ARGP_PROCESS_NAME_KEY, "PROCESS-NAME", OPTION_HIDDEN,
+ "option to specify the process type"},
+ {"event-history", ARGP_FUSE_EVENT_HISTORY_KEY, "BOOL", OPTION_ARG_OPTIONAL,
+ "disable/enable fuse event-history"},
+ {"reader-thread-count", ARGP_READER_THREAD_COUNT_KEY, "INTEGER",
+ OPTION_ARG_OPTIONAL, "set fuse reader thread count"},
+ {"kernel-writeback-cache", ARGP_KERNEL_WRITEBACK_CACHE_KEY, "BOOL",
+ OPTION_ARG_OPTIONAL, "enable fuse in-kernel writeback cache"},
+ {"attr-times-granularity", ARGP_ATTR_TIMES_GRANULARITY_KEY, "NS",
+ OPTION_ARG_OPTIONAL,
+ "declare supported granularity of file attribute"
+ " times in nanoseconds"},
+ {0, 0, 0, 0, "Miscellaneous Options:"},
+ {
+ 0,
+ }};
+
+static struct argp argp = {gf_options, parse_opts, argp_doc, gf_doc};
+int
+glusterfs_pidfile_cleanup(glusterfs_ctx_t *ctx);
+int
+glusterfs_volumes_init(glusterfs_ctx_t *ctx);
+int
+glusterfs_mgmt_init(glusterfs_ctx_t *ctx);
+int
+glusterfs_listener_init(glusterfs_ctx_t *ctx);
+int
+glusterfs_listener_stop(glusterfs_ctx_t *ctx);
static int
-set_fuse_mount_options (glusterfs_ctx_t *ctx, dict_t *options)
+set_fuse_mount_options(glusterfs_ctx_t *ctx, dict_t *options)
{
- int ret = 0;
- cmd_args_t *cmd_args = NULL;
- char *mount_point = NULL;
- char cwd[PATH_MAX] = {0,};
+ int ret = 0;
+ cmd_args_t *cmd_args = NULL;
+ char *mount_point = NULL;
+ char cwd[PATH_MAX] = {
+ 0,
+ };
+
+ cmd_args = &ctx->cmd_args;
+
+ /* Check if mount-point is absolute path,
+ * if not convert to absolute path by concatenating with CWD
+ */
+ if (cmd_args->mount_point[0] != '/') {
+ if (getcwd(cwd, PATH_MAX) != NULL) {
+ ret = gf_asprintf(&mount_point, "%s/%s", cwd,
+ cmd_args->mount_point);
+ if (ret == -1) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_1,
+ "Could not create absolute mountpoint "
+ "path");
+ goto err;
+ }
+ } else {
+ gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_2,
+ "Could not get current working directory");
+ goto err;
+ }
+ } else
+ mount_point = gf_strdup(cmd_args->mount_point);
- cmd_args = &ctx->cmd_args;
+ ret = dict_set_dynstr(options, ZR_MOUNTPOINT_OPT, mount_point);
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_3,
+ "failed to set mount-point to options dictionary");
+ goto err;
+ }
- /* Check if mount-point is absolute path,
- * if not convert to absolute path by concatenating with CWD
- */
- if (cmd_args->mount_point[0] != '/') {
- if (getcwd (cwd, PATH_MAX) != NULL) {
- ret = gf_asprintf (&mount_point, "%s/%s", cwd,
- cmd_args->mount_point);
- if (ret == -1) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, errno,
- glusterfsd_msg_1,
- "Could not create absolute mountpoint "
- "path");
- goto err;
- }
- } else {
- gf_msg ("glusterfsd", GF_LOG_ERROR, errno,
- glusterfsd_msg_2,
- "Could not get current working directory");
- goto err;
- }
- } else
- mount_point = gf_strdup (cmd_args->mount_point);
+ if (cmd_args->fuse_attribute_timeout >= 0) {
+ ret = dict_set_double(options, ZR_ATTR_TIMEOUT_OPT,
+ cmd_args->fuse_attribute_timeout);
- ret = dict_set_dynstr (options, ZR_MOUNTPOINT_OPT, mount_point);
if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_3,
- "failed to set mount-point to options dictionary");
- goto err;
+ gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_4,
+ "failed to set dict value "
+ "for key " ZR_ATTR_TIMEOUT_OPT);
+ goto err;
}
+ }
- if (cmd_args->fuse_attribute_timeout >= 0) {
- ret = dict_set_double (options, ZR_ATTR_TIMEOUT_OPT,
- cmd_args->fuse_attribute_timeout);
+ if (cmd_args->fuse_entry_timeout >= 0) {
+ ret = dict_set_double(options, ZR_ENTRY_TIMEOUT_OPT,
+ cmd_args->fuse_entry_timeout);
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key " ZR_ENTRY_TIMEOUT_OPT);
+ goto err;
+ }
+ }
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, errno,
- glusterfsd_msg_4, "failed to set dict value "
- "for key " ZR_ATTR_TIMEOUT_OPT);
- goto err;
- }
+ if (cmd_args->fuse_negative_timeout >= 0) {
+ ret = dict_set_double(options, ZR_NEGATIVE_TIMEOUT_OPT,
+ cmd_args->fuse_negative_timeout);
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key " ZR_NEGATIVE_TIMEOUT_OPT);
+ goto err;
}
+ }
- if (cmd_args->fuse_entry_timeout >= 0) {
- ret = dict_set_double (options, ZR_ENTRY_TIMEOUT_OPT,
- cmd_args->fuse_entry_timeout);
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- ZR_ENTRY_TIMEOUT_OPT);
- goto err;
- }
+ if (cmd_args->client_pid_set) {
+ ret = dict_set_int32(options, "client-pid", cmd_args->client_pid);
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key client-pid");
+ goto err;
}
+ }
- if (cmd_args->fuse_negative_timeout >= 0) {
- ret = dict_set_double (options, ZR_NEGATIVE_TIMEOUT_OPT,
- cmd_args->fuse_negative_timeout);
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- ZR_NEGATIVE_TIMEOUT_OPT);
- goto err;
- }
+ if (cmd_args->uid_map_root) {
+ ret = dict_set_int32(options, "uid-map-root", cmd_args->uid_map_root);
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key "
+ "uid-map-root");
+ goto err;
}
+ }
- if (cmd_args->client_pid_set) {
- ret = dict_set_int32 (options, "client-pid",
- cmd_args->client_pid);
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key client-pid");
- goto err;
- }
+ if (cmd_args->volfile_check) {
+ ret = dict_set_int32(options, ZR_STRICT_VOLFILE_CHECK,
+ cmd_args->volfile_check);
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key " ZR_STRICT_VOLFILE_CHECK);
+ goto err;
}
+ }
- if (cmd_args->uid_map_root) {
- ret = dict_set_int32 (options, "uid-map-root",
- cmd_args->uid_map_root);
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "uid-map-root");
- goto err;
- }
+ if (cmd_args->dump_fuse) {
+ ret = dict_set_static_ptr(options, ZR_DUMP_FUSE, cmd_args->dump_fuse);
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key " ZR_DUMP_FUSE);
+ goto err;
}
+ }
- if (cmd_args->volfile_check) {
- ret = dict_set_int32 (options, ZR_STRICT_VOLFILE_CHECK,
- cmd_args->volfile_check);
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- ZR_STRICT_VOLFILE_CHECK);
- goto err;
- }
+ if (cmd_args->acl) {
+ ret = dict_set_static_ptr(options, "acl", "on");
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key acl");
+ goto err;
}
+ }
- if (cmd_args->dump_fuse) {
- ret = dict_set_static_ptr (options, ZR_DUMP_FUSE,
- cmd_args->dump_fuse);
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- ZR_DUMP_FUSE);
- goto err;
- }
+ if (cmd_args->selinux) {
+ ret = dict_set_static_ptr(options, "selinux", "on");
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key selinux");
+ goto err;
}
+ }
- if (cmd_args->acl) {
- ret = dict_set_static_ptr (options, "acl", "on");
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key acl");
- goto err;
- }
+ if (cmd_args->capability) {
+ ret = dict_set_static_ptr(options, "capability", "on");
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key capability");
+ goto err;
}
+ }
- if (cmd_args->selinux) {
- ret = dict_set_static_ptr (options, "selinux", "on");
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key selinux");
- goto err;
- }
+ if (cmd_args->aux_gfid_mount) {
+ ret = dict_set_static_ptr(options, "virtual-gfid-access", "on");
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key "
+ "aux-gfid-mount");
+ goto err;
}
+ }
- if (cmd_args->capability) {
- ret = dict_set_static_ptr (options, "capability", "on");
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key capability");
- goto err;
- }
+ if (cmd_args->enable_ino32) {
+ ret = dict_set_static_ptr(options, "enable-ino32", "on");
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key "
+ "enable-ino32");
+ goto err;
}
+ }
- if (cmd_args->aux_gfid_mount) {
- ret = dict_set_static_ptr (options, "virtual-gfid-access",
- "on");
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "aux-gfid-mount");
- goto err;
- }
+ if (cmd_args->read_only) {
+ ret = dict_set_static_ptr(options, "read-only", "on");
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key read-only");
+ goto err;
}
+ }
- if (cmd_args->enable_ino32) {
- ret = dict_set_static_ptr (options, "enable-ino32", "on");
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "enable-ino32");
- goto err;
- }
+ switch (cmd_args->fopen_keep_cache) {
+ case GF_OPTION_ENABLE:
+ ret = dict_set_static_ptr(options, "fopen-keep-cache", "on");
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key "
+ "fopen-keep-cache");
+ goto err;
+ }
+ break;
+ case GF_OPTION_DISABLE:
+ ret = dict_set_static_ptr(options, "fopen-keep-cache", "off");
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key "
+ "fopen-keep-cache");
+ goto err;
+ }
+ break;
+ case GF_OPTION_DEFERRED: /* default */
+ default:
+ gf_msg_debug("glusterfsd", 0, "fopen-keep-cache mode %d",
+ cmd_args->fopen_keep_cache);
+ break;
+ }
+
+ if (cmd_args->gid_timeout_set) {
+ ret = dict_set_int32(options, "gid-timeout", cmd_args->gid_timeout);
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key gid-timeout");
+ goto err;
}
+ }
- if (cmd_args->read_only) {
- ret = dict_set_static_ptr (options, "read-only", "on");
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key read-only");
- goto err;
- }
+ if (cmd_args->resolve_gids) {
+ ret = dict_set_static_ptr(options, "resolve-gids", "on");
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key "
+ "resolve-gids");
+ goto err;
}
+ }
- switch (cmd_args->fopen_keep_cache) {
- case GF_OPTION_ENABLE:
- ret = dict_set_static_ptr(options, "fopen-keep-cache",
- "on");
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "fopen-keep-cache");
- goto err;
- }
- break;
- case GF_OPTION_DISABLE:
- ret = dict_set_static_ptr(options, "fopen-keep-cache",
- "off");
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "fopen-keep-cache");
- goto err;
- }
- break;
- case GF_OPTION_DEFERRED: /* default */
- default:
- gf_msg_debug ("glusterfsd", 0, "fopen-keep-cache mode %d",
- cmd_args->fopen_keep_cache);
- break;
- }
-
- if (cmd_args->gid_timeout_set) {
- ret = dict_set_int32(options, "gid-timeout",
- cmd_args->gid_timeout);
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key gid-timeout");
- goto err;
- }
- }
-
- if (cmd_args->resolve_gids) {
- ret = dict_set_static_ptr (options, "resolve-gids", "on");
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "resolve-gids");
- goto err;
- }
+ if (cmd_args->background_qlen) {
+ ret = dict_set_int32(options, "background-qlen",
+ cmd_args->background_qlen);
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key "
+ "background-qlen");
+ goto err;
+ }
+ }
+ if (cmd_args->congestion_threshold) {
+ ret = dict_set_int32(options, "congestion-threshold",
+ cmd_args->congestion_threshold);
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key "
+ "congestion-threshold");
+ goto err;
}
+ }
- if (cmd_args->background_qlen) {
- ret = dict_set_int32 (options, "background-qlen",
- cmd_args->background_qlen);
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "background-qlen");
- goto err;
- }
- }
- if (cmd_args->congestion_threshold) {
- ret = dict_set_int32 (options, "congestion-threshold",
- cmd_args->congestion_threshold);
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "congestion-threshold");
- goto err;
- }
- }
-
- switch (cmd_args->fuse_direct_io_mode) {
+ switch (cmd_args->fuse_direct_io_mode) {
case GF_OPTION_DISABLE: /* disable */
- ret = dict_set_static_ptr (options, ZR_DIRECT_IO_OPT,
- "disable");
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_5,
- "failed to set 'disable' for key "
- ZR_DIRECT_IO_OPT);
- goto err;
- }
- break;
+ ret = dict_set_static_ptr(options, ZR_DIRECT_IO_OPT, "disable");
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_5,
+ "failed to set 'disable' for key " ZR_DIRECT_IO_OPT);
+ goto err;
+ }
+ break;
case GF_OPTION_ENABLE: /* enable */
- ret = dict_set_static_ptr (options, ZR_DIRECT_IO_OPT,
- "enable");
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_6,
- "failed to set 'enable' for key "
- ZR_DIRECT_IO_OPT);
- goto err;
- }
- break;
+ ret = dict_set_static_ptr(options, ZR_DIRECT_IO_OPT, "enable");
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_6,
+ "failed to set 'enable' for key " ZR_DIRECT_IO_OPT);
+ goto err;
+ }
+ break;
case GF_OPTION_DEFERRED: /* auto */
default:
- gf_msg_debug ("glusterfsd", 0, "fuse direct io type %d",
- cmd_args->fuse_direct_io_mode);
- break;
- }
+ gf_msg_debug("glusterfsd", 0, "fuse direct io type %d",
+ cmd_args->fuse_direct_io_mode);
+ break;
+ }
- switch (cmd_args->no_root_squash) {
+ switch (cmd_args->no_root_squash) {
case GF_OPTION_ENABLE: /* enable */
- ret = dict_set_static_ptr (options, "no-root-squash",
- "enable");
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_6,
- "failed to set 'enable' for key "
- "no-root-squash");
- goto err;
- }
- break;
+ ret = dict_set_static_ptr(options, "no-root-squash", "enable");
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_6,
+ "failed to set 'enable' for key "
+ "no-root-squash");
+ goto err;
+ }
+ break;
case GF_OPTION_DISABLE: /* disable/default */
default:
- ret = dict_set_static_ptr (options, "no-root-squash",
- "disable");
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_5,
- "failed to set 'disable' for key "
- "no-root-squash");
- goto err;
- }
- gf_msg_debug ("glusterfsd", 0, "fuse no-root-squash mode %d",
- cmd_args->no_root_squash);
- break;
- }
-
- if (!cmd_args->no_daemon_mode) {
- ret = dict_set_static_ptr (options, "sync-to-mount",
- "enable");
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key sync-mtab");
- goto err;
- }
+ ret = dict_set_static_ptr(options, "no-root-squash", "disable");
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_5,
+ "failed to set 'disable' for key "
+ "no-root-squash");
+ goto err;
+ }
+ gf_msg_debug("glusterfsd", 0, "fuse no-root-squash mode %d",
+ cmd_args->no_root_squash);
+ break;
+ }
+
+ if (!cmd_args->no_daemon_mode) {
+ ret = dict_set_static_ptr(options, "sync-to-mount", "enable");
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key sync-mtab");
+ goto err;
}
+ }
- if (cmd_args->use_readdirp) {
- ret = dict_set_str (options, "use-readdirp",
- cmd_args->use_readdirp);
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "use-readdirp");
- goto err;
- }
- }
- if (cmd_args->event_history) {
- ret = dict_set_str (options, "event-history",
- cmd_args->event_history);
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "event-history");
- goto err;
- }
- }
- if (cmd_args->thin_client) {
- ret = dict_set_static_ptr (options, "thin-client", "on");
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "thin-client");
- goto err;
- }
- }
- if (cmd_args->reader_thread_count) {
- ret = dict_set_uint32 (options, "reader-thread-count",
- cmd_args->reader_thread_count);
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "reader-thread-count");
- goto err;
- }
+ if (cmd_args->use_readdirp) {
+ ret = dict_set_str(options, "use-readdirp", cmd_args->use_readdirp);
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key "
+ "use-readdirp");
+ goto err;
+ }
+ }
+ if (cmd_args->event_history) {
+ ret = dict_set_str(options, "event-history", cmd_args->event_history);
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key "
+ "event-history");
+ goto err;
+ }
+ }
+ if (cmd_args->thin_client) {
+ ret = dict_set_static_ptr(options, "thin-client", "on");
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key "
+ "thin-client");
+ goto err;
+ }
+ }
+ if (cmd_args->reader_thread_count) {
+ ret = dict_set_uint32(options, "reader-thread-count",
+ cmd_args->reader_thread_count);
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key "
+ "reader-thread-count");
+ goto err;
}
- switch (cmd_args->kernel_writeback_cache) {
+ }
+ switch (cmd_args->kernel_writeback_cache) {
case GF_OPTION_ENABLE:
- ret = dict_set_static_ptr(options, "kernel-writeback-cache",
- "on");
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "kernel-writeback-cache");
- goto err;
- }
- break;
+ ret = dict_set_static_ptr(options, "kernel-writeback-cache", "on");
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key "
+ "kernel-writeback-cache");
+ goto err;
+ }
+ break;
case GF_OPTION_DISABLE:
- ret = dict_set_static_ptr(options, "kernel-writeback-cache",
- "off");
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "kernel-writeback-cache");
- goto err;
- }
- break;
+ ret = dict_set_static_ptr(options, "kernel-writeback-cache", "off");
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key "
+ "kernel-writeback-cache");
+ goto err;
+ }
+ break;
case GF_OPTION_DEFERRED: /* default */
default:
- gf_msg_debug ("glusterfsd", 0, "kernel-writeback-cache mode %d",
- cmd_args->kernel_writeback_cache);
- break;
- }
- if (cmd_args->attr_times_granularity) {
- ret = dict_set_uint32 (options, "attr-times-granularity",
- cmd_args->attr_times_granularity);
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "attr-times-granularity");
- goto err;
- }
+ gf_msg_debug("glusterfsd", 0, "kernel-writeback-cache mode %d",
+ cmd_args->kernel_writeback_cache);
+ break;
+ }
+ if (cmd_args->attr_times_granularity) {
+ ret = dict_set_uint32(options, "attr-times-granularity",
+ cmd_args->attr_times_granularity);
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key "
+ "attr-times-granularity");
+ goto err;
}
+ }
-
- ret = 0;
+ ret = 0;
err:
- return ret;
+ return ret;
}
int
-create_fuse_mount (glusterfs_ctx_t *ctx)
+create_fuse_mount(glusterfs_ctx_t *ctx)
{
- int ret = 0;
- cmd_args_t *cmd_args = NULL;
- xlator_t *master = NULL;
+ int ret = 0;
+ cmd_args_t *cmd_args = NULL;
+ xlator_t *master = NULL;
- cmd_args = &ctx->cmd_args;
+ cmd_args = &ctx->cmd_args;
- if (!cmd_args->mount_point) {
- gf_msg_trace ("glusterfsd", 0,
- "mount point not found, not a client process");
- return 0;
- }
-
- if (ctx->process_mode != GF_CLIENT_PROCESS) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_7,
- "Not a client process, not performing mount operation");
- return -1;
- }
-
- master = GF_CALLOC (1, sizeof (*master),
- gfd_mt_xlator_t);
- if (!master)
- goto err;
-
- master->name = gf_strdup ("fuse");
- if (!master->name)
- goto err;
-
- if (xlator_set_type (master, "mount/fuse") == -1) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_8,
- "MOUNT-POINT %s initialization failed",
- cmd_args->mount_point);
- goto err;
- }
-
- master->ctx = ctx;
- master->options = get_new_dict ();
- if (!master->options)
- goto err;
-
- ret = set_fuse_mount_options (ctx, master->options);
- if (ret)
- goto err;
-
- if (cmd_args->fuse_mountopts) {
- ret = dict_set_static_ptr (master->options, ZR_FUSE_MOUNTOPTS,
- cmd_args->fuse_mountopts);
- if (ret < 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- ZR_FUSE_MOUNTOPTS);
- goto err;
- }
+ if (!cmd_args->mount_point) {
+ gf_msg_trace("glusterfsd", 0,
+ "mount point not found, not a client process");
+ return 0;
+ }
+
+ if (ctx->process_mode != GF_CLIENT_PROCESS) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_7,
+ "Not a client process, not performing mount operation");
+ return -1;
+ }
+
+ master = GF_CALLOC(1, sizeof(*master), gfd_mt_xlator_t);
+ if (!master)
+ goto err;
+
+ master->name = gf_strdup("fuse");
+ if (!master->name)
+ goto err;
+
+ if (xlator_set_type(master, "mount/fuse") == -1) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_8,
+ "MOUNT-POINT %s initialization failed", cmd_args->mount_point);
+ goto err;
+ }
+
+ master->ctx = ctx;
+ master->options = get_new_dict();
+ if (!master->options)
+ goto err;
+
+ ret = set_fuse_mount_options(ctx, master->options);
+ if (ret)
+ goto err;
+
+ if (cmd_args->fuse_mountopts) {
+ ret = dict_set_static_ptr(master->options, ZR_FUSE_MOUNTOPTS,
+ cmd_args->fuse_mountopts);
+ if (ret < 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
+ "failed to set dict value for key " ZR_FUSE_MOUNTOPTS);
+ goto err;
}
+ }
- ret = xlator_init (master);
- if (ret) {
- gf_msg_debug ("glusterfsd", 0,
- "failed to initialize fuse translator");
- goto err;
- }
+ ret = xlator_init(master);
+ if (ret) {
+ gf_msg_debug("glusterfsd", 0, "failed to initialize fuse translator");
+ goto err;
+ }
- ctx->master = master;
+ ctx->master = master;
- return 0;
+ return 0;
err:
- if (master) {
- xlator_destroy (master);
- }
+ if (master) {
+ xlator_destroy(master);
+ }
- return 1;
+ return 1;
}
-
static FILE *
-get_volfp (glusterfs_ctx_t *ctx)
+get_volfp(glusterfs_ctx_t *ctx)
{
- int ret = 0;
- cmd_args_t *cmd_args = NULL;
- FILE *specfp = NULL;
- struct stat statbuf;
+ int ret = 0;
+ cmd_args_t *cmd_args = NULL;
+ FILE *specfp = NULL;
+ struct stat statbuf;
- cmd_args = &ctx->cmd_args;
+ cmd_args = &ctx->cmd_args;
- ret = sys_lstat (cmd_args->volfile, &statbuf);
- if (ret == -1) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_9,
- "loading volume file %s failed", cmd_args->volfile);
- return NULL;
- }
+ ret = sys_lstat(cmd_args->volfile, &statbuf);
+ if (ret == -1) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_9,
+ "loading volume file %s failed", cmd_args->volfile);
+ return NULL;
+ }
- if ((specfp = fopen (cmd_args->volfile, "r")) == NULL) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_9,
- "loading volume file %s failed", cmd_args->volfile);
- return NULL;
- }
+ if ((specfp = fopen(cmd_args->volfile, "r")) == NULL) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_9,
+ "loading volume file %s failed", cmd_args->volfile);
+ return NULL;
+ }
- gf_msg_debug ("glusterfsd", 0, "loading volume file %s",
- cmd_args->volfile);
+ gf_msg_debug("glusterfsd", 0, "loading volume file %s", cmd_args->volfile);
- return specfp;
+ return specfp;
}
static int
-gf_remember_backup_volfile_server (char *arg)
+gf_remember_backup_volfile_server(char *arg)
{
- glusterfs_ctx_t *ctx = NULL;
- cmd_args_t *cmd_args = NULL;
- int ret = -1;
-
- ctx = glusterfsd_ctx;
- if (!ctx)
- goto out;
- cmd_args = &ctx->cmd_args;
-
- if(!cmd_args)
- goto out;
-
- ret = gf_set_volfile_server_common(cmd_args, arg,
- GF_DEFAULT_VOLFILE_TRANSPORT,
- GF_DEFAULT_BASE_PORT);
- if (ret) {
- gf_log ("glusterfs", GF_LOG_ERROR,
- "failed to set volfile server: %s", strerror (errno));
- }
+ glusterfs_ctx_t *ctx = NULL;
+ cmd_args_t *cmd_args = NULL;
+ int ret = -1;
+
+ ctx = glusterfsd_ctx;
+ if (!ctx)
+ goto out;
+ cmd_args = &ctx->cmd_args;
+
+ if (!cmd_args)
+ goto out;
+
+ ret = gf_set_volfile_server_common(
+ cmd_args, arg, GF_DEFAULT_VOLFILE_TRANSPORT, GF_DEFAULT_BASE_PORT);
+ if (ret) {
+ gf_log("glusterfs", GF_LOG_ERROR, "failed to set volfile server: %s",
+ strerror(errno));
+ }
out:
- return ret;
+ return ret;
}
static int
-gf_remember_xlator_option (char *arg)
+gf_remember_xlator_option(char *arg)
{
- glusterfs_ctx_t *ctx = NULL;
- cmd_args_t *cmd_args = NULL;
- xlator_cmdline_option_t *option = NULL;
- int ret = -1;
- char *dot = NULL;
- char *equals = NULL;
-
- ctx = glusterfsd_ctx;
- cmd_args = &ctx->cmd_args;
-
- option = GF_CALLOC (1, sizeof (xlator_cmdline_option_t),
- gfd_mt_xlator_cmdline_option_t);
- if (!option)
- goto out;
-
- INIT_LIST_HEAD (&option->cmd_args);
-
- dot = strchr (arg, '.');
- if (!dot) {
- gf_msg ("", GF_LOG_WARNING, 0, glusterfsd_msg_10,
- "xlator option %s is invalid", arg);
- goto out;
- }
-
- option->volume = GF_MALLOC ((dot - arg) + 1, gfd_mt_char);
- if (!option->volume)
- goto out;
-
- strncpy (option->volume, arg, (dot - arg));
- option->volume[(dot - arg)] = '\0';
-
- equals = strchr (arg, '=');
- if (!equals) {
- gf_msg ("", GF_LOG_WARNING, 0, glusterfsd_msg_10,
- "xlator option %s is invalid", arg);
- goto out;
- }
-
- option->key = GF_MALLOC ((equals - dot) + 1, gfd_mt_char);
- if (!option->key)
- goto out;
-
- strncpy (option->key, dot + 1, (equals - dot - 1));
- option->key[(equals - dot - 1)] = '\0';
-
- if (!*(equals + 1)) {
- gf_msg ("", GF_LOG_WARNING, 0, glusterfsd_msg_10,
- "xlator option %s is invalid", arg);
- goto out;
- }
-
- option->value = gf_strdup (equals + 1);
-
- list_add (&option->cmd_args, &cmd_args->xlator_options);
-
- ret = 0;
+ glusterfs_ctx_t *ctx = NULL;
+ cmd_args_t *cmd_args = NULL;
+ xlator_cmdline_option_t *option = NULL;
+ int ret = -1;
+ char *dot = NULL;
+ char *equals = NULL;
+
+ ctx = glusterfsd_ctx;
+ cmd_args = &ctx->cmd_args;
+
+ option = GF_CALLOC(1, sizeof(xlator_cmdline_option_t),
+ gfd_mt_xlator_cmdline_option_t);
+ if (!option)
+ goto out;
+
+ INIT_LIST_HEAD(&option->cmd_args);
+
+ dot = strchr(arg, '.');
+ if (!dot) {
+ gf_msg("", GF_LOG_WARNING, 0, glusterfsd_msg_10,
+ "xlator option %s is invalid", arg);
+ goto out;
+ }
+
+ option->volume = GF_MALLOC((dot - arg) + 1, gfd_mt_char);
+ if (!option->volume)
+ goto out;
+
+ strncpy(option->volume, arg, (dot - arg));
+ option->volume[(dot - arg)] = '\0';
+
+ equals = strchr(arg, '=');
+ if (!equals) {
+ gf_msg("", GF_LOG_WARNING, 0, glusterfsd_msg_10,
+ "xlator option %s is invalid", arg);
+ goto out;
+ }
+
+ option->key = GF_MALLOC((equals - dot) + 1, gfd_mt_char);
+ if (!option->key)
+ goto out;
+
+ strncpy(option->key, dot + 1, (equals - dot - 1));
+ option->key[(equals - dot - 1)] = '\0';
+
+ if (!*(equals + 1)) {
+ gf_msg("", GF_LOG_WARNING, 0, glusterfsd_msg_10,
+ "xlator option %s is invalid", arg);
+ goto out;
+ }
+
+ option->value = gf_strdup(equals + 1);
+
+ list_add(&option->cmd_args, &cmd_args->xlator_options);
+
+ ret = 0;
out:
- if (ret == -1) {
- if (option) {
- GF_FREE (option->volume);
- GF_FREE (option->key);
- GF_FREE (option->value);
+ if (ret == -1) {
+ if (option) {
+ GF_FREE(option->volume);
+ GF_FREE(option->key);
+ GF_FREE(option->value);
- GF_FREE (option);
- }
+ GF_FREE(option);
}
+ }
- return ret;
+ return ret;
}
-
#ifdef GF_LINUX_HOST_OS
static struct oom_api_info {
- char *oom_api_file;
- int32_t oom_min;
- int32_t oom_max;
+ char *oom_api_file;
+ int32_t oom_min;
+ int32_t oom_max;
} oom_api_info[] = {
- { "/proc/self/oom_score_adj", OOM_SCORE_ADJ_MIN, OOM_SCORE_ADJ_MAX },
- { "/proc/self/oom_adj", OOM_DISABLE, OOM_ADJUST_MAX },
- { NULL, 0, 0 }
-};
-
+ {"/proc/self/oom_score_adj", OOM_SCORE_ADJ_MIN, OOM_SCORE_ADJ_MAX},
+ {"/proc/self/oom_adj", OOM_DISABLE, OOM_ADJUST_MAX},
+ {NULL, 0, 0}};
static struct oom_api_info *
-get_oom_api_info (void)
+get_oom_api_info(void)
{
- struct oom_api_info *api = NULL;
+ struct oom_api_info *api = NULL;
- for (api = oom_api_info; api->oom_api_file; api++) {
- if (sys_access (api->oom_api_file, F_OK) != -1) {
- return api;
- }
+ for (api = oom_api_info; api->oom_api_file; api++) {
+ if (sys_access(api->oom_api_file, F_OK) != -1) {
+ return api;
}
+ }
- return NULL;
+ return NULL;
}
#endif
static error_t
-parse_opts (int key, char *arg, struct argp_state *state)
+parse_opts(int key, char *arg, struct argp_state *state)
{
- cmd_args_t *cmd_args = NULL;
- uint32_t n = 0;
+ cmd_args_t *cmd_args = NULL;
+ uint32_t n = 0;
#ifdef GF_LINUX_HOST_OS
- int32_t k = 0;
- struct oom_api_info *api = NULL;
+ int32_t k = 0;
+ struct oom_api_info *api = NULL;
#endif
- double d = 0.0;
- gf_boolean_t b = _gf_false;
- char *pwd = NULL;
- char *tmp_str = NULL;
- char *port_str = NULL;
- struct passwd *pw = NULL;
- int ret = 0;
+ double d = 0.0;
+ gf_boolean_t b = _gf_false;
+ char *pwd = NULL;
+ char *tmp_str = NULL;
+ char *port_str = NULL;
+ struct passwd *pw = NULL;
+ int ret = 0;
- cmd_args = state->input;
+ cmd_args = state->input;
- switch (key) {
+ switch (key) {
case ARGP_VOLFILE_SERVER_KEY:
- gf_remember_backup_volfile_server (arg);
+ gf_remember_backup_volfile_server(arg);
- break;
+ break;
case ARGP_READ_ONLY_KEY:
- cmd_args->read_only = 1;
- break;
+ cmd_args->read_only = 1;
+ break;
case ARGP_ACL_KEY:
- cmd_args->acl = 1;
- gf_remember_xlator_option ("*-md-cache.cache-posix-acl=true");
- break;
+ cmd_args->acl = 1;
+ gf_remember_xlator_option("*-md-cache.cache-posix-acl=true");
+ break;
case ARGP_SELINUX_KEY:
- cmd_args->selinux = 1;
- gf_remember_xlator_option ("*-md-cache.cache-selinux=true");
- break;
+ cmd_args->selinux = 1;
+ gf_remember_xlator_option("*-md-cache.cache-selinux=true");
+ break;
case ARGP_CAPABILITY_KEY:
- cmd_args->capability = 1;
- break;
+ cmd_args->capability = 1;
+ break;
case ARGP_AUX_GFID_MOUNT_KEY:
- cmd_args->aux_gfid_mount = 1;
- break;
+ cmd_args->aux_gfid_mount = 1;
+ break;
case ARGP_INODE32_KEY:
- cmd_args->enable_ino32 = 1;
- break;
+ cmd_args->enable_ino32 = 1;
+ break;
case ARGP_WORM_KEY:
- cmd_args->worm = 1;
- break;
+ cmd_args->worm = 1;
+ break;
case ARGP_PRINT_NETGROUPS:
- cmd_args->print_netgroups = arg;
- break;
+ cmd_args->print_netgroups = arg;
+ break;
case ARGP_PRINT_EXPORTS:
- cmd_args->print_exports = arg;
- break;
+ cmd_args->print_exports = arg;
+ break;
case ARGP_PRINT_XLATORDIR_KEY:
- cmd_args->print_xlatordir = _gf_true;
- break;
+ cmd_args->print_xlatordir = _gf_true;
+ break;
case ARGP_PRINT_STATEDUMPDIR_KEY:
- cmd_args->print_statedumpdir = _gf_true;
- break;
+ cmd_args->print_statedumpdir = _gf_true;
+ break;
case ARGP_PRINT_LOGDIR_KEY:
- cmd_args->print_logdir = _gf_true;
- break;
+ cmd_args->print_logdir = _gf_true;
+ break;
case ARGP_PRINT_LIBEXECDIR_KEY:
- cmd_args->print_libexecdir = _gf_true;
- break;
+ cmd_args->print_libexecdir = _gf_true;
+ break;
case ARGP_MAC_COMPAT_KEY:
- if (!arg)
- arg = "on";
-
- if (gf_string2boolean (arg, &b) == 0) {
- cmd_args->mac_compat = b;
+ if (!arg)
+ arg = "on";
- break;
- }
+ if (gf_string2boolean(arg, &b) == 0) {
+ cmd_args->mac_compat = b;
- argp_failure (state, -1, 0,
- "invalid value \"%s\" for mac-compat", arg);
break;
+ }
+
+ argp_failure(state, -1, 0, "invalid value \"%s\" for mac-compat",
+ arg);
+ break;
case ARGP_VOLUME_FILE_KEY:
- GF_FREE (cmd_args->volfile);
-
- if (arg[0] != '/') {
- pwd = getcwd (NULL, PATH_MAX);
- if (!pwd) {
- argp_failure (state, -1, errno,
- "getcwd failed with error no %d",
- errno);
- break;
- }
- char tmp_buf[1024];
- snprintf (tmp_buf, sizeof(tmp_buf), "%s/%s", pwd, arg);
- cmd_args->volfile = gf_strdup (tmp_buf);
- free (pwd);
- } else {
- cmd_args->volfile = gf_strdup (arg);
+ GF_FREE(cmd_args->volfile);
+
+ if (arg[0] != '/') {
+ pwd = getcwd(NULL, PATH_MAX);
+ if (!pwd) {
+ argp_failure(state, -1, errno,
+ "getcwd failed with error no %d", errno);
+ break;
}
+ char tmp_buf[1024];
+ snprintf(tmp_buf, sizeof(tmp_buf), "%s/%s", pwd, arg);
+ cmd_args->volfile = gf_strdup(tmp_buf);
+ free(pwd);
+ } else {
+ cmd_args->volfile = gf_strdup(arg);
+ }
- break;
+ break;
case ARGP_LOG_LEVEL_KEY:
- if (strcasecmp (arg, ARGP_LOG_LEVEL_NONE_OPTION) == 0) {
- cmd_args->log_level = GF_LOG_NONE;
- break;
- }
- if (strcasecmp (arg, ARGP_LOG_LEVEL_CRITICAL_OPTION) == 0) {
- cmd_args->log_level = GF_LOG_CRITICAL;
- break;
- }
- if (strcasecmp (arg, ARGP_LOG_LEVEL_ERROR_OPTION) == 0) {
- cmd_args->log_level = GF_LOG_ERROR;
- break;
- }
- if (strcasecmp (arg, ARGP_LOG_LEVEL_WARNING_OPTION) == 0) {
- cmd_args->log_level = GF_LOG_WARNING;
- break;
- }
- if (strcasecmp (arg, ARGP_LOG_LEVEL_INFO_OPTION) == 0) {
- cmd_args->log_level = GF_LOG_INFO;
- break;
- }
- if (strcasecmp (arg, ARGP_LOG_LEVEL_DEBUG_OPTION) == 0) {
- cmd_args->log_level = GF_LOG_DEBUG;
- break;
- }
- if (strcasecmp (arg, ARGP_LOG_LEVEL_TRACE_OPTION) == 0) {
- cmd_args->log_level = GF_LOG_TRACE;
- break;
- }
-
- argp_failure (state, -1, 0, "unknown log level %s", arg);
+ if (strcasecmp(arg, ARGP_LOG_LEVEL_NONE_OPTION) == 0) {
+ cmd_args->log_level = GF_LOG_NONE;
+ break;
+ }
+ if (strcasecmp(arg, ARGP_LOG_LEVEL_CRITICAL_OPTION) == 0) {
+ cmd_args->log_level = GF_LOG_CRITICAL;
+ break;
+ }
+ if (strcasecmp(arg, ARGP_LOG_LEVEL_ERROR_OPTION) == 0) {
+ cmd_args->log_level = GF_LOG_ERROR;
+ break;
+ }
+ if (strcasecmp(arg, ARGP_LOG_LEVEL_WARNING_OPTION) == 0) {
+ cmd_args->log_level = GF_LOG_WARNING;
+ break;
+ }
+ if (strcasecmp(arg, ARGP_LOG_LEVEL_INFO_OPTION) == 0) {
+ cmd_args->log_level = GF_LOG_INFO;
break;
+ }
+ if (strcasecmp(arg, ARGP_LOG_LEVEL_DEBUG_OPTION) == 0) {
+ cmd_args->log_level = GF_LOG_DEBUG;
+ break;
+ }
+ if (strcasecmp(arg, ARGP_LOG_LEVEL_TRACE_OPTION) == 0) {
+ cmd_args->log_level = GF_LOG_TRACE;
+ break;
+ }
+
+ argp_failure(state, -1, 0, "unknown log level %s", arg);
+ break;
case ARGP_LOG_FILE_KEY:
- cmd_args->log_file = gf_strdup (arg);
- break;
+ cmd_args->log_file = gf_strdup(arg);
+ break;
case ARGP_VOLFILE_SERVER_PORT_KEY:
- n = 0;
+ n = 0;
- if (gf_string2uint_base10 (arg, &n) == 0) {
- cmd_args->volfile_server_port = n;
- break;
- }
-
- argp_failure (state, -1, 0,
- "unknown volfile server port %s", arg);
+ if (gf_string2uint_base10(arg, &n) == 0) {
+ cmd_args->volfile_server_port = n;
break;
+ }
+
+ argp_failure(state, -1, 0, "unknown volfile server port %s", arg);
+ break;
case ARGP_VOLFILE_SERVER_TRANSPORT_KEY:
- cmd_args->volfile_server_transport = gf_strdup (arg);
- break;
+ cmd_args->volfile_server_transport = gf_strdup(arg);
+ break;
case ARGP_VOLFILE_ID_KEY:
- cmd_args->volfile_id = gf_strdup (arg);
- break;
+ cmd_args->volfile_id = gf_strdup(arg);
+ break;
case ARGP_THIN_CLIENT_KEY:
- cmd_args->thin_client = _gf_true;
- break;
+ cmd_args->thin_client = _gf_true;
+ break;
case ARGP_PID_FILE_KEY:
- cmd_args->pid_file = gf_strdup (arg);
- break;
+ cmd_args->pid_file = gf_strdup(arg);
+ break;
case ARGP_SOCK_FILE_KEY:
- cmd_args->sock_file = gf_strdup (arg);
- break;
+ cmd_args->sock_file = gf_strdup(arg);
+ break;
case ARGP_NO_DAEMON_KEY:
- cmd_args->no_daemon_mode = ENABLE_NO_DAEMON_MODE;
- break;
+ cmd_args->no_daemon_mode = ENABLE_NO_DAEMON_MODE;
+ break;
case ARGP_RUN_ID_KEY:
- cmd_args->run_id = gf_strdup (arg);
- break;
+ cmd_args->run_id = gf_strdup(arg);
+ break;
case ARGP_DEBUG_KEY:
- cmd_args->debug_mode = ENABLE_DEBUG_MODE;
- break;
+ cmd_args->debug_mode = ENABLE_DEBUG_MODE;
+ break;
case ARGP_VOLFILE_MAX_FETCH_ATTEMPTS:
- cmd_args->max_connect_attempts = 1;
- break;
+ cmd_args->max_connect_attempts = 1;
+ break;
case ARGP_DIRECT_IO_MODE_KEY:
- if (!arg)
- arg = "on";
+ if (!arg)
+ arg = "on";
- if (gf_string2boolean (arg, &b) == 0) {
- cmd_args->fuse_direct_io_mode = b;
-
- break;
- }
+ if (gf_string2boolean(arg, &b) == 0) {
+ cmd_args->fuse_direct_io_mode = b;
- if (strcmp (arg, "auto") == 0)
- break;
+ break;
+ }
- argp_failure (state, -1, 0,
- "unknown direct I/O mode setting \"%s\"", arg);
+ if (strcmp(arg, "auto") == 0)
break;
+ argp_failure(state, -1, 0, "unknown direct I/O mode setting \"%s\"",
+ arg);
+ break;
+
case ARGP_FUSE_NO_ROOT_SQUASH_KEY:
- cmd_args->no_root_squash = _gf_true;
- break;
+ cmd_args->no_root_squash = _gf_true;
+ break;
case ARGP_ENTRY_TIMEOUT_KEY:
- d = 0.0;
+ d = 0.0;
- gf_string2double (arg, &d);
- if (!(d < 0.0)) {
- cmd_args->fuse_entry_timeout = d;
- break;
- }
-
- argp_failure (state, -1, 0, "unknown entry timeout %s", arg);
+ gf_string2double(arg, &d);
+ if (!(d < 0.0)) {
+ cmd_args->fuse_entry_timeout = d;
break;
+ }
- case ARGP_NEGATIVE_TIMEOUT_KEY:
- d = 0.0;
+ argp_failure(state, -1, 0, "unknown entry timeout %s", arg);
+ break;
- ret = gf_string2double (arg, &d);
- if ((ret == 0) && !(d < 0.0)) {
- cmd_args->fuse_negative_timeout = d;
- break;
- }
+ case ARGP_NEGATIVE_TIMEOUT_KEY:
+ d = 0.0;
- argp_failure (state, -1, 0, "unknown negative timeout %s", arg);
+ ret = gf_string2double(arg, &d);
+ if ((ret == 0) && !(d < 0.0)) {
+ cmd_args->fuse_negative_timeout = d;
break;
+ }
- case ARGP_ATTRIBUTE_TIMEOUT_KEY:
- d = 0.0;
+ argp_failure(state, -1, 0, "unknown negative timeout %s", arg);
+ break;
- gf_string2double (arg, &d);
- if (!(d < 0.0)) {
- cmd_args->fuse_attribute_timeout = d;
- break;
- }
+ case ARGP_ATTRIBUTE_TIMEOUT_KEY:
+ d = 0.0;
- argp_failure (state, -1, 0,
- "unknown attribute timeout %s", arg);
+ gf_string2double(arg, &d);
+ if (!(d < 0.0)) {
+ cmd_args->fuse_attribute_timeout = d;
break;
+ }
- case ARGP_CLIENT_PID_KEY:
- if (gf_string2int (arg, &cmd_args->client_pid) == 0) {
- cmd_args->client_pid_set = 1;
- break;
- }
+ argp_failure(state, -1, 0, "unknown attribute timeout %s", arg);
+ break;
- argp_failure (state, -1, 0,
- "unknown client pid %s", arg);
+ case ARGP_CLIENT_PID_KEY:
+ if (gf_string2int(arg, &cmd_args->client_pid) == 0) {
+ cmd_args->client_pid_set = 1;
break;
+ }
+
+ argp_failure(state, -1, 0, "unknown client pid %s", arg);
+ break;
case ARGP_USER_MAP_ROOT_KEY:
- pw = getpwnam (arg);
- if (pw)
- cmd_args->uid_map_root = pw->pw_uid;
- else
- argp_failure (state, -1, 0,
- "user %s does not exist", arg);
- break;
+ pw = getpwnam(arg);
+ if (pw)
+ cmd_args->uid_map_root = pw->pw_uid;
+ else
+ argp_failure(state, -1, 0, "user %s does not exist", arg);
+ break;
case ARGP_VOLFILE_CHECK_KEY:
- cmd_args->volfile_check = 1;
- break;
+ cmd_args->volfile_check = 1;
+ break;
case ARGP_VOLUME_NAME_KEY:
- cmd_args->volume_name = gf_strdup (arg);
- break;
+ cmd_args->volume_name = gf_strdup(arg);
+ break;
case ARGP_XLATOR_OPTION_KEY:
- if (gf_remember_xlator_option (arg))
- argp_failure (state, -1, 0, "invalid xlator option %s",
- arg);
+ if (gf_remember_xlator_option(arg))
+ argp_failure(state, -1, 0, "invalid xlator option %s", arg);
- break;
+ break;
case ARGP_KEY_NO_ARGS:
- break;
+ break;
case ARGP_KEY_ARG:
- if (state->arg_num >= 1)
- argp_usage (state);
+ if (state->arg_num >= 1)
+ argp_usage(state);
- cmd_args->mount_point = gf_strdup (arg);
- break;
+ cmd_args->mount_point = gf_strdup(arg);
+ break;
case ARGP_DUMP_FUSE_KEY:
- cmd_args->dump_fuse = gf_strdup (arg);
- break;
+ cmd_args->dump_fuse = gf_strdup(arg);
+ break;
case ARGP_BRICK_NAME_KEY:
- cmd_args->brick_name = gf_strdup (arg);
- break;
+ cmd_args->brick_name = gf_strdup(arg);
+ break;
case ARGP_BRICK_PORT_KEY:
- n = 0;
-
- port_str = strtok_r (arg, ",", &tmp_str);
- if (gf_string2uint_base10 (port_str, &n) == 0) {
- cmd_args->brick_port = n;
- port_str = strtok_r (NULL, ",", &tmp_str);
- if (port_str) {
- if (gf_string2uint_base10 (port_str, &n) == 0) {
- cmd_args->brick_port2 = n;
- break;
- }
- argp_failure (state, -1, 0,
- "wrong brick (listen) port %s", arg);
- }
+ n = 0;
+
+ port_str = strtok_r(arg, ",", &tmp_str);
+ if (gf_string2uint_base10(port_str, &n) == 0) {
+ cmd_args->brick_port = n;
+ port_str = strtok_r(NULL, ",", &tmp_str);
+ if (port_str) {
+ if (gf_string2uint_base10(port_str, &n) == 0) {
+ cmd_args->brick_port2 = n;
break;
+ }
+ argp_failure(state, -1, 0, "wrong brick (listen) port %s",
+ arg);
}
-
- argp_failure (state, -1, 0,
- "unknown brick (listen) port %s", arg);
break;
+ }
+
+ argp_failure(state, -1, 0, "unknown brick (listen) port %s", arg);
+ break;
case ARGP_MEM_ACCOUNTING_KEY:
- /* TODO: it should have got handled much earlier */
- //gf_mem_acct_enable_set (THIS->ctx);
- break;
+ /* TODO: it should have got handled much earlier */
+ // gf_mem_acct_enable_set (THIS->ctx);
+ break;
- case ARGP_FOPEN_KEEP_CACHE_KEY:
- if (!arg)
- arg = "on";
+ case ARGP_FOPEN_KEEP_CACHE_KEY:
+ if (!arg)
+ arg = "on";
- if (gf_string2boolean (arg, &b) == 0) {
- cmd_args->fopen_keep_cache = b;
+ if (gf_string2boolean(arg, &b) == 0) {
+ cmd_args->fopen_keep_cache = b;
- break;
- }
+ break;
+ }
- argp_failure (state, -1, 0,
- "unknown cache setting \"%s\"", arg);
+ argp_failure(state, -1, 0, "unknown cache setting \"%s\"", arg);
- break;
+ break;
case ARGP_GLOBAL_TIMER_WHEEL:
- cmd_args->global_timer_wheel = 1;
- break;
+ cmd_args->global_timer_wheel = 1;
+ break;
- case ARGP_GID_TIMEOUT_KEY:
- if (!gf_string2int(arg, &cmd_args->gid_timeout)) {
- cmd_args->gid_timeout_set = _gf_true;
- break;
- }
+ case ARGP_GID_TIMEOUT_KEY:
+ if (!gf_string2int(arg, &cmd_args->gid_timeout)) {
+ cmd_args->gid_timeout_set = _gf_true;
+ break;
+ }
- argp_failure(state, -1, 0, "unknown group list timeout %s", arg);
- break;
+ argp_failure(state, -1, 0, "unknown group list timeout %s", arg);
+ break;
case ARGP_RESOLVE_GIDS_KEY:
- cmd_args->resolve_gids = 1;
- break;
+ cmd_args->resolve_gids = 1;
+ break;
case ARGP_FUSE_BACKGROUND_QLEN_KEY:
- if (!gf_string2int (arg, &cmd_args->background_qlen))
- break;
-
- argp_failure (state, -1, 0,
- "unknown background qlen option %s", arg);
+ if (!gf_string2int(arg, &cmd_args->background_qlen))
break;
- case ARGP_FUSE_CONGESTION_THRESHOLD_KEY:
- if (!gf_string2int (arg, &cmd_args->congestion_threshold))
- break;
- argp_failure (state, -1, 0,
- "unknown congestion threshold option %s", arg);
+ argp_failure(state, -1, 0, "unknown background qlen option %s",
+ arg);
+ break;
+ case ARGP_FUSE_CONGESTION_THRESHOLD_KEY:
+ if (!gf_string2int(arg, &cmd_args->congestion_threshold))
break;
+ argp_failure(state, -1, 0, "unknown congestion threshold option %s",
+ arg);
+ break;
+
#ifdef GF_LINUX_HOST_OS
case ARGP_OOM_SCORE_ADJ_KEY:
- k = 0;
+ k = 0;
- api = get_oom_api_info();
- if (!api)
- goto no_oom_api;
+ api = get_oom_api_info();
+ if (!api)
+ goto no_oom_api;
- if (gf_string2int (arg, &k) == 0 &&
- k >= api->oom_min && k <= api->oom_max) {
- cmd_args->oom_score_adj = gf_strdup (arg);
- break;
- }
+ if (gf_string2int(arg, &k) == 0 && k >= api->oom_min &&
+ k <= api->oom_max) {
+ cmd_args->oom_score_adj = gf_strdup(arg);
+ break;
+ }
- argp_failure (state, -1, 0,
- "unknown oom_score_adj value %s", arg);
+ argp_failure(state, -1, 0, "unknown oom_score_adj value %s", arg);
-no_oom_api:
- break;
+ no_oom_api:
+ break;
#endif
case ARGP_FUSE_MOUNTOPTS_KEY:
- cmd_args->fuse_mountopts = gf_strdup (arg);
- break;
+ cmd_args->fuse_mountopts = gf_strdup(arg);
+ break;
case ARGP_FUSE_USE_READDIRP_KEY:
- if (!arg)
- arg = "yes";
+ if (!arg)
+ arg = "yes";
- if (gf_string2boolean (arg, &b) == 0) {
- if (b) {
- cmd_args->use_readdirp = "yes";
- } else {
- cmd_args->use_readdirp = "no";
- }
-
- break;
+ if (gf_string2boolean(arg, &b) == 0) {
+ if (b) {
+ cmd_args->use_readdirp = "yes";
+ } else {
+ cmd_args->use_readdirp = "no";
}
- argp_failure (state, -1, 0,
- "unknown use-readdirp setting \"%s\"", arg);
break;
+ }
+
+ argp_failure(state, -1, 0, "unknown use-readdirp setting \"%s\"",
+ arg);
+ break;
case ARGP_LOGGER:
- if (strcasecmp (arg, GF_LOGGER_GLUSTER_LOG) == 0)
- cmd_args->logger = gf_logger_glusterlog;
- else if (strcasecmp (arg, GF_LOGGER_SYSLOG) == 0)
- cmd_args->logger = gf_logger_syslog;
- else
- argp_failure (state, -1, 0, "unknown logger %s", arg);
+ if (strcasecmp(arg, GF_LOGGER_GLUSTER_LOG) == 0)
+ cmd_args->logger = gf_logger_glusterlog;
+ else if (strcasecmp(arg, GF_LOGGER_SYSLOG) == 0)
+ cmd_args->logger = gf_logger_syslog;
+ else
+ argp_failure(state, -1, 0, "unknown logger %s", arg);
- break;
+ break;
case ARGP_LOG_FORMAT:
- if (strcasecmp (arg, GF_LOG_FORMAT_NO_MSG_ID) == 0)
- cmd_args->log_format = gf_logformat_traditional;
- else if (strcasecmp (arg, GF_LOG_FORMAT_WITH_MSG_ID) == 0)
- cmd_args->log_format = gf_logformat_withmsgid;
- else
- argp_failure (state, -1, 0, "unknown log format %s",
- arg);
+ if (strcasecmp(arg, GF_LOG_FORMAT_NO_MSG_ID) == 0)
+ cmd_args->log_format = gf_logformat_traditional;
+ else if (strcasecmp(arg, GF_LOG_FORMAT_WITH_MSG_ID) == 0)
+ cmd_args->log_format = gf_logformat_withmsgid;
+ else
+ argp_failure(state, -1, 0, "unknown log format %s", arg);
- break;
+ break;
case ARGP_LOG_BUF_SIZE:
- if (gf_string2uint32 (arg, &cmd_args->log_buf_size)) {
- argp_failure (state, -1, 0,
- "unknown log buf size option %s", arg);
- } else if (cmd_args->log_buf_size > GF_LOG_LRU_BUFSIZE_MAX) {
- argp_failure (state, -1, 0,
- "Invalid log buf size %s. "
- "Valid range: ["
- GF_LOG_LRU_BUFSIZE_MIN_STR","
- GF_LOG_LRU_BUFSIZE_MAX_STR"]", arg);
- }
-
- break;
+ if (gf_string2uint32(arg, &cmd_args->log_buf_size)) {
+ argp_failure(state, -1, 0, "unknown log buf size option %s",
+ arg);
+ } else if (cmd_args->log_buf_size > GF_LOG_LRU_BUFSIZE_MAX) {
+ argp_failure(state, -1, 0,
+ "Invalid log buf size %s. "
+ "Valid range: [" GF_LOG_LRU_BUFSIZE_MIN_STR
+ "," GF_LOG_LRU_BUFSIZE_MAX_STR "]",
+ arg);
+ }
+
+ break;
case ARGP_LOG_FLUSH_TIMEOUT:
- if (gf_string2uint32 (arg, &cmd_args->log_flush_timeout)) {
- argp_failure (state, -1, 0,
- "unknown log flush timeout option %s", arg);
- } else if ((cmd_args->log_flush_timeout <
- GF_LOG_FLUSH_TIMEOUT_MIN) ||
- (cmd_args->log_flush_timeout >
- GF_LOG_FLUSH_TIMEOUT_MAX)) {
- argp_failure (state, -1, 0,
- "Invalid log flush timeout %s. "
- "Valid range: ["
- GF_LOG_FLUSH_TIMEOUT_MIN_STR","
- GF_LOG_FLUSH_TIMEOUT_MAX_STR"]", arg);
- }
-
- break;
+ if (gf_string2uint32(arg, &cmd_args->log_flush_timeout)) {
+ argp_failure(state, -1, 0,
+ "unknown log flush timeout option %s", arg);
+ } else if ((cmd_args->log_flush_timeout <
+ GF_LOG_FLUSH_TIMEOUT_MIN) ||
+ (cmd_args->log_flush_timeout >
+ GF_LOG_FLUSH_TIMEOUT_MAX)) {
+ argp_failure(state, -1, 0,
+ "Invalid log flush timeout %s. "
+ "Valid range: [" GF_LOG_FLUSH_TIMEOUT_MIN_STR
+ "," GF_LOG_FLUSH_TIMEOUT_MAX_STR "]",
+ arg);
+ }
+
+ break;
case ARGP_SECURE_MGMT_KEY:
- if (!arg)
- arg = "yes";
+ if (!arg)
+ arg = "yes";
- if (gf_string2boolean (arg, &b) == 0) {
- cmd_args->secure_mgmt = b ? 1 : 0;
- break;
- }
-
- argp_failure (state, -1, 0,
- "unknown secure-mgmt setting \"%s\"", arg);
+ if (gf_string2boolean(arg, &b) == 0) {
+ cmd_args->secure_mgmt = b ? 1 : 0;
break;
+ }
+
+ argp_failure(state, -1, 0, "unknown secure-mgmt setting \"%s\"",
+ arg);
+ break;
case ARGP_LOCALTIME_LOGGING_KEY:
- cmd_args->localtime_logging = 1;
- break;
+ cmd_args->localtime_logging = 1;
+ break;
case ARGP_PROCESS_NAME_KEY:
- cmd_args->process_name = gf_strdup (arg);
- break;
+ cmd_args->process_name = gf_strdup(arg);
+ break;
case ARGP_SUBDIR_MOUNT_KEY:
- if (arg[0] != '/') {
- argp_failure (state, -1, 0,
- "expect '/%s', provided just \"%s\"", arg, arg);
- break;
- }
- cmd_args->subdir_mount = gf_strdup (arg);
+ if (arg[0] != '/') {
+ argp_failure(state, -1, 0, "expect '/%s', provided just \"%s\"",
+ arg, arg);
break;
+ }
+ cmd_args->subdir_mount = gf_strdup(arg);
+ break;
case ARGP_FUSE_EVENT_HISTORY_KEY:
- if (!arg)
- arg = "no";
+ if (!arg)
+ arg = "no";
- if (gf_string2boolean (arg, &b) == 0) {
- if (b) {
- cmd_args->event_history = "yes";
- } else {
- cmd_args->event_history = "no";
- }
-
- break;
+ if (gf_string2boolean(arg, &b) == 0) {
+ if (b) {
+ cmd_args->event_history = "yes";
+ } else {
+ cmd_args->event_history = "no";
}
- argp_failure (state, -1, 0,
- "unknown event-history setting \"%s\"", arg);
break;
- case ARGP_READER_THREAD_COUNT_KEY:
- if (gf_string2uint32 (arg, &cmd_args->reader_thread_count)) {
- argp_failure (state, -1, 0,
- "unknown reader thread count option %s",
- arg);
- } else if ((cmd_args->reader_thread_count < 1) ||
- (cmd_args->reader_thread_count > 64)) {
- argp_failure (state, -1, 0,
- "Invalid reader thread count %s. "
- "Valid range: [\"1, 64\"]", arg);
- }
+ }
- break;
+ argp_failure(state, -1, 0, "unknown event-history setting \"%s\"",
+ arg);
+ break;
+ case ARGP_READER_THREAD_COUNT_KEY:
+ if (gf_string2uint32(arg, &cmd_args->reader_thread_count)) {
+ argp_failure(state, -1, 0,
+ "unknown reader thread count option %s", arg);
+ } else if ((cmd_args->reader_thread_count < 1) ||
+ (cmd_args->reader_thread_count > 64)) {
+ argp_failure(state, -1, 0,
+ "Invalid reader thread count %s. "
+ "Valid range: [\"1, 64\"]",
+ arg);
+ }
+
+ break;
case ARGP_KERNEL_WRITEBACK_CACHE_KEY:
- if (!arg)
- arg = "yes";
+ if (!arg)
+ arg = "yes";
- if (gf_string2boolean (arg, &b) == 0) {
- cmd_args->kernel_writeback_cache = b;
-
- break;
- }
-
- argp_failure (state, -1, 0,
- "unknown kernel writeback cache setting \"%s\"", arg);
- break;
- case ARGP_ATTR_TIMES_GRANULARITY_KEY:
- if (gf_string2uint32 (arg, &cmd_args->attr_times_granularity)) {
- argp_failure (state, -1, 0,
- "unknown attribute times granularity option %s",
- arg);
- } else if (cmd_args->attr_times_granularity > 1000000000) {
- argp_failure (state, -1, 0,
- "Invalid attribute times granularity value %s. "
- "Valid range: [\"0, 1000000000\"]", arg);
- }
+ if (gf_string2boolean(arg, &b) == 0) {
+ cmd_args->kernel_writeback_cache = b;
break;
+ }
- }
- return 0;
+ argp_failure(state, -1, 0,
+ "unknown kernel writeback cache setting \"%s\"", arg);
+ break;
+ case ARGP_ATTR_TIMES_GRANULARITY_KEY:
+ if (gf_string2uint32(arg, &cmd_args->attr_times_granularity)) {
+ argp_failure(state, -1, 0,
+ "unknown attribute times granularity option %s",
+ arg);
+ } else if (cmd_args->attr_times_granularity > 1000000000) {
+ argp_failure(state, -1, 0,
+ "Invalid attribute times granularity value %s. "
+ "Valid range: [\"0, 1000000000\"]",
+ arg);
+ }
+
+ break;
+ }
+ return 0;
}
gf_boolean_t
-should_call_fini (glusterfs_ctx_t *ctx, xlator_t *trav)
+should_call_fini(glusterfs_ctx_t *ctx, xlator_t *trav)
{
- /* There's nothing to call, so the other checks don't matter. */
- if (!trav->fini) {
- return _gf_false;
- }
+ /* There's nothing to call, so the other checks don't matter. */
+ if (!trav->fini) {
+ return _gf_false;
+ }
- /* This preserves previous behavior in glusterd. */
- if (ctx->process_mode == GF_GLUSTERD_PROCESS) {
- return _gf_true;
- }
+ /* This preserves previous behavior in glusterd. */
+ if (ctx->process_mode == GF_GLUSTERD_PROCESS) {
+ return _gf_true;
+ }
- /* This is the only one known to be safe in glusterfsd. */
- if (!strcmp(trav->type,"experimental/fdl")) {
- return _gf_true;
- }
+ /* This is the only one known to be safe in glusterfsd. */
+ if (!strcmp(trav->type, "experimental/fdl")) {
+ return _gf_true;
+ }
- return _gf_false;
+ return _gf_false;
}
void
-cleanup_and_exit (int signum)
+cleanup_and_exit(int signum)
{
- glusterfs_ctx_t *ctx = NULL;
- xlator_t *trav = NULL;
- xlator_t *top;
- xlator_t *victim;
- xlator_list_t **trav_p;
-
- ctx = glusterfsd_ctx;
-
- if (!ctx)
- return;
-
- /* To take or not to take the mutex here and in the other
- * signal handler - gf_print_trace() - is the big question here.
- *
- * Taking mutex in signal handler would mean that if the process
- * receives a fatal signal while another thread is holding
- * ctx->log.log_buf_lock to perhaps log a message in _gf_msg_internal(),
- * the offending thread hangs on the mutex lock forever without letting
- * the process exit.
- *
-      * On the other hand, not taking the mutex in signal handler would cause
- * it to modify the lru_list of buffered log messages in a racy manner,
- * corrupt the list and potentially give rise to an unending
- * cascade of SIGSEGVs and other re-entrancy issues.
- */
+ glusterfs_ctx_t *ctx = NULL;
+ xlator_t *trav = NULL;
+ xlator_t *top;
+ xlator_t *victim;
+ xlator_list_t **trav_p;
- gf_log_disable_suppression_before_exit (ctx);
+ ctx = glusterfsd_ctx;
- gf_msg_callingfn ("", GF_LOG_WARNING, 0, glusterfsd_msg_32,
- "received signum (%d), shutting down", signum);
-
- if (ctx->cleanup_started)
- return;
-
- ctx->cleanup_started = 1;
-
- /* signout should be sent to all the bricks in case brick mux is enabled
- * and multiple brick instances are attached to this process
- */
- if (ctx->active) {
- top = ctx->active->first;
- for (trav_p = &top->children; *trav_p;
- trav_p = &(*trav_p)->next) {
- victim = (*trav_p)->xlator;
- rpc_clnt_mgmt_pmap_signout (ctx, victim->name);
- }
- } else {
- rpc_clnt_mgmt_pmap_signout (ctx, NULL);
- }
+ if (!ctx)
+ return;
- /* below part is a racy code where the rpcsvc object is freed.
- * But in another thread (epoll thread), upon poll error in the
- * socket the transports are cleaned up where again rpcsvc object
- * is accessed (which is already freed by the below function).
- * Since the process is about to be killed don't execute the function
- * below.
- */
- /* if (ctx->listener) { */
- /* (void) glusterfs_listener_stop (ctx); */
- /* } */
-
- /* Call fini() of FUSE xlator first:
- * so there are no more requests coming and
- * 'umount' of mount point is done properly */
- trav = ctx->master;
- if (trav && trav->fini) {
- THIS = trav;
- trav->fini (trav);
- }
+ /* To take or not to take the mutex here and in the other
+ * signal handler - gf_print_trace() - is the big question here.
+ *
+ * Taking mutex in signal handler would mean that if the process
+ * receives a fatal signal while another thread is holding
+ * ctx->log.log_buf_lock to perhaps log a message in _gf_msg_internal(),
+ * the offending thread hangs on the mutex lock forever without letting
+ * the process exit.
+ *
+     * On the other hand, not taking the mutex in signal handler would cause
+ * it to modify the lru_list of buffered log messages in a racy manner,
+ * corrupt the list and potentially give rise to an unending
+ * cascade of SIGSEGVs and other re-entrancy issues.
+ */
+
+ gf_log_disable_suppression_before_exit(ctx);
+
+ gf_msg_callingfn("", GF_LOG_WARNING, 0, glusterfsd_msg_32,
+ "received signum (%d), shutting down", signum);
+
+ if (ctx->cleanup_started)
+ return;
- glusterfs_pidfile_cleanup (ctx);
+ ctx->cleanup_started = 1;
+
+ /* signout should be sent to all the bricks in case brick mux is enabled
+ * and multiple brick instances are attached to this process
+ */
+ if (ctx->active) {
+ top = ctx->active->first;
+ for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
+ victim = (*trav_p)->xlator;
+ rpc_clnt_mgmt_pmap_signout(ctx, victim->name);
+ }
+ } else {
+ rpc_clnt_mgmt_pmap_signout(ctx, NULL);
+ }
+
+ /* below part is a racy code where the rpcsvc object is freed.
+ * But in another thread (epoll thread), upon poll error in the
+ * socket the transports are cleaned up where again rpcsvc object
+ * is accessed (which is already freed by the below function).
+ * Since the process is about to be killed don't execute the function
+ * below.
+ */
+ /* if (ctx->listener) { */
+ /* (void) glusterfs_listener_stop (ctx); */
+ /* } */
+
+ /* Call fini() of FUSE xlator first:
+ * so there are no more requests coming and
+ * 'umount' of mount point is done properly */
+ trav = ctx->master;
+ if (trav && trav->fini) {
+ THIS = trav;
+ trav->fini(trav);
+ }
+
+ glusterfs_pidfile_cleanup(ctx);
#if 0
/* TODO: Properly do cleanup_and_exit(), with synchronization */
@@ -1569,303 +1529,304 @@ cleanup_and_exit (int signum)
}
#endif
- trav = NULL;
+ trav = NULL;
- /* NOTE: Only the least significant 8 bits i.e (signum & 255)
- will be available to parent process on calling exit() */
- exit(abs(signum));
+ /* NOTE: Only the least significant 8 bits i.e (signum & 255)
+ will be available to parent process on calling exit() */
+ exit(abs(signum));
}
-
static void
-reincarnate (int signum)
+reincarnate(int signum)
{
- int ret = 0;
- glusterfs_ctx_t *ctx = NULL;
- cmd_args_t *cmd_args = NULL;
-
- ctx = glusterfsd_ctx;
- cmd_args = &ctx->cmd_args;
-
- if (cmd_args->volfile_server) {
- gf_msg ("glusterfsd", GF_LOG_INFO, 0, glusterfsd_msg_11,
- "Fetching the volume file from server...");
- ret = glusterfs_volfile_fetch (ctx);
- } else {
- gf_msg_debug ("glusterfsd", 0,
- "Not reloading volume specification file"
- " on SIGHUP");
- }
-
- /* Also, SIGHUP should do logrotate */
- gf_log_logrotate (1);
-
- if (ret < 0)
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_12,
- "volume initialization failed.");
-
- return;
+ int ret = 0;
+ glusterfs_ctx_t *ctx = NULL;
+ cmd_args_t *cmd_args = NULL;
+
+ ctx = glusterfsd_ctx;
+ cmd_args = &ctx->cmd_args;
+
+ if (cmd_args->volfile_server) {
+ gf_msg("glusterfsd", GF_LOG_INFO, 0, glusterfsd_msg_11,
+ "Fetching the volume file from server...");
+ ret = glusterfs_volfile_fetch(ctx);
+ } else {
+ gf_msg_debug("glusterfsd", 0,
+ "Not reloading volume specification file"
+ " on SIGHUP");
+ }
+
+ /* Also, SIGHUP should do logrotate */
+ gf_log_logrotate(1);
+
+ if (ret < 0)
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_12,
+ "volume initialization failed.");
+
+ return;
}
void
-emancipate (glusterfs_ctx_t *ctx, int ret)
+emancipate(glusterfs_ctx_t *ctx, int ret)
{
- /* break free from the parent */
- if (ctx->daemon_pipe[1] != -1) {
- sys_write (ctx->daemon_pipe[1], (void *) &ret, sizeof (ret));
- sys_close (ctx->daemon_pipe[1]);
- ctx->daemon_pipe[1] = -1;
- }
+ /* break free from the parent */
+ if (ctx->daemon_pipe[1] != -1) {
+ sys_write(ctx->daemon_pipe[1], (void *)&ret, sizeof(ret));
+ sys_close(ctx->daemon_pipe[1]);
+ ctx->daemon_pipe[1] = -1;
+ }
}
static uint8_t
-gf_get_process_mode (char *exec_name)
+gf_get_process_mode(char *exec_name)
{
- char *dup_execname = NULL, *base = NULL;
- uint8_t ret = 0;
+ char *dup_execname = NULL, *base = NULL;
+ uint8_t ret = 0;
- dup_execname = gf_strdup (exec_name);
- base = basename (dup_execname);
+ dup_execname = gf_strdup(exec_name);
+ base = basename(dup_execname);
- if (!strncmp (base, "glusterfsd", 10)) {
- ret = GF_SERVER_PROCESS;
- } else if (!strncmp (base, "glusterd", 8)) {
- ret = GF_GLUSTERD_PROCESS;
- } else {
- ret = GF_CLIENT_PROCESS;
- }
+ if (!strncmp(base, "glusterfsd", 10)) {
+ ret = GF_SERVER_PROCESS;
+ } else if (!strncmp(base, "glusterd", 8)) {
+ ret = GF_GLUSTERD_PROCESS;
+ } else {
+ ret = GF_CLIENT_PROCESS;
+ }
- GF_FREE (dup_execname);
+ GF_FREE(dup_execname);
- return ret;
+ return ret;
}
-
static int
-glusterfs_ctx_defaults_init (glusterfs_ctx_t *ctx)
+glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
{
- cmd_args_t *cmd_args = NULL;
- struct rlimit lim = {0, };
- int ret = -1;
-
- if (!ctx)
- return ret;
-
- ret = xlator_mem_acct_init (THIS, gfd_mt_end);
- if (ret != 0) {
- gf_msg(THIS->name, GF_LOG_CRITICAL, 0, glusterfsd_msg_34,
- "memory accounting init failed.");
- return ret;
- }
+ cmd_args_t *cmd_args = NULL;
+ struct rlimit lim = {
+ 0,
+ };
+ int ret = -1;
- /* reset ret to -1 so that we don't need to explicitly
- * set it in all error paths before "goto err"
- */
- ret = -1;
-
- /* monitoring should be enabled by default */
- ctx->measure_latency = true;
-
- ctx->process_uuid = generate_glusterfs_ctx_id ();
- if (!ctx->process_uuid) {
- gf_msg ("", GF_LOG_CRITICAL, 0, glusterfsd_msg_13,
- "ERROR: glusterfs uuid generation failed");
- goto out;
- }
-
- ctx->page_size = 128 * GF_UNIT_KB;
-
- ctx->iobuf_pool = iobuf_pool_new ();
- if (!ctx->iobuf_pool) {
- gf_msg ("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs iobuf pool creation failed");
- goto out;
- }
-
- ctx->event_pool = event_pool_new (DEFAULT_EVENT_POOL_SIZE,
- STARTING_EVENT_THREADS);
- if (!ctx->event_pool) {
- gf_msg ("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs event pool creation failed");
- goto out;
- }
-
- ctx->pool = GF_CALLOC (1, sizeof (call_pool_t), gfd_mt_call_pool_t);
- if (!ctx->pool) {
- gf_msg ("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs call pool creation failed");
- goto out;
- }
-
- INIT_LIST_HEAD (&ctx->pool->all_frames);
- LOCK_INIT (&ctx->pool->lock);
-
- /* frame_mem_pool size 112 * 4k */
- ctx->pool->frame_mem_pool = mem_pool_new (call_frame_t, 4096);
- if (!ctx->pool->frame_mem_pool) {
- gf_msg ("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs frame pool creation failed");
- goto out;
- }
- /* stack_mem_pool size 256 * 1024 */
- ctx->pool->stack_mem_pool = mem_pool_new (call_stack_t, 1024);
- if (!ctx->pool->stack_mem_pool) {
- gf_msg ("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs stack pool creation failed");
- goto out;
- }
-
- ctx->stub_mem_pool = mem_pool_new (call_stub_t, 1024);
- if (!ctx->stub_mem_pool) {
- gf_msg ("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs stub pool creation failed");
- goto out;
- }
-
- ctx->dict_pool = mem_pool_new (dict_t, GF_MEMPOOL_COUNT_OF_DICT_T);
- if (!ctx->dict_pool)
- goto out;
-
- ctx->dict_pair_pool = mem_pool_new (data_pair_t,
- GF_MEMPOOL_COUNT_OF_DATA_PAIR_T);
- if (!ctx->dict_pair_pool)
- goto out;
-
- ctx->dict_data_pool = mem_pool_new (data_t, GF_MEMPOOL_COUNT_OF_DATA_T);
- if (!ctx->dict_data_pool)
- goto out;
-
- ctx->logbuf_pool = mem_pool_new (log_buf_t,
- GF_MEMPOOL_COUNT_OF_LRU_BUF_T);
- if (!ctx->logbuf_pool)
- goto out;
-
- pthread_mutex_init (&ctx->notify_lock, NULL);
- pthread_cond_init (&ctx->notify_cond, NULL);
-
- ctx->clienttable = gf_clienttable_alloc();
- if (!ctx->clienttable)
- goto out;
-
- cmd_args = &ctx->cmd_args;
-
- /* parsing command line arguments */
- cmd_args->log_level = DEFAULT_LOG_LEVEL;
- cmd_args->logger = gf_logger_glusterlog;
- cmd_args->log_format = gf_logformat_withmsgid;
- cmd_args->log_buf_size = GF_LOG_LRU_BUFSIZE_DEFAULT;
- cmd_args->log_flush_timeout = GF_LOG_FLUSH_TIMEOUT_DEFAULT;
+ if (!ctx)
+ return ret;
- cmd_args->mac_compat = GF_OPTION_DISABLE;
+ ret = xlator_mem_acct_init(THIS, gfd_mt_end);
+ if (ret != 0) {
+ gf_msg(THIS->name, GF_LOG_CRITICAL, 0, glusterfsd_msg_34,
+ "memory accounting init failed.");
+ return ret;
+ }
+
+ /* reset ret to -1 so that we don't need to explicitly
+ * set it in all error paths before "goto err"
+ */
+ ret = -1;
+
+ /* monitoring should be enabled by default */
+ ctx->measure_latency = true;
+
+ ctx->process_uuid = generate_glusterfs_ctx_id();
+ if (!ctx->process_uuid) {
+ gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_13,
+ "ERROR: glusterfs uuid generation failed");
+ goto out;
+ }
+
+ ctx->page_size = 128 * GF_UNIT_KB;
+
+ ctx->iobuf_pool = iobuf_pool_new();
+ if (!ctx->iobuf_pool) {
+ gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
+ "ERROR: glusterfs iobuf pool creation failed");
+ goto out;
+ }
+
+ ctx->event_pool = event_pool_new(DEFAULT_EVENT_POOL_SIZE,
+ STARTING_EVENT_THREADS);
+ if (!ctx->event_pool) {
+ gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
+ "ERROR: glusterfs event pool creation failed");
+ goto out;
+ }
+
+ ctx->pool = GF_CALLOC(1, sizeof(call_pool_t), gfd_mt_call_pool_t);
+ if (!ctx->pool) {
+ gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
+ "ERROR: glusterfs call pool creation failed");
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&ctx->pool->all_frames);
+ LOCK_INIT(&ctx->pool->lock);
+
+ /* frame_mem_pool size 112 * 4k */
+ ctx->pool->frame_mem_pool = mem_pool_new(call_frame_t, 4096);
+ if (!ctx->pool->frame_mem_pool) {
+ gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
+ "ERROR: glusterfs frame pool creation failed");
+ goto out;
+ }
+ /* stack_mem_pool size 256 * 1024 */
+ ctx->pool->stack_mem_pool = mem_pool_new(call_stack_t, 1024);
+ if (!ctx->pool->stack_mem_pool) {
+ gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
+ "ERROR: glusterfs stack pool creation failed");
+ goto out;
+ }
+
+ ctx->stub_mem_pool = mem_pool_new(call_stub_t, 1024);
+ if (!ctx->stub_mem_pool) {
+ gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
+ "ERROR: glusterfs stub pool creation failed");
+ goto out;
+ }
+
+ ctx->dict_pool = mem_pool_new(dict_t, GF_MEMPOOL_COUNT_OF_DICT_T);
+ if (!ctx->dict_pool)
+ goto out;
+
+ ctx->dict_pair_pool = mem_pool_new(data_pair_t,
+ GF_MEMPOOL_COUNT_OF_DATA_PAIR_T);
+ if (!ctx->dict_pair_pool)
+ goto out;
+
+ ctx->dict_data_pool = mem_pool_new(data_t, GF_MEMPOOL_COUNT_OF_DATA_T);
+ if (!ctx->dict_data_pool)
+ goto out;
+
+ ctx->logbuf_pool = mem_pool_new(log_buf_t, GF_MEMPOOL_COUNT_OF_LRU_BUF_T);
+ if (!ctx->logbuf_pool)
+ goto out;
+
+ pthread_mutex_init(&ctx->notify_lock, NULL);
+ pthread_cond_init(&ctx->notify_cond, NULL);
+
+ ctx->clienttable = gf_clienttable_alloc();
+ if (!ctx->clienttable)
+ goto out;
+
+ cmd_args = &ctx->cmd_args;
+
+ /* parsing command line arguments */
+ cmd_args->log_level = DEFAULT_LOG_LEVEL;
+ cmd_args->logger = gf_logger_glusterlog;
+ cmd_args->log_format = gf_logformat_withmsgid;
+ cmd_args->log_buf_size = GF_LOG_LRU_BUFSIZE_DEFAULT;
+ cmd_args->log_flush_timeout = GF_LOG_FLUSH_TIMEOUT_DEFAULT;
+
+ cmd_args->mac_compat = GF_OPTION_DISABLE;
#ifdef GF_DARWIN_HOST_OS
- /* On Darwin machines, O_APPEND is not handled,
- * which may corrupt the data
- */
- cmd_args->fuse_direct_io_mode = GF_OPTION_DISABLE;
+ /* On Darwin machines, O_APPEND is not handled,
+ * which may corrupt the data
+ */
+ cmd_args->fuse_direct_io_mode = GF_OPTION_DISABLE;
#else
- cmd_args->fuse_direct_io_mode = GF_OPTION_DEFERRED;
+ cmd_args->fuse_direct_io_mode = GF_OPTION_DEFERRED;
#endif
- cmd_args->fuse_attribute_timeout = -1;
- cmd_args->fuse_entry_timeout = -1;
- cmd_args->fopen_keep_cache = GF_OPTION_DEFERRED;
- cmd_args->kernel_writeback_cache = GF_OPTION_DEFERRED;
+ cmd_args->fuse_attribute_timeout = -1;
+ cmd_args->fuse_entry_timeout = -1;
+ cmd_args->fopen_keep_cache = GF_OPTION_DEFERRED;
+ cmd_args->kernel_writeback_cache = GF_OPTION_DEFERRED;
- if (ctx->mem_acct_enable)
- cmd_args->mem_acct = 1;
+ if (ctx->mem_acct_enable)
+ cmd_args->mem_acct = 1;
- INIT_LIST_HEAD (&cmd_args->xlator_options);
- INIT_LIST_HEAD (&cmd_args->volfile_servers);
+ INIT_LIST_HEAD(&cmd_args->xlator_options);
+ INIT_LIST_HEAD(&cmd_args->volfile_servers);
- lim.rlim_cur = RLIM_INFINITY;
- lim.rlim_max = RLIM_INFINITY;
- setrlimit (RLIMIT_CORE, &lim);
+ lim.rlim_cur = RLIM_INFINITY;
+ lim.rlim_max = RLIM_INFINITY;
+ setrlimit(RLIMIT_CORE, &lim);
- ret = 0;
+ ret = 0;
out:
- if (ret) {
- if (ctx->pool) {
- mem_pool_destroy (ctx->pool->frame_mem_pool);
- mem_pool_destroy (ctx->pool->stack_mem_pool);
- }
- GF_FREE (ctx->pool);
- mem_pool_destroy (ctx->stub_mem_pool);
- mem_pool_destroy (ctx->dict_pool);
- mem_pool_destroy (ctx->dict_data_pool);
- mem_pool_destroy (ctx->dict_pair_pool);
- mem_pool_destroy (ctx->logbuf_pool);
+ if (ret) {
+ if (ctx->pool) {
+ mem_pool_destroy(ctx->pool->frame_mem_pool);
+ mem_pool_destroy(ctx->pool->stack_mem_pool);
}
+ GF_FREE(ctx->pool);
+ mem_pool_destroy(ctx->stub_mem_pool);
+ mem_pool_destroy(ctx->dict_pool);
+ mem_pool_destroy(ctx->dict_data_pool);
+ mem_pool_destroy(ctx->dict_pair_pool);
+ mem_pool_destroy(ctx->logbuf_pool);
+ }
- return ret;
+ return ret;
}
static int
-logging_init (glusterfs_ctx_t *ctx, const char *progpath)
+logging_init(glusterfs_ctx_t *ctx, const char *progpath)
{
- cmd_args_t *cmd_args = NULL;
- int ret = 0;
+ cmd_args_t *cmd_args = NULL;
+ int ret = 0;
- cmd_args = &ctx->cmd_args;
+ cmd_args = &ctx->cmd_args;
- if (cmd_args->log_file == NULL) {
- ret = gf_set_log_file_path (cmd_args, ctx);
- if (ret == -1) {
- fprintf (stderr, "ERROR: failed to set the log file "
- "path\n");
- return -1;
- }
+ if (cmd_args->log_file == NULL) {
+ ret = gf_set_log_file_path(cmd_args, ctx);
+ if (ret == -1) {
+ fprintf(stderr,
+ "ERROR: failed to set the log file "
+ "path\n");
+ return -1;
}
+ }
- if (cmd_args->log_ident == NULL) {
- ret = gf_set_log_ident (cmd_args);
- if (ret == -1) {
- fprintf (stderr, "ERROR: failed to set the log "
- "identity\n");
- return -1;
- }
+ if (cmd_args->log_ident == NULL) {
+ ret = gf_set_log_ident(cmd_args);
+ if (ret == -1) {
+ fprintf(stderr,
+ "ERROR: failed to set the log "
+ "identity\n");
+ return -1;
}
+ }
- /* finish log set parameters before init */
- gf_log_set_loglevel (ctx, cmd_args->log_level);
+ /* finish log set parameters before init */
+ gf_log_set_loglevel(ctx, cmd_args->log_level);
- gf_log_set_localtime (cmd_args->localtime_logging);
+ gf_log_set_localtime(cmd_args->localtime_logging);
- gf_log_set_logger (cmd_args->logger);
+ gf_log_set_logger(cmd_args->logger);
- gf_log_set_logformat (cmd_args->log_format);
+ gf_log_set_logformat(cmd_args->log_format);
- gf_log_set_log_buf_size (cmd_args->log_buf_size);
+ gf_log_set_log_buf_size(cmd_args->log_buf_size);
- gf_log_set_log_flush_timeout (cmd_args->log_flush_timeout);
+ gf_log_set_log_flush_timeout(cmd_args->log_flush_timeout);
- if (gf_log_init (ctx, cmd_args->log_file, cmd_args->log_ident) == -1) {
- fprintf (stderr, "ERROR: failed to open logfile %s\n",
- cmd_args->log_file);
- return -1;
- }
+ if (gf_log_init(ctx, cmd_args->log_file, cmd_args->log_ident) == -1) {
+ fprintf(stderr, "ERROR: failed to open logfile %s\n",
+ cmd_args->log_file);
+ return -1;
+ }
- /* At this point, all the logging related parameters are initialised
- * except for the log flush timer, which will be injected post fork(2)
- * in daemonize() . During this time, any log message that is logged
- * will be kept buffered. And if the list that holds these messages
- * overflows, then the same lru policy is used to drive out the least
- * recently used message and displace it with the message just logged.
- */
+ /* At this point, all the logging related parameters are initialised
+ * except for the log flush timer, which will be injected post fork(2)
+ * in daemonize() . During this time, any log message that is logged
+ * will be kept buffered. And if the list that holds these messages
+ * overflows, then the same lru policy is used to drive out the least
+ * recently used message and displace it with the message just logged.
+ */
- return 0;
+ return 0;
}
void
-gf_check_and_set_mem_acct (int argc, char *argv[])
+gf_check_and_set_mem_acct(int argc, char *argv[])
{
- int i = 0;
+ int i = 0;
- for (i = 0; i < argc; i++) {
- if (strcmp (argv[i], "--no-mem-accounting") == 0) {
- gf_global_mem_acct_enable_set (0);
- break;
- }
+ for (i = 0; i < argc; i++) {
+ if (strcmp(argv[i], "--no-mem-accounting") == 0) {
+ gf_global_mem_acct_enable_set(0);
+ break;
}
+ }
}
/**
@@ -1883,87 +1844,85 @@ gf_check_and_set_mem_acct (int argc, char *argv[])
* error messages. Hence there are different return values.
*/
int
-print_exports_file (const char *exports_file)
+print_exports_file(const char *exports_file)
{
- void *libhandle = NULL;
- char *libpathfull = NULL;
- struct exports_file *file = NULL;
- int ret = 0;
-
- int (*exp_file_parse)(const char *filepath,
- struct exports_file **expfile,
- struct mount3_state *ms) = NULL;
- void (*exp_file_print)(const struct exports_file *file) = NULL;
- void (*exp_file_deinit)(struct exports_file *ptr) = NULL;
-
- /* XLATORDIR passed through a -D flag to GCC */
- ret = gf_asprintf (&libpathfull, "%s/%s/server.so", XLATORDIR,
- "nfs");
- if (ret < 0) {
- gf_log ("glusterfs", GF_LOG_CRITICAL, "asprintf () failed.");
- ret = -1;
- goto out;
- }
-
- /* Load up the library */
- libhandle = dlopen (libpathfull, RTLD_NOW);
- if (!libhandle) {
- gf_log ("glusterfs", GF_LOG_CRITICAL,
- "Error loading NFS server library : "
- "%s\n", dlerror ());
- ret = -1;
- goto out;
- }
-
- /* Load up the function */
- exp_file_parse = dlsym (libhandle, "exp_file_parse");
- if (!exp_file_parse) {
- gf_log ("glusterfs", GF_LOG_CRITICAL,
- "Error finding function exp_file_parse "
- "in symbol.");
- ret = -1;
- goto out;
- }
-
- /* Parse the file */
- ret = exp_file_parse (exports_file, &file, NULL);
- if (ret < 0) {
- ret = 1; /* This means we failed to parse */
- goto out;
- }
-
- /* Load up the function */
- exp_file_print = dlsym (libhandle, "exp_file_print");
- if (!exp_file_print) {
- gf_log ("glusterfs", GF_LOG_CRITICAL,
- "Error finding function exp_file_print in symbol.");
- ret = -1;
- goto out;
- }
+ void *libhandle = NULL;
+ char *libpathfull = NULL;
+ struct exports_file *file = NULL;
+ int ret = 0;
+
+ int (*exp_file_parse)(const char *filepath, struct exports_file **expfile,
+ struct mount3_state *ms) = NULL;
+ void (*exp_file_print)(const struct exports_file *file) = NULL;
+ void (*exp_file_deinit)(struct exports_file * ptr) = NULL;
+
+ /* XLATORDIR passed through a -D flag to GCC */
+ ret = gf_asprintf(&libpathfull, "%s/%s/server.so", XLATORDIR, "nfs");
+ if (ret < 0) {
+ gf_log("glusterfs", GF_LOG_CRITICAL, "asprintf () failed.");
+ ret = -1;
+ goto out;
+ }
+
+ /* Load up the library */
+ libhandle = dlopen(libpathfull, RTLD_NOW);
+ if (!libhandle) {
+ gf_log("glusterfs", GF_LOG_CRITICAL,
+ "Error loading NFS server library : "
+ "%s\n",
+ dlerror());
+ ret = -1;
+ goto out;
+ }
+
+ /* Load up the function */
+ exp_file_parse = dlsym(libhandle, "exp_file_parse");
+ if (!exp_file_parse) {
+ gf_log("glusterfs", GF_LOG_CRITICAL,
+ "Error finding function exp_file_parse "
+ "in symbol.");
+ ret = -1;
+ goto out;
+ }
+
+ /* Parse the file */
+ ret = exp_file_parse(exports_file, &file, NULL);
+ if (ret < 0) {
+ ret = 1; /* This means we failed to parse */
+ goto out;
+ }
+
+ /* Load up the function */
+ exp_file_print = dlsym(libhandle, "exp_file_print");
+ if (!exp_file_print) {
+ gf_log("glusterfs", GF_LOG_CRITICAL,
+ "Error finding function exp_file_print in symbol.");
+ ret = -1;
+ goto out;
+ }
- /* Print it out to screen */
- exp_file_print (file);
+ /* Print it out to screen */
+ exp_file_print(file);
- /* Load up the function */
- exp_file_deinit = dlsym (libhandle, "exp_file_deinit");
- if (!exp_file_deinit) {
- gf_log ("glusterfs", GF_LOG_CRITICAL,
- "Error finding function exp_file_deinit in lib.");
- ret = -1;
- goto out;
- }
+ /* Load up the function */
+ exp_file_deinit = dlsym(libhandle, "exp_file_deinit");
+ if (!exp_file_deinit) {
+ gf_log("glusterfs", GF_LOG_CRITICAL,
+ "Error finding function exp_file_deinit in lib.");
+ ret = -1;
+ goto out;
+ }
- /* Free the file */
- exp_file_deinit (file);
+ /* Free the file */
+ exp_file_deinit(file);
out:
- if (libhandle)
- dlclose(libhandle);
- GF_FREE (libpathfull);
- return ret;
+ if (libhandle)
+ dlclose(libhandle);
+ GF_FREE(libpathfull);
+ return ret;
}
-
/**
* print_netgroups_file - Print out & verify the syntax
* of the netgroups file specified
@@ -1981,821 +1940,804 @@ out:
* we want to print out a different error messages based on the ret value.
*/
int
-print_netgroups_file (const char *netgroups_file)
+print_netgroups_file(const char *netgroups_file)
{
- void *libhandle = NULL;
- char *libpathfull = NULL;
- struct netgroups_file *file = NULL;
- int ret = 0;
-
- struct netgroups_file *(*ng_file_parse)(const char *file_path) = NULL;
- void (*ng_file_print)(const struct netgroups_file *file) = NULL;
- void (*ng_file_deinit)(struct netgroups_file *ptr) = NULL;
-
- /* XLATORDIR passed through a -D flag to GCC */
- ret = gf_asprintf (&libpathfull, "%s/%s/server.so", XLATORDIR,
- "nfs");
- if (ret < 0) {
- gf_log ("glusterfs", GF_LOG_CRITICAL, "asprintf () failed.");
- ret = -1;
- goto out;
- }
- /* Load up the library */
- libhandle = dlopen (libpathfull, RTLD_NOW);
- if (!libhandle) {
- gf_log ("glusterfs", GF_LOG_CRITICAL,
- "Error loading NFS server library : %s\n", dlerror ());
- ret = -1;
- goto out;
- }
-
- /* Load up the function */
- ng_file_parse = dlsym (libhandle, "ng_file_parse");
- if (!ng_file_parse) {
- gf_log ("glusterfs", GF_LOG_CRITICAL,
- "Error finding function ng_file_parse in symbol.");
- ret = -1;
- goto out;
- }
-
- /* Parse the file */
- file = ng_file_parse (netgroups_file);
- if (!file) {
- ret = 1; /* This means we failed to parse */
- goto out;
- }
-
- /* Load up the function */
- ng_file_print = dlsym (libhandle, "ng_file_print");
- if (!ng_file_print) {
- gf_log ("glusterfs", GF_LOG_CRITICAL,
- "Error finding function ng_file_print in symbol.");
- ret = -1;
- goto out;
- }
+ void *libhandle = NULL;
+ char *libpathfull = NULL;
+ struct netgroups_file *file = NULL;
+ int ret = 0;
+
+ struct netgroups_file *(*ng_file_parse)(const char *file_path) = NULL;
+ void (*ng_file_print)(const struct netgroups_file *file) = NULL;
+ void (*ng_file_deinit)(struct netgroups_file * ptr) = NULL;
+
+ /* XLATORDIR passed through a -D flag to GCC */
+ ret = gf_asprintf(&libpathfull, "%s/%s/server.so", XLATORDIR, "nfs");
+ if (ret < 0) {
+ gf_log("glusterfs", GF_LOG_CRITICAL, "asprintf () failed.");
+ ret = -1;
+ goto out;
+ }
+ /* Load up the library */
+ libhandle = dlopen(libpathfull, RTLD_NOW);
+ if (!libhandle) {
+ gf_log("glusterfs", GF_LOG_CRITICAL,
+ "Error loading NFS server library : %s\n", dlerror());
+ ret = -1;
+ goto out;
+ }
+
+ /* Load up the function */
+ ng_file_parse = dlsym(libhandle, "ng_file_parse");
+ if (!ng_file_parse) {
+ gf_log("glusterfs", GF_LOG_CRITICAL,
+ "Error finding function ng_file_parse in symbol.");
+ ret = -1;
+ goto out;
+ }
+
+ /* Parse the file */
+ file = ng_file_parse(netgroups_file);
+ if (!file) {
+ ret = 1; /* This means we failed to parse */
+ goto out;
+ }
+
+ /* Load up the function */
+ ng_file_print = dlsym(libhandle, "ng_file_print");
+ if (!ng_file_print) {
+ gf_log("glusterfs", GF_LOG_CRITICAL,
+ "Error finding function ng_file_print in symbol.");
+ ret = -1;
+ goto out;
+ }
- /* Print it out to screen */
- ng_file_print (file);
+ /* Print it out to screen */
+ ng_file_print(file);
- /* Load up the function */
- ng_file_deinit = dlsym (libhandle, "ng_file_deinit");
- if (!ng_file_deinit) {
- gf_log ("glusterfs", GF_LOG_CRITICAL,
- "Error finding function ng_file_deinit in lib.");
- ret = -1;
- goto out;
- }
+ /* Load up the function */
+ ng_file_deinit = dlsym(libhandle, "ng_file_deinit");
+ if (!ng_file_deinit) {
+ gf_log("glusterfs", GF_LOG_CRITICAL,
+ "Error finding function ng_file_deinit in lib.");
+ ret = -1;
+ goto out;
+ }
- /* Free the file */
- ng_file_deinit (file);
+ /* Free the file */
+ ng_file_deinit(file);
out:
- if (libhandle)
- dlclose(libhandle);
- GF_FREE (libpathfull);
- return ret;
+ if (libhandle)
+ dlclose(libhandle);
+ GF_FREE(libpathfull);
+ return ret;
}
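Both print_exports_file() and print_netgroups_file() follow the same flow: dlopen() the NFS server xlator, dlsym() each helper into a typed function pointer, call it, and dlclose() on the way out. A minimal standalone sketch of that pattern (the library path and symbol name are caller-supplied placeholders, not GlusterFS APIs):

#include <dlfcn.h>
#include <stdio.h>

/* Minimal sketch (not GlusterFS code): load a shared object, resolve one
 * symbol into a typed function pointer, call it, then unload. */
int
call_plugin_fn(const char *libpath, const char *symbol)
{
    void *handle = NULL;
    int (*fn)(void) = NULL;
    int ret = -1;

    handle = dlopen(libpath, RTLD_NOW);
    if (!handle) {
        fprintf(stderr, "dlopen failed: %s\n", dlerror());
        goto out;
    }

    /* dlsym() returns void *; casting it to a function pointer is the
     * conventional POSIX idiom. */
    fn = (int (*)(void))dlsym(handle, symbol);
    if (!fn) {
        fprintf(stderr, "dlsym(%s) failed: %s\n", symbol, dlerror());
        goto out;
    }

    ret = fn();
out:
    if (handle)
        dlclose(handle);
    return ret;
}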
-
int
-parse_cmdline (int argc, char *argv[], glusterfs_ctx_t *ctx)
+parse_cmdline(int argc, char *argv[], glusterfs_ctx_t *ctx)
{
- int process_mode = 0;
- int ret = 0;
- struct stat stbuf = {0, };
- char timestr[32];
- char tmp_logfile[1024] = { 0 };
- char *tmp_logfile_dyn = NULL;
- char *tmp_logfilebase = NULL;
- cmd_args_t *cmd_args = NULL;
- int len = 0;
- char *thin_volfileid = NULL;
-
- cmd_args = &ctx->cmd_args;
-
- /* Do this before argp_parse so it can be overridden. */
- if (sys_access (SECURE_ACCESS_FILE, F_OK) == 0) {
- cmd_args->secure_mgmt = 1;
- ctx->ssl_cert_depth = glusterfs_read_secure_access_file ();
- }
-
- argp_parse (&argp, argc, argv, ARGP_IN_ORDER, NULL, cmd_args);
-
- if (cmd_args->print_xlatordir || cmd_args->print_statedumpdir ||
- cmd_args->print_logdir || cmd_args->print_libexecdir) {
- /* Just print, nothing else to do */
- goto out;
- }
-
- if (cmd_args->print_netgroups) {
- /* When this option is set we don't want to do anything else
- * except for printing & verifying the netgroups file.
- */
- ret = 0;
- goto out;
- }
+ int process_mode = 0;
+ int ret = 0;
+ struct stat stbuf = {
+ 0,
+ };
+ char timestr[32];
+ char tmp_logfile[1024] = {0};
+ char *tmp_logfile_dyn = NULL;
+ char *tmp_logfilebase = NULL;
+ cmd_args_t *cmd_args = NULL;
+ int len = 0;
+ char *thin_volfileid = NULL;
+
+ cmd_args = &ctx->cmd_args;
+
+ /* Do this before argp_parse so it can be overridden. */
+ if (sys_access(SECURE_ACCESS_FILE, F_OK) == 0) {
+ cmd_args->secure_mgmt = 1;
+ ctx->ssl_cert_depth = glusterfs_read_secure_access_file();
+ }
+
+ argp_parse(&argp, argc, argv, ARGP_IN_ORDER, NULL, cmd_args);
+
+ if (cmd_args->print_xlatordir || cmd_args->print_statedumpdir ||
+ cmd_args->print_logdir || cmd_args->print_libexecdir) {
+ /* Just print, nothing else to do */
+ goto out;
+ }
+
+ if (cmd_args->print_netgroups) {
+ /* When this option is set we don't want to do anything else
+ * except for printing & verifying the netgroups file.
+ */
+ ret = 0;
+ goto out;
+ }
- if (cmd_args->print_exports) {
- /* When this option is set we don't want to do anything else
- * except for printing & verifying the exports file.
- */
- ret = 0;
+ if (cmd_args->print_exports) {
+ /* When this option is set we don't want to do anything else
+ * except for printing & verifying the exports file.
+ */
+ ret = 0;
+ goto out;
+ }
+
+ ctx->secure_mgmt = cmd_args->secure_mgmt;
+
+ if (ENABLE_DEBUG_MODE == cmd_args->debug_mode) {
+ cmd_args->log_level = GF_LOG_DEBUG;
+ cmd_args->log_file = gf_strdup("/dev/stderr");
+ cmd_args->no_daemon_mode = ENABLE_NO_DAEMON_MODE;
+ }
+
+ process_mode = gf_get_process_mode(argv[0]);
+ ctx->process_mode = process_mode;
+
+ if (cmd_args->process_name) {
+ ctx->cmd_args.process_name = cmd_args->process_name;
+ }
+ /* Make sure after the parsing cli, if '--volfile-server' option is
+ given, then '--volfile-id' is mandatory */
+ if (cmd_args->volfile_server && !cmd_args->volfile_id) {
+ gf_msg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_15,
+ "ERROR: '--volfile-id' is mandatory if '-s' OR "
+ "'--volfile-server' option is given");
+ ret = -1;
+ goto out;
+ }
+
+ if ((cmd_args->volfile_server == NULL) && (cmd_args->volfile == NULL)) {
+ if (process_mode == GF_SERVER_PROCESS)
+ cmd_args->volfile = gf_strdup(DEFAULT_SERVER_VOLFILE);
+ else if (process_mode == GF_GLUSTERD_PROCESS)
+ cmd_args->volfile = gf_strdup(DEFAULT_GLUSTERD_VOLFILE);
+ else
+ cmd_args->volfile = gf_strdup(DEFAULT_CLIENT_VOLFILE);
+
+ /* Check if the volfile exists, if not give usage output
+ and exit */
+ ret = sys_stat(cmd_args->volfile, &stbuf);
+ if (ret) {
+ gf_msg("glusterfs", GF_LOG_CRITICAL, errno, glusterfsd_msg_16,
+ "ERROR: parsing the volfile failed");
+ /* argp_usage (argp.) */
+ fprintf(stderr, "USAGE: %s [options] [mountpoint]\n", argv[0]);
+ goto out;
+ }
+ }
+
+ if (cmd_args->thin_client) {
+ len = strlen(cmd_args->volfile_id) + SLEN("gfproxy-client/");
+ thin_volfileid = GF_MALLOC(len + 1, gf_common_mt_char);
+ snprintf(thin_volfileid, len + 1, "gfproxy-client/%s",
+ cmd_args->volfile_id);
+ GF_FREE(cmd_args->volfile_id);
+ cmd_args->volfile_id = thin_volfileid;
+ }
+
+ if (cmd_args->run_id) {
+ ret = sys_lstat(cmd_args->log_file, &stbuf);
+        /* If it's /dev/null, /dev/stdout, or /dev/stderr,
+ * let it use the same, no need to alter
+ */
+ if (((ret == 0) &&
+ (S_ISREG(stbuf.st_mode) || S_ISLNK(stbuf.st_mode))) ||
+ (ret == -1)) {
+ /* Have separate logfile per run */
+ gf_time_fmt(timestr, sizeof timestr, time(NULL), gf_timefmt_FT);
+ sprintf(tmp_logfile, "%s.%s.%d", cmd_args->log_file, timestr,
+ getpid());
+
+ /* Create symlink to actual log file */
+ sys_unlink(cmd_args->log_file);
+
+ tmp_logfile_dyn = gf_strdup(tmp_logfile);
+ tmp_logfilebase = basename(tmp_logfile_dyn);
+ ret = sys_symlink(tmp_logfilebase, cmd_args->log_file);
+ if (ret == -1) {
+ fprintf(stderr, "ERROR: symlink of logfile failed\n");
goto out;
- }
-
+ }
- ctx->secure_mgmt = cmd_args->secure_mgmt;
+ GF_FREE(cmd_args->log_file);
+ cmd_args->log_file = gf_strdup(tmp_logfile);
- if (ENABLE_DEBUG_MODE == cmd_args->debug_mode) {
- cmd_args->log_level = GF_LOG_DEBUG;
- cmd_args->log_file = gf_strdup ("/dev/stderr");
- cmd_args->no_daemon_mode = ENABLE_NO_DAEMON_MODE;
- }
-
- process_mode = gf_get_process_mode (argv[0]);
- ctx->process_mode = process_mode;
-
- if (cmd_args->process_name) {
- ctx->cmd_args.process_name = cmd_args->process_name;
- }
- /* Make sure after the parsing cli, if '--volfile-server' option is
- given, then '--volfile-id' is mandatory */
- if (cmd_args->volfile_server && !cmd_args->volfile_id) {
- gf_msg ("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_15,
- "ERROR: '--volfile-id' is mandatory if '-s' OR "
- "'--volfile-server' option is given");
- ret = -1;
- goto out;
+ GF_FREE(tmp_logfile_dyn);
}
+ }
- if ((cmd_args->volfile_server == NULL)
- && (cmd_args->volfile == NULL)) {
- if (process_mode == GF_SERVER_PROCESS)
- cmd_args->volfile = gf_strdup (DEFAULT_SERVER_VOLFILE);
- else if (process_mode == GF_GLUSTERD_PROCESS)
- cmd_args->volfile = gf_strdup (DEFAULT_GLUSTERD_VOLFILE);
- else
- cmd_args->volfile = gf_strdup (DEFAULT_CLIENT_VOLFILE);
-
- /* Check if the volfile exists, if not give usage output
- and exit */
- ret = sys_stat (cmd_args->volfile, &stbuf);
- if (ret) {
- gf_msg ("glusterfs", GF_LOG_CRITICAL, errno,
- glusterfsd_msg_16,
- "ERROR: parsing the volfile failed");
- /* argp_usage (argp.) */
- fprintf (stderr, "USAGE: %s [options] [mountpoint]\n",
- argv[0]);
- goto out;
- }
- }
-
- if (cmd_args->thin_client) {
- len = strlen (cmd_args->volfile_id) + SLEN ("gfproxy-client/");
- thin_volfileid = GF_MALLOC (len + 1, gf_common_mt_char);
- snprintf (thin_volfileid, len + 1, "gfproxy-client/%s",
- cmd_args->volfile_id);
- GF_FREE (cmd_args->volfile_id);
- cmd_args->volfile_id = thin_volfileid;
- }
-
- if (cmd_args->run_id) {
- ret = sys_lstat (cmd_args->log_file, &stbuf);
- /* If its /dev/null, or /dev/stdout, /dev/stderr,
- * let it use the same, no need to alter
- */
- if (((ret == 0) &&
- (S_ISREG (stbuf.st_mode) || S_ISLNK (stbuf.st_mode))) ||
- (ret == -1)) {
- /* Have separate logfile per run */
- gf_time_fmt (timestr, sizeof timestr, time (NULL),
- gf_timefmt_FT);
- sprintf (tmp_logfile, "%s.%s.%d",
- cmd_args->log_file, timestr, getpid ());
-
- /* Create symlink to actual log file */
- sys_unlink (cmd_args->log_file);
-
- tmp_logfile_dyn = gf_strdup (tmp_logfile);
- tmp_logfilebase = basename (tmp_logfile_dyn);
- ret = sys_symlink (tmp_logfilebase,
- cmd_args->log_file);
- if (ret == -1) {
- fprintf (stderr, "ERROR: symlink of logfile failed\n");
- goto out;
- }
-
- GF_FREE (cmd_args->log_file);
- cmd_args->log_file = gf_strdup (tmp_logfile);
-
- GF_FREE (tmp_logfile_dyn);
- }
- }
-
- /*
- This option was made obsolete but parsing it for backward
- compatibility with third party applications
- */
- if (cmd_args->max_connect_attempts) {
- gf_msg ("glusterfs", GF_LOG_WARNING, 0, glusterfsd_msg_33,
- "obsolete option '--volfile-max-fecth-attempts or "
- "fetch-attempts' was provided");
- }
+ /*
+      This option was made obsolete, but it is still parsed for backward
+      compatibility with third-party applications
+ */
+ if (cmd_args->max_connect_attempts) {
+ gf_msg("glusterfs", GF_LOG_WARNING, 0, glusterfsd_msg_33,
+ "obsolete option '--volfile-max-fecth-attempts or "
+ "fetch-attempts' was provided");
+ }
#ifdef GF_DARWIN_HOST_OS
- if (cmd_args->mount_point)
- cmd_args->mac_compat = GF_OPTION_DEFERRED;
+ if (cmd_args->mount_point)
+ cmd_args->mac_compat = GF_OPTION_DEFERRED;
#endif
- ret = 0;
+ ret = 0;
out:
- return ret;
+ return ret;
}
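When --run-id is given, parse_cmdline() redirects logging to a per-run file and keeps the configured log path as a symlink pointing at it. A stripped-down sketch of that scheme, assuming a plain time()/getpid() suffix in place of gf_time_fmt():

#include <libgen.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

/* Sketch: the real log goes to <logfile>.<timestamp>.<pid> and <logfile>
 * itself becomes a symlink to it (by basename, so the link stays valid
 * relative to the log directory). */
int
make_per_run_logfile(const char *logfile)
{
    char target[1024];
    char *dyn = NULL;
    int ret = -1;

    snprintf(target, sizeof(target), "%s.%ld.%d", logfile, (long)time(NULL),
             (int)getpid());

    unlink(logfile); /* drop the old symlink/file, if any */

    dyn = strdup(target); /* basename() may modify its argument */
    if (!dyn)
        return -1;
    ret = symlink(basename(dyn), logfile);
    free(dyn);
    return ret;
}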
-
int
-glusterfs_pidfile_setup (glusterfs_ctx_t *ctx)
+glusterfs_pidfile_setup(glusterfs_ctx_t *ctx)
{
- cmd_args_t *cmd_args = NULL;
- int ret = -1;
- FILE *pidfp = NULL;
+ cmd_args_t *cmd_args = NULL;
+ int ret = -1;
+ FILE *pidfp = NULL;
- cmd_args = &ctx->cmd_args;
+ cmd_args = &ctx->cmd_args;
- if (!cmd_args->pid_file)
- return 0;
+ if (!cmd_args->pid_file)
+ return 0;
- pidfp = fopen (cmd_args->pid_file, "a+");
- if (!pidfp) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_17,
- "pidfile %s open failed", cmd_args->pid_file);
- goto out;
- }
+ pidfp = fopen(cmd_args->pid_file, "a+");
+ if (!pidfp) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_17,
+ "pidfile %s open failed", cmd_args->pid_file);
+ goto out;
+ }
- ctx->pidfp = pidfp;
+ ctx->pidfp = pidfp;
- ret = 0;
+ ret = 0;
out:
- return ret;
+ return ret;
}
-
int
-glusterfs_pidfile_cleanup (glusterfs_ctx_t *ctx)
+glusterfs_pidfile_cleanup(glusterfs_ctx_t *ctx)
{
- cmd_args_t *cmd_args = NULL;
+ cmd_args_t *cmd_args = NULL;
- cmd_args = &ctx->cmd_args;
+ cmd_args = &ctx->cmd_args;
- if (!ctx->pidfp)
- return 0;
+ if (!ctx->pidfp)
+ return 0;
- gf_msg_trace ("glusterfsd", 0, "pidfile %s cleanup",
- cmd_args->pid_file);
+ gf_msg_trace("glusterfsd", 0, "pidfile %s cleanup", cmd_args->pid_file);
- if (ctx->cmd_args.pid_file) {
- ctx->cmd_args.pid_file = NULL;
- }
+ if (ctx->cmd_args.pid_file) {
+ ctx->cmd_args.pid_file = NULL;
+ }
- lockf (fileno (ctx->pidfp), F_ULOCK, 0);
- fclose (ctx->pidfp);
- ctx->pidfp = NULL;
+ lockf(fileno(ctx->pidfp), F_ULOCK, 0);
+ fclose(ctx->pidfp);
+ ctx->pidfp = NULL;
- return 0;
+ return 0;
}
int
-glusterfs_pidfile_update (glusterfs_ctx_t *ctx, pid_t pid)
+glusterfs_pidfile_update(glusterfs_ctx_t *ctx, pid_t pid)
{
- cmd_args_t *cmd_args = NULL;
- int ret = 0;
- FILE *pidfp = NULL;
+ cmd_args_t *cmd_args = NULL;
+ int ret = 0;
+ FILE *pidfp = NULL;
- cmd_args = &ctx->cmd_args;
+ cmd_args = &ctx->cmd_args;
- pidfp = ctx->pidfp;
- if (!pidfp)
- return 0;
+ pidfp = ctx->pidfp;
+ if (!pidfp)
+ return 0;
- ret = lockf (fileno (pidfp), F_TLOCK, 0);
- if (ret) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_18,
- "pidfile %s lock failed", cmd_args->pid_file);
- return ret;
- }
+ ret = lockf(fileno(pidfp), F_TLOCK, 0);
+ if (ret) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_18,
+ "pidfile %s lock failed", cmd_args->pid_file);
+ return ret;
+ }
- ret = sys_ftruncate (fileno (pidfp), 0);
- if (ret) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_20,
- "pidfile %s truncation failed", cmd_args->pid_file);
- return ret;
- }
+ ret = sys_ftruncate(fileno(pidfp), 0);
+ if (ret) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_20,
+ "pidfile %s truncation failed", cmd_args->pid_file);
+ return ret;
+ }
- ret = fprintf (pidfp, "%d\n", pid);
- if (ret <= 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_21,
- "pidfile %s write failed", cmd_args->pid_file);
- return ret;
- }
+ ret = fprintf(pidfp, "%d\n", pid);
+ if (ret <= 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_21,
+ "pidfile %s write failed", cmd_args->pid_file);
+ return ret;
+ }
- ret = fflush (pidfp);
- if (ret) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_21,
- "pidfile %s write failed", cmd_args->pid_file);
- return ret;
- }
+ ret = fflush(pidfp);
+ if (ret) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_21,
+ "pidfile %s write failed", cmd_args->pid_file);
+ return ret;
+ }
- gf_msg_debug ("glusterfsd", 0, "pidfile %s updated with pid %d",
- cmd_args->pid_file, pid);
+ gf_msg_debug("glusterfsd", 0, "pidfile %s updated with pid %d",
+ cmd_args->pid_file, pid);
- return 0;
+ return 0;
}
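glusterfs_pidfile_setup() and glusterfs_pidfile_update() together implement a conventional pidfile protocol: open the file, take a non-blocking write lock with lockf(), truncate, write the pid, and keep the descriptor (and therefore the lock) open for the life of the process. A self-contained sketch of that protocol, with hypothetical names:

#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Sketch: lock-then-write pidfile.  The FILE * is intentionally left open
 * so the lockf() lock is held until the process exits. */
int
write_pidfile(const char *path, pid_t pid)
{
    FILE *pidfp = fopen(path, "a+");

    if (!pidfp)
        return -1;

    if (lockf(fileno(pidfp), F_TLOCK, 0) != 0) {
        /* lock held by another instance: refuse to start */
        fclose(pidfp);
        return -1;
    }

    if (ftruncate(fileno(pidfp), 0) != 0 ||
        fprintf(pidfp, "%d\n", (int)pid) <= 0 || fflush(pidfp) != 0) {
        fclose(pidfp);
        return -1;
    }

    return 0;
}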
-
void *
-glusterfs_sigwaiter (void *arg)
+glusterfs_sigwaiter(void *arg)
{
- sigset_t set;
- int ret = 0;
- int sig = 0;
- char *file = NULL;
-
- sigemptyset (&set);
- sigaddset (&set, SIGINT); /* cleanup_and_exit */
- sigaddset (&set, SIGTERM); /* cleanup_and_exit */
- sigaddset (&set, SIGHUP); /* reincarnate */
- sigaddset (&set, SIGUSR1); /* gf_proc_dump_info */
- sigaddset (&set, SIGUSR2);
-
- for (;;) {
- ret = sigwait (&set, &sig);
- if (ret)
- continue;
-
-
- switch (sig) {
- case SIGINT:
- case SIGTERM:
- cleanup_and_exit (sig);
- break;
- case SIGHUP:
- reincarnate (sig);
- break;
- case SIGUSR1:
- gf_proc_dump_info (sig, glusterfsd_ctx);
- break;
- case SIGUSR2:
- file = gf_monitor_metrics (glusterfsd_ctx);
+ sigset_t set;
+ int ret = 0;
+ int sig = 0;
+ char *file = NULL;
+
+ sigemptyset(&set);
+ sigaddset(&set, SIGINT); /* cleanup_and_exit */
+ sigaddset(&set, SIGTERM); /* cleanup_and_exit */
+ sigaddset(&set, SIGHUP); /* reincarnate */
+ sigaddset(&set, SIGUSR1); /* gf_proc_dump_info */
+ sigaddset(&set, SIGUSR2);
+
+ for (;;) {
+ ret = sigwait(&set, &sig);
+ if (ret)
+ continue;
- /* Nothing needed to be done here */
- GF_FREE (file);
+ switch (sig) {
+ case SIGINT:
+ case SIGTERM:
+ cleanup_and_exit(sig);
+ break;
+ case SIGHUP:
+ reincarnate(sig);
+ break;
+ case SIGUSR1:
+ gf_proc_dump_info(sig, glusterfsd_ctx);
+ break;
+ case SIGUSR2:
+ file = gf_monitor_metrics(glusterfsd_ctx);
- break;
- default:
+ /* Nothing needed to be done here */
+ GF_FREE(file);
- break;
- }
+ break;
+ default:
+
+ break;
}
+ }
- return NULL;
+ return NULL;
}
-
void
-glusterfsd_print_trace (int signum)
+glusterfsd_print_trace(int signum)
{
- gf_print_trace (signum, glusterfsd_ctx);
+ gf_print_trace(signum, glusterfsd_ctx);
}
-
int
-glusterfs_signals_setup (glusterfs_ctx_t *ctx)
+glusterfs_signals_setup(glusterfs_ctx_t *ctx)
{
- sigset_t set;
- int ret = 0;
-
- sigemptyset (&set);
-
- /* common setting for all threads */
- signal (SIGSEGV, glusterfsd_print_trace);
- signal (SIGABRT, glusterfsd_print_trace);
- signal (SIGILL, glusterfsd_print_trace);
- signal (SIGTRAP, glusterfsd_print_trace);
- signal (SIGFPE, glusterfsd_print_trace);
- signal (SIGBUS, glusterfsd_print_trace);
- signal (SIGINT, cleanup_and_exit);
- signal (SIGPIPE, SIG_IGN);
-
- /* block these signals from non-sigwaiter threads */
- sigaddset (&set, SIGTERM); /* cleanup_and_exit */
- sigaddset (&set, SIGHUP); /* reincarnate */
- sigaddset (&set, SIGUSR1); /* gf_proc_dump_info */
- sigaddset (&set, SIGUSR2);
-
- ret = pthread_sigmask (SIG_BLOCK, &set, NULL);
- if (ret) {
- gf_msg ("glusterfsd", GF_LOG_WARNING, errno, glusterfsd_msg_22,
- "failed to execute pthread_sigmask");
- return ret;
- }
-
- ret = gf_thread_create (&ctx->sigwaiter, NULL, glusterfs_sigwaiter,
- (void *) &set, "sigwait");
- if (ret) {
- /*
- TODO:
- fallback to signals getting handled by other threads.
- setup the signal handlers
- */
- gf_msg ("glusterfsd", GF_LOG_WARNING, errno, glusterfsd_msg_23,
- "failed to create pthread");
- return ret;
- }
+ sigset_t set;
+ int ret = 0;
+
+ sigemptyset(&set);
+
+ /* common setting for all threads */
+ signal(SIGSEGV, glusterfsd_print_trace);
+ signal(SIGABRT, glusterfsd_print_trace);
+ signal(SIGILL, glusterfsd_print_trace);
+ signal(SIGTRAP, glusterfsd_print_trace);
+ signal(SIGFPE, glusterfsd_print_trace);
+ signal(SIGBUS, glusterfsd_print_trace);
+ signal(SIGINT, cleanup_and_exit);
+ signal(SIGPIPE, SIG_IGN);
+
+ /* block these signals from non-sigwaiter threads */
+ sigaddset(&set, SIGTERM); /* cleanup_and_exit */
+ sigaddset(&set, SIGHUP); /* reincarnate */
+ sigaddset(&set, SIGUSR1); /* gf_proc_dump_info */
+ sigaddset(&set, SIGUSR2);
+
+ ret = pthread_sigmask(SIG_BLOCK, &set, NULL);
+ if (ret) {
+ gf_msg("glusterfsd", GF_LOG_WARNING, errno, glusterfsd_msg_22,
+ "failed to execute pthread_sigmask");
+ return ret;
+ }
+ ret = gf_thread_create(&ctx->sigwaiter, NULL, glusterfs_sigwaiter,
+ (void *)&set, "sigwait");
+ if (ret) {
+ /*
+ TODO:
+ fallback to signals getting handled by other threads.
+ setup the signal handlers
+ */
+ gf_msg("glusterfsd", GF_LOG_WARNING, errno, glusterfsd_msg_23,
+ "failed to create pthread");
return ret;
-}
+ }
+ return ret;
+}
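The model set up here is the usual pthread signal-handling split: block the operational signals in every thread via pthread_sigmask(), then let one dedicated thread receive them synchronously with sigwait(). A minimal sketch, with placeholder dispatch instead of cleanup_and_exit()/reincarnate()/gf_proc_dump_info():

#include <pthread.h>
#include <signal.h>

/* Sketch: one thread owns the blocked signal set and handles everything
 * delivered to it synchronously. */
static void *
sigwaiter(void *arg)
{
    sigset_t *set = arg;
    int sig = 0;

    for (;;) {
        if (sigwait(set, &sig) != 0)
            continue;
        /* dispatch on sig here: SIGTERM/SIGINT -> shutdown, SIGHUP ->
         * re-read config, SIGUSR1/SIGUSR2 -> dump state, etc. */
    }
    return NULL;
}

int
setup_sigwaiter(pthread_t *tid, sigset_t *set)
{
    sigemptyset(set);
    sigaddset(set, SIGTERM);
    sigaddset(set, SIGHUP);
    sigaddset(set, SIGUSR1);
    sigaddset(set, SIGUSR2);

    /* The mask is inherited by threads created after this point, so only
     * the sigwaiter thread ever sees these signals. */
    if (pthread_sigmask(SIG_BLOCK, set, NULL) != 0)
        return -1;

    return pthread_create(tid, NULL, sigwaiter, set);
}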
int
-daemonize (glusterfs_ctx_t *ctx)
+daemonize(glusterfs_ctx_t *ctx)
{
- int ret = -1;
- cmd_args_t *cmd_args = NULL;
- int cstatus = 0;
- int err = 1;
- int child_pid = 0;
-
- cmd_args = &ctx->cmd_args;
-
- ret = glusterfs_pidfile_setup (ctx);
- if (ret)
- goto out;
+ int ret = -1;
+ cmd_args_t *cmd_args = NULL;
+ int cstatus = 0;
+ int err = 1;
+ int child_pid = 0;
- if (cmd_args->no_daemon_mode) {
- ret = glusterfs_pidfile_update (ctx, getpid());
- if (ret)
- goto out;
- goto postfork;
- }
+ cmd_args = &ctx->cmd_args;
- if (cmd_args->debug_mode)
- goto postfork;
-
- ret = pipe (ctx->daemon_pipe);
- if (ret) {
- /* If pipe() fails, retain daemon_pipe[] = {-1, -1}
- and parent will just not wait for child status
- */
- ctx->daemon_pipe[0] = -1;
- ctx->daemon_pipe[1] = -1;
- }
+ ret = glusterfs_pidfile_setup(ctx);
+ if (ret)
+ goto out;
- ret = os_daemon_return (0, 0);
- switch (ret) {
+ if (cmd_args->no_daemon_mode) {
+ ret = glusterfs_pidfile_update(ctx, getpid());
+ if (ret)
+ goto out;
+ goto postfork;
+ }
+
+ if (cmd_args->debug_mode)
+ goto postfork;
+
+ ret = pipe(ctx->daemon_pipe);
+ if (ret) {
+ /* If pipe() fails, retain daemon_pipe[] = {-1, -1}
+ and parent will just not wait for child status
+ */
+ ctx->daemon_pipe[0] = -1;
+ ctx->daemon_pipe[1] = -1;
+ }
+
+ ret = os_daemon_return(0, 0);
+ switch (ret) {
case -1:
- if (ctx->daemon_pipe[0] != -1) {
- sys_close (ctx->daemon_pipe[0]);
- sys_close (ctx->daemon_pipe[1]);
- }
-
- gf_msg ("daemonize", GF_LOG_ERROR, errno, glusterfsd_msg_24,
- "daemonization failed");
- goto out;
+ if (ctx->daemon_pipe[0] != -1) {
+ sys_close(ctx->daemon_pipe[0]);
+ sys_close(ctx->daemon_pipe[1]);
+ }
+
+ gf_msg("daemonize", GF_LOG_ERROR, errno, glusterfsd_msg_24,
+ "daemonization failed");
+ goto out;
case 0:
- /* child */
- /* close read */
- sys_close (ctx->daemon_pipe[0]);
- break;
+ /* child */
+ /* close read */
+ sys_close(ctx->daemon_pipe[0]);
+ break;
default:
- /* parent */
- /* close write */
- child_pid = ret;
- sys_close (ctx->daemon_pipe[1]);
-
- if (ctx->mnt_pid > 0) {
- ret = waitpid (ctx->mnt_pid, &cstatus, 0);
- if (!(ret == ctx->mnt_pid)) {
- if (WIFEXITED(cstatus)) {
- err = WEXITSTATUS(cstatus);
- } else {
- err = cstatus;
- }
- gf_msg ("daemonize", GF_LOG_ERROR, 0,
- glusterfsd_msg_25, "mount failed");
- exit (err);
- }
- }
- sys_read (ctx->daemon_pipe[0], (void *)&err, sizeof (err));
- /* NOTE: Only the least significant 8 bits i.e (err & 255)
- will be available to parent process on calling exit() */
- if (err)
- _exit (abs(err));
- ret = glusterfs_pidfile_update (ctx, child_pid);
- if (ret)
- _exit (1);
- _exit (0);
- }
+ /* parent */
+ /* close write */
+ child_pid = ret;
+ sys_close(ctx->daemon_pipe[1]);
+
+ if (ctx->mnt_pid > 0) {
+ ret = waitpid(ctx->mnt_pid, &cstatus, 0);
+ if (!(ret == ctx->mnt_pid)) {
+ if (WIFEXITED(cstatus)) {
+ err = WEXITSTATUS(cstatus);
+ } else {
+ err = cstatus;
+ }
+ gf_msg("daemonize", GF_LOG_ERROR, 0, glusterfsd_msg_25,
+ "mount failed");
+ exit(err);
+ }
+ }
+ sys_read(ctx->daemon_pipe[0], (void *)&err, sizeof(err));
+            /* NOTE: Only the least significant 8 bits, i.e. (err & 255),
+               will be available to the parent process on calling exit() */
+ if (err)
+ _exit(abs(err));
+ ret = glusterfs_pidfile_update(ctx, child_pid);
+ if (ret)
+ _exit(1);
+ _exit(0);
+ }
postfork:
- ret = gf_log_inject_timer_event (ctx);
+ ret = gf_log_inject_timer_event(ctx);
- glusterfs_signals_setup (ctx);
+ glusterfs_signals_setup(ctx);
out:
- return ret;
+ return ret;
}
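daemonize() keeps a pipe open across the fork so the parent can wait for the child's startup verdict (written later through emancipate()) and exit with that status. A simplified sketch using fork() in place of os_daemon_return():

#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

/* Sketch: the child keeps the write end and reports its startup status
 * later; the parent blocks on the read end and exits with that status. */
int
daemonize_with_status_pipe(void)
{
    int pfd[2];
    int err = 1;
    pid_t pid;

    if (pipe(pfd) != 0)
        return -1;

    pid = fork();
    if (pid < 0)
        return -1;

    if (pid == 0) { /* child: return the write end for later reporting */
        close(pfd[0]);
        return pfd[1];
    }

    /* parent: wait for the child's verdict, then exit with it */
    close(pfd[1]);
    if (read(pfd[0], &err, sizeof(err)) != (ssize_t)sizeof(err))
        err = 1;
    /* only the low 8 bits of the status survive _exit()/waitpid() */
    _exit(err ? abs(err) : 0);
}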
-
#ifdef GF_LINUX_HOST_OS
static int
-set_oom_score_adj (glusterfs_ctx_t *ctx)
+set_oom_score_adj(glusterfs_ctx_t *ctx)
{
- int ret = -1;
- cmd_args_t *cmd_args = NULL;
- int fd = -1;
- size_t oom_score_len = 0;
- struct oom_api_info *api = NULL;
+ int ret = -1;
+ cmd_args_t *cmd_args = NULL;
+ int fd = -1;
+ size_t oom_score_len = 0;
+ struct oom_api_info *api = NULL;
- cmd_args = &ctx->cmd_args;
+ cmd_args = &ctx->cmd_args;
- if (!cmd_args->oom_score_adj)
- goto success;
+ if (!cmd_args->oom_score_adj)
+ goto success;
- api = get_oom_api_info();
- if (!api)
- goto out;
+ api = get_oom_api_info();
+ if (!api)
+ goto out;
- fd = open (api->oom_api_file, O_WRONLY);
- if (fd < 0)
- goto out;
+ fd = open(api->oom_api_file, O_WRONLY);
+ if (fd < 0)
+ goto out;
- oom_score_len = strlen (cmd_args->oom_score_adj);
- if (sys_write (fd,
- cmd_args->oom_score_adj, oom_score_len) != oom_score_len) {
- sys_close (fd);
- goto out;
- }
+ oom_score_len = strlen(cmd_args->oom_score_adj);
+ if (sys_write(fd, cmd_args->oom_score_adj, oom_score_len) !=
+ oom_score_len) {
+ sys_close(fd);
+ goto out;
+ }
- if (sys_close (fd) < 0)
- goto out;
+ if (sys_close(fd) < 0)
+ goto out;
success:
- ret = 0;
+ ret = 0;
out:
- return ret;
+ return ret;
}
#endif
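set_oom_score_adj() boils down to writing the requested value into the kernel's OOM-score interface for the current process; get_oom_api_info() selects between the newer and older procfs files. A sketch assuming only the modern /proc/self/oom_score_adj path:

#include <fcntl.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

/* Sketch: write the score string into procfs for the current process. */
int
write_oom_score_adj(const char *value)
{
    int fd = open("/proc/self/oom_score_adj", O_WRONLY);
    ssize_t len = (ssize_t)strlen(value);

    if (fd < 0)
        return -1;

    if (write(fd, value, len) != len) {
        close(fd);
        return -1;
    }

    return close(fd);
}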
-
int
-glusterfs_process_volfp (glusterfs_ctx_t *ctx, FILE *fp)
+glusterfs_process_volfp(glusterfs_ctx_t *ctx, FILE *fp)
{
- glusterfs_graph_t *graph = NULL;
- int ret = -1;
- xlator_t *trav = NULL;
-
- graph = glusterfs_graph_construct (fp);
- if (!graph) {
- gf_msg ("", GF_LOG_ERROR, 0, glusterfsd_msg_26,
- "failed to construct the graph");
- goto out;
- }
+ glusterfs_graph_t *graph = NULL;
+ int ret = -1;
+ xlator_t *trav = NULL;
- for (trav = graph->first; trav; trav = trav->next) {
- if (strcmp (trav->type, "mount/fuse") == 0) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0,
- glusterfsd_msg_27,
- "fuse xlator cannot be specified in volume "
- "file");
- goto out;
- }
- }
+ graph = glusterfs_graph_construct(fp);
+ if (!graph) {
+ gf_msg("", GF_LOG_ERROR, 0, glusterfsd_msg_26,
+ "failed to construct the graph");
+ goto out;
+ }
- xlator_t *xl = graph->first;
- if (strcmp (xl->type, "protocol/server") == 0) {
- (void) copy_opts_to_child (xl, FIRST_CHILD (xl), "*auth*");
+ for (trav = graph->first; trav; trav = trav->next) {
+ if (strcmp(trav->type, "mount/fuse") == 0) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_27,
+ "fuse xlator cannot be specified in volume "
+ "file");
+ goto out;
}
+ }
- ret = glusterfs_graph_prepare (graph, ctx, ctx->cmd_args.volume_name);
- if (ret) {
- goto out;
- }
+ xlator_t *xl = graph->first;
+ if (strcmp(xl->type, "protocol/server") == 0) {
+ (void)copy_opts_to_child(xl, FIRST_CHILD(xl), "*auth*");
+ }
- ret = glusterfs_graph_activate (graph, ctx);
+ ret = glusterfs_graph_prepare(graph, ctx, ctx->cmd_args.volume_name);
+ if (ret) {
+ goto out;
+ }
- if (ret) {
- goto out;
- }
+ ret = glusterfs_graph_activate(graph, ctx);
- gf_log_dump_graph (fp, graph);
+ if (ret) {
+ goto out;
+ }
- ret = 0;
+ gf_log_dump_graph(fp, graph);
+
+ ret = 0;
out:
- if (fp)
- fclose (fp);
-
- if (ret && !ctx->active) {
- glusterfs_graph_destroy (graph);
- /* there is some error in setting up the first graph itself */
- emancipate (ctx, ret);
- cleanup_and_exit (ret);
- }
+ if (fp)
+ fclose(fp);
- return ret;
-}
+ if (ret && !ctx->active) {
+ glusterfs_graph_destroy(graph);
+ /* there is some error in setting up the first graph itself */
+ emancipate(ctx, ret);
+ cleanup_and_exit(ret);
+ }
+ return ret;
+}
int
-glusterfs_volumes_init (glusterfs_ctx_t *ctx)
+glusterfs_volumes_init(glusterfs_ctx_t *ctx)
{
- FILE *fp = NULL;
- cmd_args_t *cmd_args = NULL;
- int ret = 0;
+ FILE *fp = NULL;
+ cmd_args_t *cmd_args = NULL;
+ int ret = 0;
- cmd_args = &ctx->cmd_args;
+ cmd_args = &ctx->cmd_args;
- if (cmd_args->sock_file) {
- ret = glusterfs_listener_init (ctx);
- if (ret)
- goto out;
- }
+ if (cmd_args->sock_file) {
+ ret = glusterfs_listener_init(ctx);
+ if (ret)
+ goto out;
+ }
- if (cmd_args->volfile_server) {
- ret = glusterfs_mgmt_init (ctx);
- /* return, do not emancipate() yet */
- return ret;
- }
+ if (cmd_args->volfile_server) {
+ ret = glusterfs_mgmt_init(ctx);
+ /* return, do not emancipate() yet */
+ return ret;
+ }
- fp = get_volfp (ctx);
+ fp = get_volfp(ctx);
- if (!fp) {
- gf_msg ("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_28,
- "Cannot reach volume specification file");
- ret = -1;
- goto out;
- }
+ if (!fp) {
+ gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_28,
+ "Cannot reach volume specification file");
+ ret = -1;
+ goto out;
+ }
- ret = glusterfs_process_volfp (ctx, fp);
- if (ret)
- goto out;
+ ret = glusterfs_process_volfp(ctx, fp);
+ if (ret)
+ goto out;
out:
- emancipate (ctx, ret);
- return ret;
+ emancipate(ctx, ret);
+ return ret;
}
/* This is the only legal global pointer */
glusterfs_ctx_t *glusterfsd_ctx;
int
-main (int argc, char *argv[])
+main(int argc, char *argv[])
{
- glusterfs_ctx_t *ctx = NULL;
- int ret = -1;
- char cmdlinestr[PATH_MAX] = {0,};
- cmd_args_t *cmd = NULL;
-
- mem_pools_init_early ();
-
- gf_check_and_set_mem_acct (argc, argv);
-
- ctx = glusterfs_ctx_new ();
- if (!ctx) {
- gf_msg ("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_29,
- "ERROR: glusterfs context not initialized");
- return ENOMEM;
- }
- glusterfsd_ctx = ctx;
-
- ret = glusterfs_globals_init (ctx);
- if (ret)
- return ret;
-
- THIS->ctx = ctx;
-
- ret = glusterfs_ctx_defaults_init (ctx);
- if (ret)
- goto out;
-
- ret = parse_cmdline (argc, argv, ctx);
- if (ret)
- goto out;
- cmd = &ctx->cmd_args;
+ glusterfs_ctx_t *ctx = NULL;
+ int ret = -1;
+ char cmdlinestr[PATH_MAX] = {
+ 0,
+ };
+ cmd_args_t *cmd = NULL;
+
+ mem_pools_init_early();
+
+ gf_check_and_set_mem_acct(argc, argv);
+
+ ctx = glusterfs_ctx_new();
+ if (!ctx) {
+ gf_msg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_29,
+ "ERROR: glusterfs context not initialized");
+ return ENOMEM;
+ }
+ glusterfsd_ctx = ctx;
+
+ ret = glusterfs_globals_init(ctx);
+ if (ret)
+ return ret;
- if (cmd->print_xlatordir) {
- /* XLATORDIR passed through a -D flag to GCC */
- printf ("%s\n", XLATORDIR);
- goto out;
- }
+ THIS->ctx = ctx;
- if (cmd->print_statedumpdir) {
- printf ("%s\n", DEFAULT_VAR_RUN_DIRECTORY);
- goto out;
- }
+ ret = glusterfs_ctx_defaults_init(ctx);
+ if (ret)
+ goto out;
- if (cmd->print_logdir) {
- printf ("%s\n", DEFAULT_LOG_FILE_DIRECTORY);
- goto out;
- }
+ ret = parse_cmdline(argc, argv, ctx);
+ if (ret)
+ goto out;
+ cmd = &ctx->cmd_args;
- if (cmd->print_libexecdir) {
- printf ("%s\n", LIBEXECDIR);
- goto out;
- }
+ if (cmd->print_xlatordir) {
+ /* XLATORDIR passed through a -D flag to GCC */
+ printf("%s\n", XLATORDIR);
+ goto out;
+ }
+
+ if (cmd->print_statedumpdir) {
+ printf("%s\n", DEFAULT_VAR_RUN_DIRECTORY);
+ goto out;
+ }
+
+ if (cmd->print_logdir) {
+ printf("%s\n", DEFAULT_LOG_FILE_DIRECTORY);
+ goto out;
+ }
+
+ if (cmd->print_libexecdir) {
+ printf("%s\n", LIBEXECDIR);
+ goto out;
+ }
+
+ if (cmd->print_netgroups) {
+ /* If this option is set we want to print & verify the file,
+ * set the return value (exit code in this case) and exit.
+ */
+ ret = print_netgroups_file(cmd->print_netgroups);
+ goto out;
+ }
+
+ if (cmd->print_exports) {
+ /* If this option is set we want to print & verify the file,
+ * set the return value (exit code in this case)
+ * and exit.
+ */
+ ret = print_exports_file(cmd->print_exports);
+ goto out;
+ }
- if (cmd->print_netgroups) {
- /* If this option is set we want to print & verify the file,
- * set the return value (exit code in this case) and exit.
- */
- ret = print_netgroups_file (cmd->print_netgroups);
- goto out;
- }
+ ret = logging_init(ctx, argv[0]);
+ if (ret)
+ goto out;
- if (cmd->print_exports) {
- /* If this option is set we want to print & verify the file,
- * set the return value (exit code in this case)
- * and exit.
- */
- ret = print_exports_file (cmd->print_exports);
+ /* log the version of glusterfs running here along with the actual
+ command line options. */
+ {
+ int i = 0;
+ int pos = 0;
+ int len = snprintf(cmdlinestr, sizeof(cmdlinestr), "%s", argv[0]);
+ for (i = 1; (i < argc) && (len > 0); i++) {
+ pos += len;
+ len = snprintf(cmdlinestr + pos, sizeof(cmdlinestr) - pos, " %s",
+ argv[i]);
+ if ((len <= 0) || (len >= (sizeof(cmdlinestr) - pos))) {
+ gf_msg("glusterfs", GF_LOG_ERROR, 0, glusterfsd_msg_29,
+ "failed to create command line string");
+ ret = -1;
goto out;
+ }
}
+ gf_msg(argv[0], GF_LOG_INFO, 0, glusterfsd_msg_30,
+ "Started running %s version %s (args: %s)", argv[0],
+ PACKAGE_VERSION, cmdlinestr);
- ret = logging_init (ctx, argv[0]);
- if (ret)
- goto out;
-
-
- /* log the version of glusterfs running here along with the actual
- command line options. */
- {
- int i = 0;
- int pos = 0;
- int len = snprintf (cmdlinestr, sizeof (cmdlinestr), "%s", argv[0]);
- for (i = 1; (i < argc) && (len > 0); i++) {
- pos += len;
- len = snprintf (cmdlinestr + pos, sizeof (cmdlinestr) - pos, " %s",
- argv[i]);
- if ((len <= 0) || (len >= (sizeof (cmdlinestr) - pos))) {
- gf_msg ("glusterfs", GF_LOG_ERROR, 0, glusterfsd_msg_29,
- "failed to create command line string");
- ret = -1;
- goto out;
- }
- }
- gf_msg (argv[0], GF_LOG_INFO, 0, glusterfsd_msg_30,
- "Started running %s version %s (args: %s)",
- argv[0], PACKAGE_VERSION, cmdlinestr);
-
- ctx->cmdlinestr = gf_strdup (cmdlinestr);
- }
+ ctx->cmdlinestr = gf_strdup(cmdlinestr);
+ }
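The block above joins argv[] into one log line with bounded snprintf() calls, bailing out if the fixed buffer would overflow. The same bounds check in isolation (function and buffer names are illustrative):

#include <stdio.h>

/* Sketch: append argv[] into a fixed buffer, failing instead of silently
 * truncating. */
int
join_args(char *buf, size_t size, int argc, char *argv[])
{
    int pos = 0;
    int len = snprintf(buf, size, "%s", argv[0]);
    int i;

    if (len <= 0 || (size_t)len >= size)
        return -1;

    for (i = 1; i < argc; i++) {
        pos += len;
        len = snprintf(buf + pos, size - pos, " %s", argv[i]);
        if (len <= 0 || (size_t)len >= size - pos)
            return -1; /* truncated or encoding error */
    }

    return 0;
}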
- gf_proc_dump_init();
+ gf_proc_dump_init();
- ret = create_fuse_mount (ctx);
- if (ret)
- goto out;
+ ret = create_fuse_mount(ctx);
+ if (ret)
+ goto out;
- ret = daemonize (ctx);
- if (ret)
- goto out;
+ ret = daemonize(ctx);
+ if (ret)
+ goto out;
- /*
- * If we do this before daemonize, the pool-sweeper thread dies with
- * the parent, but we want to do it as soon as possible after that in
- * case something else depends on pool allocations.
- */
- mem_pools_init_late ();
+ /*
+ * If we do this before daemonize, the pool-sweeper thread dies with
+ * the parent, but we want to do it as soon as possible after that in
+ * case something else depends on pool allocations.
+ */
+ mem_pools_init_late();
#ifdef GF_LINUX_HOST_OS
- ret = set_oom_score_adj (ctx);
- if (ret)
- goto out;
+ ret = set_oom_score_adj(ctx);
+ if (ret)
+ goto out;
#endif
- ctx->env = syncenv_new (0, 0, 0);
- if (!ctx->env) {
- gf_msg ("", GF_LOG_ERROR, 0, glusterfsd_msg_31,
- "Could not create new sync-environment");
- goto out;
- }
+ ctx->env = syncenv_new(0, 0, 0);
+ if (!ctx->env) {
+ gf_msg("", GF_LOG_ERROR, 0, glusterfsd_msg_31,
+ "Could not create new sync-environment");
+ goto out;
+ }
- /* do this _after_ daemonize() */
- if (cmd->global_timer_wheel) {
- if (!glusterfs_ctx_tw_get (ctx)) {
- ret = -1;
- goto out;
- }
+ /* do this _after_ daemonize() */
+ if (cmd->global_timer_wheel) {
+ if (!glusterfs_ctx_tw_get(ctx)) {
+ ret = -1;
+ goto out;
}
+ }
- ret = glusterfs_volumes_init (ctx);
- if (ret)
- goto out;
+ ret = glusterfs_volumes_init(ctx);
+ if (ret)
+ goto out;
- ret = event_dispatch (ctx->event_pool);
+ ret = event_dispatch(ctx->event_pool);
out:
-// glusterfs_ctx_destroy (ctx);
- return ret;
+ // glusterfs_ctx_destroy (ctx);
+ return ret;
}