author     Mohammed Rafi KC <rkavunga@redhat.com>  2019-06-24 12:00:20 +0530
committer  Rinku Kothiya <rkothiya@redhat.com>     2019-07-24 10:29:17 +0000
commit     47fcbc4c055a7880d2926e918ae1e1f57c7db20d (patch)
tree       6e2576a1d904aef082229ace4714c99c539428df /glusterfsd
parent     3c3b6377d6bdea9bffec31da88dd629347617b6d (diff)
glusterd/svc: update pid of mux volumes from the shd process
For a normal volume, we update the pid from the process itself, either during daemonization or at the end of init when running in no-daemon mode. Along with updating the pid we also lock the pidfile, to make sure that the process is running fine.

With brick mux, we were updating the pidfile from glusterd after an attach/detach request. There are two problems with this approach:

1) We are not holding a pidlock for any file other than the parent process's.

2) There is a chance of race conditions between attach and detach. For example, an shd start and a volume stop could race: say we are starting an shd and it gets attached to a volume; while we are trying to link the pidfile to the running process, the file could already have been deleted by the thread doing the volume stop.

Backport of: https://review.gluster.org/#/c/glusterfs/+/22935/

>Change-Id: I29a00352102877ce09ea3f376ca52affceb5cf1a
>Updates: bz#1722541
>Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>

Change-Id: I29a00352102877ce09ea3f376ca52affceb5cf1a
Updates: bz#1732668
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
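To illustrate the pidlock mechanism the message refers to: a minimal sketch of updating a pidfile from inside the daemon itself while holding a POSIX lock on it, assuming lockf() semantics. This is not the exact glusterfsd implementation, and pidfile_update() is a hypothetical helper name.

    /* Minimal sketch (hypothetical, not the glusterfsd code): write our
     * own pid into the pidfile and keep a POSIX lock on it, so readers
     * can tell whether the owning process is still alive. */
    #include <fcntl.h>   /* open, O_CREAT, O_RDWR */
    #include <stdio.h>   /* snprintf */
    #include <string.h>  /* strlen */
    #include <unistd.h>  /* lockf, ftruncate, getpid, write, close */

    static int
    pidfile_update(const char *path)
    {
        char buf[32];
        int fd = open(path, O_CREAT | O_RDWR, 0644);

        if (fd < 0)
            return -1;

        /* Non-blocking try-lock: fails if a live process already holds
         * the pidfile, which is the "process is running fine" check. */
        if (lockf(fd, F_TLOCK, 0) < 0) {
            close(fd);
            return -1;
        }

        if (ftruncate(fd, 0) < 0)
            goto err;

        snprintf(buf, sizeof(buf), "%d\n", (int)getpid());
        if (write(fd, buf, strlen(buf)) < 0)
            goto err;

        /* Keep fd open: the lock dies with the process, so a stale
         * pidfile is always detectable by retrying the lock. */
        return fd;

    err:
        close(fd);
        return -1;
    }

Updating the pidfile from glusterd cannot provide this guarantee, because the lock would belong to glusterd rather than to the shd process whose liveness it is supposed to prove; hence this patch moves the update into the shd process itself.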
Diffstat (limited to 'glusterfsd')
-rw-r--r--  glusterfsd/src/gf_attach.c        |  2
-rw-r--r--  glusterfsd/src/glusterfsd-mgmt.c  | 66
2 files changed, 59 insertions(+), 9 deletions(-)
diff --git a/glusterfsd/src/gf_attach.c b/glusterfsd/src/gf_attach.c
index 416cb491147..e688c3c9eb4 100644
--- a/glusterfsd/src/gf_attach.c
+++ b/glusterfsd/src/gf_attach.c
@@ -63,6 +63,8 @@ send_brick_req(xlator_t *this, struct rpc_clnt *rpc, char *path, int op)
brick_req.name = path;
brick_req.input.input_val = NULL;
brick_req.input.input_len = 0;
+ brick_req.dict.dict_val = NULL;
+ brick_req.dict.dict_len = 0;
req_size = xdr_sizeof((xdrproc_t)xdr_gd1_mgmt_brick_op_req, req);
iobuf = iobuf_get2(rpc->ctx->iobuf_pool, req_size);
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index e1fbe5d7234..027ff618992 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -50,13 +50,16 @@ int
emancipate(glusterfs_ctx_t *ctx, int ret);
int
glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
- char *volfile_id, char *checksum);
+ char *volfile_id, char *checksum,
+ dict_t *dict);
int
glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx,
- gf_volfile_t *volfile_obj, char *checksum);
+ gf_volfile_t *volfile_obj, char *checksum,
+ dict_t *dict);
int
glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
- char *volfile_id, char *checksum);
+ char *volfile_id, char *checksum,
+ dict_t *dict);
int
glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj);
@@ -75,7 +78,8 @@ mgmt_cbk_spec(struct rpc_clnt *rpc, void *mydata, void *data)
}
int
-mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id)
+mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id,
+ dict_t *dict)
{
glusterfs_ctx_t *ctx = NULL;
int ret = 0;
@@ -145,11 +149,11 @@ mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id)
* the volfile
*/
ret = glusterfs_process_svc_attach_volfp(ctx, tmpfp, volfile_id,
- sha256_hash);
+ sha256_hash, dict);
goto unlock;
}
ret = glusterfs_mux_volfile_reconfigure(tmpfp, ctx, volfile_obj,
- sha256_hash);
+ sha256_hash, dict);
if (ret < 0) {
gf_msg_debug("glusterfsd-mgmt", EINVAL, "Reconfigure failed !!");
}
@@ -387,6 +391,8 @@ err:
UNLOCK(&ctx->volfile_lock);
if (xlator_req.input.input_val)
free(xlator_req.input.input_val);
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
free(xlator_req.name);
xlator_req.name = NULL;
return 0;
@@ -561,6 +567,8 @@ out:
free(xlator_req.name);
free(xlator_req.input.input_val);
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
if (output)
dict_unref(output);
if (dict)
@@ -982,6 +990,8 @@ out:
if (input)
dict_unref(input);
free(xlator_req.input.input_val); /*malloced by xdr*/
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
if (output)
dict_unref(output);
free(xlator_req.name);
@@ -1062,6 +1072,8 @@ glusterfs_handle_attach(rpcsvc_request_t *req)
out:
UNLOCK(&ctx->volfile_lock);
}
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
free(xlator_req.input.input_val);
free(xlator_req.name);
@@ -1077,6 +1089,7 @@ glusterfs_handle_svc_attach(rpcsvc_request_t *req)
};
xlator_t *this = NULL;
glusterfs_ctx_t *ctx = NULL;
+ dict_t *dict = NULL;
GF_ASSERT(req);
this = THIS;
@@ -1091,20 +1104,41 @@ glusterfs_handle_svc_attach(rpcsvc_request_t *req)
req->rpc_err = GARBAGE_ARGS;
goto out;
}
+
gf_msg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_41,
"received attach "
"request for volfile-id=%s",
xlator_req.name);
+
+ dict = dict_new();
+ if (!dict) {
+ ret = -1;
+ errno = ENOMEM;
+ goto out;
+ }
+
+ ret = dict_unserialize(xlator_req.dict.dict_val, xlator_req.dict.dict_len,
+ &dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42,
+ "failed to unserialize xdata to dictionary");
+ goto out;
+ }
+ dict->extra_stdfree = xlator_req.dict.dict_val;
+
ret = 0;
if (ctx->active) {
ret = mgmt_process_volfile(xlator_req.input.input_val,
- xlator_req.input.input_len, xlator_req.name);
+ xlator_req.input.input_len, xlator_req.name,
+ dict);
} else {
gf_msg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42,
"got attach for %s but no active graph", xlator_req.name);
}
out:
+ if (dict)
+ dict_unref(dict);
if (xlator_req.input.input_val)
free(xlator_req.input.input_val);
if (xlator_req.name)
@@ -1241,6 +1275,8 @@ out:
GF_FREE(filepath);
if (xlator_req.input.input_val)
free(xlator_req.input.input_val);
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
return ret;
}
@@ -1313,6 +1349,8 @@ out:
if (dict)
dict_unref(dict);
free(xlator_req.input.input_val); // malloced by xdr
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
if (output)
dict_unref(output);
free(xlator_req.name); // malloced by xdr
@@ -1460,6 +1498,8 @@ out:
if (output)
dict_unref(output);
free(brick_req.input.input_val);
+ if (brick_req.dict.dict_val)
+ free(brick_req.dict.dict_val);
free(brick_req.name);
GF_FREE(msg);
GF_FREE(rsp.output.output_val);
@@ -1652,6 +1692,8 @@ out:
if (dict)
dict_unref(dict);
free(node_req.input.input_val);
+ if (node_req.dict.dict_val)
+ free(node_req.dict.dict_val);
GF_FREE(msg);
GF_FREE(rsp.output.output_val);
GF_FREE(node_name);
@@ -1755,6 +1797,8 @@ glusterfs_handle_nfs_profile(rpcsvc_request_t *req)
out:
free(nfs_req.input.input_val);
+ if (nfs_req.dict.dict_val)
+ free(nfs_req.dict.dict_val);
if (dict)
dict_unref(dict);
if (output)
@@ -1833,6 +1877,8 @@ out:
if (dict)
dict_unref(dict);
free(xlator_req.input.input_val); // malloced by xdr
+ if (xlator_req.dict.dict_val)
+ free(xlator_req.dict.dict_val);
if (output)
dict_unref(output);
free(xlator_req.name); // malloced by xdr
@@ -1961,7 +2007,8 @@ out:
if (dict)
dict_unref(dict);
free(brick_req.input.input_val);
-
+ if (brick_req.dict.dict_val)
+ free(brick_req.dict.dict_val);
gf_log(THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
@@ -2211,7 +2258,8 @@ volfile:
size = rsp.op_ret;
volfile_id = frame->local;
if (mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) {
- ret = mgmt_process_volfile((const char *)rsp.spec, size, volfile_id);
+ ret = mgmt_process_volfile((const char *)rsp.spec, size, volfile_id,
+ dict);
goto post_graph_mgmt;
}
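The handlers above unserialize brick_req.dict / xlator_req.dict on the receiving side; the sending side lives in glusterd and falls outside this glusterfsd-only diffstat. A hedged sketch of that counterpart follows, assuming the standard libglusterfs pairing of dict_allocate_and_serialize() with the dict_unserialize() call in glusterfs_handle_svc_attach(); the "shd-pidfile" key and the pidfile_path variable are hypothetical illustrations, not the actual patch contents.

    /* Sketch of the glusterd-side counterpart (not part of this diff):
     * serialize a dict into the new brick_req.dict field before sending
     * the attach RPC. */
    dict_t *dict = dict_new();
    if (!dict)
        goto out;

    /* Hypothetical key carrying pidfile information to the shd process;
     * pidfile_path is an assumed local variable. */
    ret = dict_set_str(dict, "shd-pidfile", pidfile_path);
    if (ret)
        goto out;

    /* Pairs with dict_unserialize() on the glusterfsd side; fills
     * brick_req.dict.dict_val and brick_req.dict.dict_len. */
    ret = dict_allocate_and_serialize(dict, &brick_req.dict.dict_val,
                                      &brick_req.dict.dict_len);
    if (ret)
        goto out;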