Diffstat (limited to 'xlators/mgmt/glusterd/src')
-rw-r--r--  xlators/mgmt/glusterd/src/Makefile.am                 |    6
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-brick-ops.c        |    2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c        |   42
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h        |    4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c     |    3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c          |   11
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handshake.c        |   21
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mem-types.h        |    1
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-messages.h         |    4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c            |   84
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c   |  140
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h   |   45
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-shd-svc.c          |  540
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-shd-svc.h          |   17
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-sm.c               |   12
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapd-svc.c        |    3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-statedump.c        |    3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-svc-helper.c       |  715
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-svc-helper.h       |   40
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c         |  252
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h         |   27
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-tier.c             |    3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-tierd-svc.c        |    4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c            |  137
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.h            |    4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volgen.c           |   60
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volgen.h           |   11
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-ops.c       |    8
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.c                  |   12
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.h                  |   30
30 files changed, 2000 insertions(+), 241 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am
index 6d09e37477c..1f7a3125ac2 100644
--- a/xlators/mgmt/glusterd/src/Makefile.am
+++ b/xlators/mgmt/glusterd/src/Makefile.am
@@ -18,11 +18,12 @@ glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c \
glusterd-locks.c glusterd-snapshot.c glusterd-mgmt-handler.c \
glusterd-mgmt.c glusterd-peer-utils.c glusterd-statedump.c \
glusterd-snapshot-utils.c glusterd-conn-mgmt.c \
- glusterd-proc-mgmt.c glusterd-svc-mgmt.c glusterd-shd-svc.c \
+ glusterd-proc-mgmt.c glusterd-svc-mgmt.c \
glusterd-nfs-svc.c glusterd-quotad-svc.c glusterd-svc-helper.c \
glusterd-conn-helper.c glusterd-snapd-svc.c glusterd-snapd-svc-helper.c \
glusterd-bitd-svc.c glusterd-scrub-svc.c glusterd-server-quorum.c \
glusterd-reset-brick.c glusterd-tierd-svc.c glusterd-tierd-svc-helper.c \
+ glusterd-shd-svc.c glusterd-shd-svc-helper.c \
glusterd-gfproxyd-svc.c glusterd-gfproxyd-svc-helper.c
@@ -38,11 +39,12 @@ noinst_HEADERS = glusterd.h glusterd-utils.h glusterd-op-sm.h \
glusterd-mgmt.h glusterd-messages.h glusterd-peer-utils.h \
glusterd-statedump.h glusterd-snapshot-utils.h glusterd-geo-rep.h \
glusterd-conn-mgmt.h glusterd-conn-helper.h glusterd-proc-mgmt.h \
- glusterd-svc-mgmt.h glusterd-shd-svc.h glusterd-nfs-svc.h \
+ glusterd-svc-mgmt.h glusterd-nfs-svc.h \
glusterd-quotad-svc.h glusterd-svc-helper.h glusterd-snapd-svc.h \
glusterd-snapd-svc-helper.h glusterd-rcu.h glusterd-bitd-svc.h \
glusterd-scrub-svc.h glusterd-server-quorum.h glusterd-errno.h \
glusterd-tierd-svc.h glusterd-tierd-svc-helper.h \
+ glusterd-shd-svc.h glusterd-shd-svc-helper.h \
glusterd-gfproxyd-svc.h glusterd-gfproxyd-svc-helper.h \
$(CONTRIBDIR)/userspace-rcu/rculist-extra.h
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
index 001eac4d39e..e08ea7be215 100644
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
@@ -2831,7 +2831,7 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
}
if (start_remove && volinfo->status == GLUSTERD_STATUS_STARTED) {
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_RECONF_FAIL,
"Unable to reconfigure NFS-Server");
diff --git a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c
index c1ce2dfda37..09f0a35dc45 100644
--- a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c
@@ -138,3 +138,45 @@ glusterd_conn_build_socket_filepath(char *rundir, uuid_t uuid, char *socketpath,
glusterd_set_socket_filepath(sockfilepath, socketpath, len);
return 0;
}
+
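+/* Common RPC notify handler for multiplexed service connections. Every
+ * event except RPC_CLNT_DESTROY is forwarded to the owning svc_proc's
+ * notify under conf->attach_lock; RPC_CLNT_DESTROY arrives only after the
+ * svc_proc has been detached, so it is handled without the lock. */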
+int
+__glusterd_muxsvc_conn_common_notify(struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
+{
+ glusterd_conf_t *conf = THIS->private;
+ glusterd_svc_proc_t *mux_proc = mydata;
+ int ret = -1;
+
+ /* Silently ignoring this error, exactly like the current
+ * implementation */
+ if (!mux_proc)
+ return 0;
+
+ if (event == RPC_CLNT_DESTROY) {
+        /* RPC_CLNT_DESTROY is only delivered after mux_proc has been
+         * detached from the list, so it is safe to handle it without the
+         * lock. Processing RPC_CLNT_DESTROY under a lock would lead to
+         * deadlock.
+         */
+ if (mux_proc->data) {
+ glusterd_volinfo_unref(mux_proc->data);
+ mux_proc->data = NULL;
+ }
+ GF_FREE(mux_proc);
+ ret = 0;
+ } else {
+ pthread_mutex_lock(&conf->attach_lock);
+ {
+ ret = mux_proc->notify(mux_proc, event);
+ }
+ pthread_mutex_unlock(&conf->attach_lock);
+ }
+ return ret;
+}
+
+int
+glusterd_muxsvc_conn_common_notify(struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
+{
+ return glusterd_big_locked_notify(rpc, mydata, event, data,
+ __glusterd_muxsvc_conn_common_notify);
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h
index 602c0ba7b84..d1c4607795a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h
+++ b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h
@@ -43,9 +43,11 @@ glusterd_conn_disconnect(glusterd_conn_t *conn);
int
glusterd_conn_common_notify(struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event, void *data);
+int
+glusterd_muxsvc_conn_common_notify(struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data);
int32_t
glusterd_conn_build_socket_filepath(char *rundir, uuid_t uuid, char *socketpath,
int len);
-
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c
index f9c8617526b..b01fd4da24b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c
@@ -370,6 +370,7 @@ int
glusterd_gfproxydsvc_restart()
{
glusterd_volinfo_t *volinfo = NULL;
+ glusterd_volinfo_t *tmp = NULL;
int ret = -1;
xlator_t *this = THIS;
glusterd_conf_t *conf = NULL;
@@ -380,7 +381,7 @@ glusterd_gfproxydsvc_restart()
conf = this->private;
GF_VALIDATE_OR_GOTO(this->name, conf, out);
- cds_list_for_each_entry(volinfo, &conf->volumes, vol_list)
+ cds_list_for_each_entry_safe(volinfo, tmp, &conf->volumes, vol_list)
{
/* Start per volume gfproxyd svc */
if (volinfo->status == GLUSTERD_STATUS_STARTED) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index eb362eb9c1f..58f8b4559d3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -5775,6 +5775,11 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
GF_FREE(rebal_data);
+ fprintf(fp, "Volume%d.shd_svc.online_status: %s\n", count,
+ volinfo->shd.svc.online ? "Online" : "Offline");
+ fprintf(fp, "Volume%d.shd_svc.inited: %s\n", count,
+ volinfo->shd.svc.inited ? "True" : "False");
+
if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
ret = glusterd_volume_get_hot_tier_type_str(volinfo,
&hot_tier_type_str);
@@ -5844,12 +5849,6 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
fprintf(fp, "\n[Services]\n");
- if (priv->shd_svc.inited) {
- fprintf(fp, "svc%d.name: %s\n", ++count, priv->shd_svc.name);
- fprintf(fp, "svc%d.online_status: %s\n\n", count,
- priv->shd_svc.online ? "Online" : "Offline");
- }
-
if (priv->nfs_svc.inited) {
fprintf(fp, "svc%d.name: %s\n", ++count, priv->nfs_svc.name);
fprintf(fp, "svc%d.online_status: %s\n\n", count,
diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c
index 5599a639f53..1ba58c3ab9e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c
@@ -30,6 +30,7 @@
#include "rpcsvc.h"
#include "rpc-common-xdr.h"
#include "glusterd-gfproxyd-svc-helper.h"
+#include "glusterd-shd-svc-helper.h"
extern struct rpc_clnt_program gd_peer_prog;
extern struct rpc_clnt_program gd_mgmt_prog;
@@ -328,6 +329,26 @@ build_volfile_path(char *volume_id, char *path, size_t path_len,
goto out;
}
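+    /* Volfile ids of the form "shd/<volname>" resolve to the per-volume
+     * shd volfile built by glusterd_svc_build_shd_volfile_path(). */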
+ volid_ptr = strstr(volume_id, "shd/");
+ if (volid_ptr) {
+ volid_ptr = strchr(volid_ptr, '/');
+ if (!volid_ptr) {
+ ret = -1;
+ goto out;
+ }
+ volid_ptr++;
+
+ ret = glusterd_volinfo_find(volid_ptr, &volinfo);
+ if (ret == -1) {
+ gf_log(this->name, GF_LOG_ERROR, "Couldn't find volinfo");
+ goto out;
+ }
+
+ glusterd_svc_build_shd_volfile_path(volinfo, path, path_len);
+ ret = 0;
+ goto out;
+ }
+
volid_ptr = strstr(volume_id, "/snaps/");
if (volid_ptr) {
ret = get_snap_volname_and_volinfo(volid_ptr, &volname, &volinfo);
diff --git a/xlators/mgmt/glusterd/src/glusterd-mem-types.h b/xlators/mgmt/glusterd/src/glusterd-mem-types.h
index 7a784db0372..17052cee263 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mem-types.h
+++ b/xlators/mgmt/glusterd/src/glusterd-mem-types.h
@@ -51,6 +51,7 @@ typedef enum gf_gld_mem_types_ {
gf_gld_mt_missed_snapinfo_t,
gf_gld_mt_snap_create_args_t,
gf_gld_mt_glusterd_brick_proc_t,
+ gf_gld_mt_glusterd_svc_proc_t,
gf_gld_mt_end,
} gf_gld_mem_types_t;
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-messages.h b/xlators/mgmt/glusterd/src/glusterd-messages.h
index 1a4bd5499e5..273ac800576 100644
--- a/xlators/mgmt/glusterd/src/glusterd-messages.h
+++ b/xlators/mgmt/glusterd/src/glusterd-messages.h
@@ -298,6 +298,8 @@ GLFS_MSGID(
GD_MSG_LOCALTIME_LOGGING_ENABLE, GD_MSG_LOCALTIME_LOGGING_DISABLE,
GD_MSG_PORTS_EXHAUSTED, GD_MSG_CHANGELOG_GET_FAIL,
GD_MSG_MANAGER_FUNCTION_FAILED,
- GD_MSG_DAEMON_LOG_LEVEL_VOL_OPT_VALIDATE_FAIL);
+ GD_MSG_DAEMON_LOG_LEVEL_VOL_OPT_VALIDATE_FAIL, GD_MSG_SHD_START_FAIL,
+ GD_MSG_SHD_OBJ_GET_FAIL, GD_MSG_SVC_ATTACH_FAIL, GD_MSG_ATTACH_INFO,
+ GD_MSG_DETACH_INFO, GD_MSG_SVC_DETACH_FAIL);
#endif /* !_GLUSTERD_MESSAGES_H_ */
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 84c34f1fe4a..115622d35c6 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -44,6 +44,7 @@
#include "glusterd-snapshot-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-svc-helper.h"
+#include "glusterd-shd-svc-helper.h"
#include "glusterd-shd-svc.h"
#include "glusterd-nfs-svc.h"
#include "glusterd-quotad-svc.h"
@@ -2202,6 +2203,11 @@ glusterd_options_reset(glusterd_volinfo_t *volinfo, char *key,
if (ret)
goto out;
+ svc = &(volinfo->shd.svc);
+ ret = svc->reconfigure(volinfo);
+ if (ret)
+ goto out;
+
ret = glusterd_create_volfiles_and_notify_services(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
@@ -2216,7 +2222,7 @@ glusterd_options_reset(glusterd_volinfo_t *volinfo, char *key,
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(volinfo);
if (ret)
goto out;
}
@@ -2660,6 +2666,11 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
if (ret)
goto out;
+ svc = &(volinfo->shd.svc);
+ ret = svc->reconfigure(volinfo);
+ if (ret)
+ goto out;
+
ret = glusterd_create_volfiles_and_notify_services(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0,
@@ -2673,7 +2684,7 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
}
}
if (svcs_reconfigure) {
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(NULL);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL,
"Unable to restart "
@@ -3054,6 +3065,11 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
if (ret)
goto out;
+ svc = &(volinfo->shd.svc);
+ ret = svc->reconfigure(volinfo);
+ if (ret)
+ goto out;
+
ret = glusterd_create_volfiles_and_notify_services(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
@@ -3069,7 +3085,7 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL,
"Unable to restart services");
@@ -3102,6 +3118,11 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
if (ret)
goto out;
+ svc = &(volinfo->shd.svc);
+ ret = svc->reconfigure(volinfo);
+ if (ret)
+ goto out;
+
ret = glusterd_create_volfiles_and_notify_services(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
@@ -3117,7 +3138,7 @@ glusterd_op_set_volume(dict_t *dict, char **errstr)
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL,
"Unable to restart services");
@@ -3324,7 +3345,7 @@ glusterd_op_stats_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
goto out;
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(volinfo);
if (ret)
goto out;
}
@@ -3607,14 +3628,6 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
other_count++;
node_count++;
- } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
- ret = glusterd_add_node_to_dict(priv->shd_svc.name, rsp_dict, 0,
- vol_opts);
- if (ret)
- goto out;
- other_count++;
- node_count++;
-
} else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
ret = glusterd_add_node_to_dict(priv->quotad_svc.name, rsp_dict, 0,
vol_opts);
@@ -3648,6 +3661,12 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
goto out;
other_count++;
node_count++;
+ } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
+ ret = glusterd_add_shd_to_dict(volinfo, rsp_dict, other_index);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
} else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
ret = dict_get_strn(dict, "brick", SLEN("brick"), &brick);
if (ret)
@@ -3710,6 +3729,19 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
node_count++;
}
+ if (glusterd_is_shd_compatible_volume(volinfo)) {
+ shd_enabled = gd_is_self_heal_enabled(volinfo, vol_opts);
+ if (shd_enabled) {
+ ret = glusterd_add_shd_to_dict(volinfo, rsp_dict,
+ other_index);
+ if (ret)
+ goto out;
+ other_count++;
+ other_index++;
+ node_count++;
+ }
+ }
+
nfs_disabled = dict_get_str_boolean(vol_opts, NFS_DISABLE_MAP_KEY,
_gf_false);
if (!nfs_disabled) {
@@ -3722,18 +3754,6 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
node_count++;
}
- if (glusterd_is_shd_compatible_volume(volinfo))
- shd_enabled = gd_is_self_heal_enabled(volinfo, vol_opts);
- if (shd_enabled) {
- ret = glusterd_add_node_to_dict(priv->shd_svc.name, rsp_dict,
- other_index, vol_opts);
- if (ret)
- goto out;
- other_count++;
- node_count++;
- other_index++;
- }
-
if (glusterd_is_volume_quota_enabled(volinfo)) {
ret = glusterd_add_node_to_dict(priv->quotad_svc.name, rsp_dict,
other_index, vol_opts);
@@ -6831,16 +6851,18 @@ glusterd_shd_select_brick_xlator(dict_t *dict, gf_xl_afr_op_t heal_op,
int ret = -1;
glusterd_conf_t *priv = NULL;
xlator_t *this = NULL;
+ glusterd_svc_t *svc = NULL;
this = THIS;
GF_ASSERT(this);
priv = this->private;
GF_ASSERT(priv);
+ svc = &(volinfo->shd.svc);
switch (heal_op) {
case GF_SHD_OP_INDEX_SUMMARY:
case GF_SHD_OP_STATISTICS_HEAL_COUNT:
- if (!priv->shd_svc.online) {
+ if (!svc->online) {
if (!rsp_dict) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OPCTX_NULL,
"Received "
@@ -6861,7 +6883,7 @@ glusterd_shd_select_brick_xlator(dict_t *dict, gf_xl_afr_op_t heal_op,
break;
case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
- if (!priv->shd_svc.online) {
+ if (!svc->online) {
if (!rsp_dict) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OPCTX_NULL,
"Received "
@@ -6996,7 +7018,7 @@ glusterd_bricks_select_heal_volume(dict_t *dict, char **op_errstr,
ret = -1;
goto out;
} else {
- pending_node->node = &(priv->shd_svc);
+ pending_node->node = &(volinfo->shd.svc);
pending_node->type = GD_NODE_SHD;
cds_list_add_tail(&pending_node->list, selected);
pending_node = NULL;
@@ -7130,6 +7152,7 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
glusterd_pending_node_t *pending_node = NULL;
xlator_t *this = NULL;
glusterd_conf_t *priv = NULL;
+ glusterd_svc_t *svc = NULL;
GF_ASSERT(dict);
@@ -7225,7 +7248,8 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
ret = 0;
} else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
- if (!priv->shd_svc.online) {
+ svc = &(volinfo->shd.svc);
+ if (!svc->online) {
ret = -1;
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SELF_HEALD_DISABLED,
"Self-heal daemon is not running");
@@ -7237,7 +7261,7 @@ glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
ret = -1;
goto out;
}
- pending_node->node = &(priv->shd_svc);
+ pending_node->node = svc;
pending_node->type = GD_NODE_SHD;
pending_node->index = 0;
cds_list_add_tail(&pending_node->list, selected);
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c
new file mode 100644
index 00000000000..91967584f35
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.c
@@ -0,0 +1,140 @@
+/*
+ Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "glusterd.h"
+#include "glusterd-utils.h"
+#include "glusterd-shd-svc-helper.h"
+#include "glusterd-messages.h"
+#include "glusterd-volgen.h"
+
+void
+glusterd_svc_build_shd_socket_filepath(glusterd_volinfo_t *volinfo, char *path,
+ int path_len)
+{
+ char sockfilepath[PATH_MAX] = {
+ 0,
+ };
+ char rundir[PATH_MAX] = {
+ 0,
+ };
+ int32_t len = 0;
+ glusterd_conf_t *priv = THIS->private;
+
+ if (!priv)
+ return;
+
+ GLUSTERD_GET_SHD_RUNDIR(rundir, volinfo, priv);
+ len = snprintf(sockfilepath, sizeof(sockfilepath), "%s/run-%s", rundir,
+ uuid_utoa(MY_UUID));
+ if ((len < 0) || (len >= sizeof(sockfilepath))) {
+ sockfilepath[0] = 0;
+ }
+
+ glusterd_set_socket_filepath(sockfilepath, path, path_len);
+}
+
+void
+glusterd_svc_build_shd_pidfile(glusterd_volinfo_t *volinfo, char *path,
+ int path_len)
+{
+ char rundir[PATH_MAX] = {
+ 0,
+ };
+ glusterd_conf_t *priv = THIS->private;
+
+ if (!priv)
+ return;
+
+ GLUSTERD_GET_SHD_RUNDIR(rundir, volinfo, priv);
+
+ snprintf(path, path_len, "%s/%s-shd.pid", rundir, volinfo->volname);
+}
+
+void
+glusterd_svc_build_shd_volfile_path(glusterd_volinfo_t *volinfo, char *path,
+ int path_len)
+{
+ char workdir[PATH_MAX] = {
+ 0,
+ };
+ glusterd_conf_t *priv = THIS->private;
+
+ if (!priv)
+ return;
+
+ GLUSTERD_GET_VOLUME_DIR(workdir, volinfo, priv);
+
+ snprintf(path, path_len, "%s/%s-shd.vol", workdir, volinfo->volname);
+}
+
+void
+glusterd_svc_build_shd_logdir(char *logdir, char *volname, size_t len)
+{
+ snprintf(logdir, len, "%s/shd/%s", DEFAULT_LOG_FILE_DIRECTORY, volname);
+}
+
+void
+glusterd_svc_build_shd_logfile(char *logfile, char *logdir, size_t len)
+{
+ snprintf(logfile, len, "%s/shd.log", logdir);
+}
+
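+/* Detach an shd svc from its multiplexed process: drop the svc's rpc ref,
+ * unlink its pidfile and, when the last svc leaves the svc_proc, take the
+ * process-level rpc out of the svc_proc so it can be unrefed outside the
+ * lock. */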
+void
+glusterd_shd_svcproc_cleanup(glusterd_shdsvc_t *shd)
+{
+ glusterd_svc_proc_t *svc_proc = NULL;
+ glusterd_svc_t *svc = NULL;
+ glusterd_conf_t *conf = NULL;
+ gf_boolean_t need_unref = _gf_false;
+ rpc_clnt_t *rpc = NULL;
+
+ conf = THIS->private;
+ if (!conf)
+ return;
+
+ GF_VALIDATE_OR_GOTO(THIS->name, conf, out);
+ GF_VALIDATE_OR_GOTO(THIS->name, shd, out);
+
+ svc = &shd->svc;
+ shd->attached = _gf_false;
+
+ if (svc->conn.rpc) {
+ rpc_clnt_unref(svc->conn.rpc);
+ svc->conn.rpc = NULL;
+ }
+
+ pthread_mutex_lock(&conf->attach_lock);
+ {
+ svc_proc = svc->svc_proc;
+ svc->svc_proc = NULL;
+ svc->inited = _gf_false;
+ cds_list_del_init(&svc->mux_svc);
+ glusterd_unlink_file(svc->proc.pidfile);
+
+ if (svc_proc && cds_list_empty(&svc_proc->svcs)) {
+ cds_list_del_init(&svc_proc->svc_proc_list);
+            /* We cannot free the svc_proc here: pending events on the
+             * rpc may still access it. Instead we unref the rpc and
+             * defer freeing the memory to the notify function, when it
+             * receives RPC_CLNT_DESTROY.
+             */
+ need_unref = _gf_true;
+ rpc = svc_proc->rpc;
+ svc_proc->rpc = NULL;
+ }
+ }
+ pthread_mutex_unlock(&conf->attach_lock);
+    /* rpc unref has to be performed outside the lock */
+ if (need_unref && rpc)
+ rpc_clnt_unref(rpc);
+out:
+ return;
+}
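
For a volume named "testvol", the helpers above yield paths of the
following shape (illustrative only: the rundir and workdir prefixes come
from GLUSTERD_GET_SHD_RUNDIR and GLUSTERD_GET_VOLUME_DIR, and
/var/log/glusterfs stands in for DEFAULT_LOG_FILE_DIRECTORY):

    logdir    /var/log/glusterfs/shd/testvol
    logfile   /var/log/glusterfs/shd/testvol/shd.log
    pidfile   <shd-rundir>/testvol-shd.pid
    volfile   <volume-workdir>/testvol-shd.vol
    socket    derived from <shd-rundir>/run-<node-uuid> by
              glusterd_set_socket_filepath()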
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h
new file mode 100644
index 00000000000..c70702c21f0
--- /dev/null
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc-helper.h
@@ -0,0 +1,45 @@
+/*
+ Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _GLUSTERD_SHD_SVC_HELPER_H_
+#define _GLUSTERD_SHD_SVC_HELPER_H_
+
+#include "glusterd.h"
+#include "glusterd-svc-mgmt.h"
+
+void
+glusterd_svc_build_shd_socket_filepath(glusterd_volinfo_t *volinfo, char *path,
+ int path_len);
+
+void
+glusterd_svc_build_shd_pidfile(glusterd_volinfo_t *volinfo, char *path,
+ int path_len);
+
+void
+glusterd_svc_build_shd_volfile_path(glusterd_volinfo_t *volinfo, char *path,
+ int path_len);
+
+void
+glusterd_svc_build_shd_logdir(char *logdir, char *volname, size_t len);
+
+void
+glusterd_svc_build_shd_logfile(char *logfile, char *logdir, size_t len);
+
+void
+glusterd_shd_svcproc_cleanup(glusterd_shdsvc_t *shd);
+
+int
+glusterd_recover_shd_attach_failure(glusterd_volinfo_t *volinfo,
+ glusterd_svc_t *svc, int flags);
+
+int
+glusterd_shdsvc_create_volfile(glusterd_volinfo_t *volinfo);
+
+#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
index f5379b0270b..47898434380 100644
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c
@@ -13,9 +13,10 @@
#include "glusterd.h"
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
-#include "glusterd-svc-mgmt.h"
#include "glusterd-shd-svc.h"
+#include "glusterd-shd-svc-helper.h"
#include "glusterd-svc-helper.h"
+#include "glusterd-store.h"
#define GD_SHD_PROCESS_NAME "--process-name"
char *shd_svc_name = "glustershd";
@@ -23,27 +24,145 @@ char *shd_svc_name = "glustershd";
void
glusterd_shdsvc_build(glusterd_svc_t *svc)
{
+ int ret = -1;
+ ret = snprintf(svc->name, sizeof(svc->name), "%s", shd_svc_name);
+ if (ret < 0)
+ return;
+
+ CDS_INIT_LIST_HEAD(&svc->mux_svc);
svc->manager = glusterd_shdsvc_manager;
svc->start = glusterd_shdsvc_start;
- svc->stop = glusterd_svc_stop;
+ svc->stop = glusterd_shdsvc_stop;
+ svc->reconfigure = glusterd_shdsvc_reconfigure;
}
int
-glusterd_shdsvc_init(glusterd_svc_t *svc)
+glusterd_shdsvc_init(void *data, glusterd_conn_t *mux_conn,
+ glusterd_svc_proc_t *mux_svc)
{
- return glusterd_svc_init(svc, shd_svc_name);
+ int ret = -1;
+ char rundir[PATH_MAX] = {
+ 0,
+ };
+ char sockpath[PATH_MAX] = {
+ 0,
+ };
+ char pidfile[PATH_MAX] = {
+ 0,
+ };
+ char volfile[PATH_MAX] = {
+ 0,
+ };
+ char logdir[PATH_MAX] = {
+ 0,
+ };
+ char logfile[PATH_MAX] = {
+ 0,
+ };
+ char volfileid[256] = {0};
+ glusterd_svc_t *svc = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_muxsvc_conn_notify_t notify = NULL;
+ xlator_t *this = NULL;
+ char *volfileserver = NULL;
+ int32_t len = 0;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO(THIS->name, this, out);
+
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, priv, out);
+
+ volinfo = data;
+ GF_VALIDATE_OR_GOTO(this->name, data, out);
+ GF_VALIDATE_OR_GOTO(this->name, mux_svc, out);
+
+ svc = &(volinfo->shd.svc);
+
+ ret = snprintf(svc->name, sizeof(svc->name), "%s", shd_svc_name);
+ if (ret < 0)
+ goto out;
+
+ notify = glusterd_muxsvc_common_rpc_notify;
+ glusterd_store_perform_node_state_store(volinfo);
+
+ GLUSTERD_GET_SHD_RUNDIR(rundir, volinfo, priv);
+ glusterd_svc_create_rundir(rundir);
+
+ glusterd_svc_build_shd_logdir(logdir, volinfo->volname, sizeof(logdir));
+ glusterd_svc_build_shd_logfile(logfile, logdir, sizeof(logfile));
+
+ /* Initialize the connection mgmt */
+ if (mux_conn && mux_svc->rpc) {
+ /* multiplexed svc */
+ svc->conn.frame_timeout = mux_conn->frame_timeout;
+        /* This will be unrefed from glusterd_shd_svcproc_cleanup */
+ svc->conn.rpc = rpc_clnt_ref(mux_svc->rpc);
+ ret = snprintf(svc->conn.sockpath, sizeof(svc->conn.sockpath), "%s",
+ mux_conn->sockpath);
+ } else {
+ ret = mkdir_p(logdir, 0755, _gf_true);
+ if ((ret == -1) && (EEXIST != errno)) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED,
+ "Unable to create logdir %s", logdir);
+ goto out;
+ }
+
+ glusterd_svc_build_shd_socket_filepath(volinfo, sockpath,
+ sizeof(sockpath));
+ ret = glusterd_muxsvc_conn_init(&(svc->conn), mux_svc, sockpath, 600,
+ notify);
+ if (ret)
+ goto out;
+        /* This will be unrefed when the last svc is detached from the list */
+ if (!mux_svc->rpc)
+ mux_svc->rpc = rpc_clnt_ref(svc->conn.rpc);
+ }
+
+ /* Initialize the process mgmt */
+ glusterd_svc_build_shd_pidfile(volinfo, pidfile, sizeof(pidfile));
+ glusterd_svc_build_shd_volfile_path(volinfo, volfile, PATH_MAX);
+ len = snprintf(volfileid, sizeof(volfileid), "shd/%s", volinfo->volname);
+ if ((len < 0) || (len >= sizeof(volfileid))) {
+ ret = -1;
+ goto out;
+ }
+
+ if (dict_get_strn(this->options, "transport.socket.bind-address",
+ SLEN("transport.socket.bind-address"),
+ &volfileserver) != 0) {
+ volfileserver = "localhost";
+ }
+ ret = glusterd_proc_init(&(svc->proc), shd_svc_name, pidfile, logdir,
+ logfile, volfile, volfileid, volfileserver);
+ if (ret)
+ goto out;
+
+out:
+ gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
+ return ret;
}
-static int
-glusterd_shdsvc_create_volfile()
+int
+glusterd_shdsvc_create_volfile(glusterd_volinfo_t *volinfo)
{
char filepath[PATH_MAX] = {
0,
};
+
int ret = -1;
- glusterd_conf_t *conf = THIS->private;
dict_t *mod_dict = NULL;
+ glusterd_svc_build_shd_volfile_path(volinfo, filepath, PATH_MAX);
+ if (!glusterd_is_shd_compatible_volume(volinfo)) {
+        /* If a volfile exists, delete it. This happens when the volume
+         * is converted from replica/ec to distribute.
+         */
+ (void)glusterd_unlink_file(filepath);
+ ret = 0;
+ goto out;
+ }
mod_dict = dict_new();
if (!mod_dict)
goto out;
@@ -64,9 +183,7 @@ glusterd_shdsvc_create_volfile()
if (ret)
goto out;
- glusterd_svc_build_volfile_path(shd_svc_name, conf->workdir, filepath,
- sizeof(filepath));
- ret = glusterd_create_global_volfile(build_shd_graph, filepath, mod_dict);
+ ret = glusterd_shdsvc_generate_volfile(volinfo, filepath, mod_dict);
if (ret) {
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
"Failed to create volfile");
@@ -81,26 +198,89 @@ out:
return ret;
}
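+/* Return true only when every shd-compatible volume multiplexed into this
+ * svc's process is stopped; a single started volume keeps the shared
+ * process alive. */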
+gf_boolean_t
+glusterd_svcs_shd_compatible_volumes_stopped(glusterd_svc_t *svc)
+{
+ glusterd_svc_proc_t *svc_proc = NULL;
+ glusterd_shdsvc_t *shd = NULL;
+ glusterd_svc_t *temp_svc = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ gf_boolean_t comp = _gf_false;
+ glusterd_conf_t *conf = THIS->private;
+
+ GF_VALIDATE_OR_GOTO("glusterd", conf, out);
+ GF_VALIDATE_OR_GOTO("glusterd", svc, out);
+ pthread_mutex_lock(&conf->attach_lock);
+ {
+ svc_proc = svc->svc_proc;
+ if (!svc_proc)
+ goto unlock;
+ cds_list_for_each_entry(temp_svc, &svc_proc->svcs, mux_svc)
+ {
+            /* Get volinfo->shd from the svc object */
+            shd = cds_list_entry(temp_svc, glusterd_shdsvc_t, svc);
+ if (!shd) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL,
+ "Failed to get shd object "
+ "from shd service");
+ goto unlock;
+ }
+
+ /* Get volinfo from shd */
+ volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd);
+ if (!volinfo) {
+                gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+                       "Failed to get volinfo from shd");
+ goto unlock;
+ }
+ if (!glusterd_is_shd_compatible_volume(volinfo))
+ continue;
+ if (volinfo->status == GLUSTERD_STATUS_STARTED)
+ goto unlock;
+ }
+ comp = _gf_true;
+ }
+unlock:
+ pthread_mutex_unlock(&conf->attach_lock);
+out:
+ return comp;
+}
+
int
glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags)
{
- int ret = 0;
+ int ret = -1;
glusterd_volinfo_t *volinfo = NULL;
- if (!svc->inited) {
- ret = glusterd_shdsvc_init(svc);
- if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_FAILED_INIT_SHDSVC,
- "Failed to init shd "
- "service");
- goto out;
- } else {
- svc->inited = _gf_true;
- gf_msg_debug(THIS->name, 0, "shd service initialized");
+ volinfo = data;
+ GF_VALIDATE_OR_GOTO("glusterd", svc, out);
+ GF_VALIDATE_OR_GOTO("glusterd", volinfo, out);
+
+ if (volinfo)
+ glusterd_volinfo_ref(volinfo);
+
+ ret = glusterd_shdsvc_create_volfile(volinfo);
+ if (ret)
+ goto out;
+
+ if (!glusterd_is_shd_compatible_volume(volinfo)) {
+ ret = 0;
+ if (svc->inited) {
+            /* The shd was running for this volume and the volume has now
+             * been converted to a non-shd type, so just stop the shd.
+             */
+ ret = svc->stop(svc, SIGTERM);
}
+ goto out;
}
- volinfo = data;
+ ret = glusterd_shd_svc_mux_init(volinfo, svc);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_FAILED_INIT_SHDSVC,
+ "Failed to init shd service");
+ goto out;
+ }
/* If all the volumes are stopped or all shd compatible volumes
* are stopped then stop the service if:
@@ -110,31 +290,26 @@ glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags)
* - volinfo is NULL or
* - volinfo is present and volume is shd compatible
*/
- if (glusterd_are_all_volumes_stopped() ||
- glusterd_all_shd_compatible_volumes_stopped()) {
- if (!(volinfo && !glusterd_is_shd_compatible_volume(volinfo))) {
- ret = svc->stop(svc, SIGTERM);
- }
- } else {
- if (!(volinfo && !glusterd_is_shd_compatible_volume(volinfo))) {
- ret = glusterd_shdsvc_create_volfile();
- if (ret)
- goto out;
-
- ret = svc->stop(svc, SIGTERM);
- if (ret)
- goto out;
+ if (glusterd_svcs_shd_compatible_volumes_stopped(svc)) {
+        /* TODO:
+         * Take a lock and detach all svcs to stop the process;
+         * also reset the init flag.
+         */
+ ret = svc->stop(svc, SIGTERM);
+ } else if (volinfo) {
+ ret = svc->stop(svc, SIGTERM);
+ if (ret)
+ goto out;
+ if (volinfo->status == GLUSTERD_STATUS_STARTED) {
ret = svc->start(svc, flags);
if (ret)
goto out;
-
- ret = glusterd_conn_connect(&(svc->conn));
- if (ret)
- goto out;
}
}
out:
+ if (volinfo)
+ glusterd_volinfo_unref(volinfo);
if (ret)
gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
gf_msg_debug(THIS->name, 0, "Returning %d", ret);
@@ -143,7 +318,7 @@ out:
}
int
-glusterd_shdsvc_start(glusterd_svc_t *svc, int flags)
+glusterd_new_shd_svc_start(glusterd_svc_t *svc, int flags)
{
int ret = -1;
char glusterd_uuid_option[PATH_MAX] = {0};
@@ -188,31 +363,136 @@ glusterd_shdsvc_start(glusterd_svc_t *svc, int flags)
goto out;
ret = glusterd_svc_start(svc, flags, cmdline);
+ if (ret)
+ goto out;
+ ret = glusterd_conn_connect(&(svc->conn));
out:
if (cmdline)
dict_unref(cmdline);
+ return ret;
+}
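+/* Called when an attach request fails: tear down the volume's stale mux
+ * state, create a fresh svc_proc, re-init the svc against it and start a
+ * brand new shd process. */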
+int
+glusterd_recover_shd_attach_failure(glusterd_volinfo_t *volinfo,
+ glusterd_svc_t *svc, int flags)
+{
+ int ret = -1;
+ glusterd_svc_proc_t *mux_proc = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ conf = THIS->private;
+
+ if (!conf || !volinfo || !svc)
+ return -1;
+ glusterd_shd_svcproc_cleanup(&volinfo->shd);
+ mux_proc = glusterd_svcprocess_new();
+ if (!mux_proc) {
+ return -1;
+ }
+ ret = glusterd_shdsvc_init(volinfo, NULL, mux_proc);
+ if (ret)
+ return -1;
+ pthread_mutex_lock(&conf->attach_lock);
+ {
+ cds_list_add_tail(&mux_proc->svc_proc_list, &conf->shd_procs);
+ svc->svc_proc = mux_proc;
+ cds_list_del_init(&svc->mux_svc);
+ cds_list_add_tail(&svc->mux_svc, &mux_proc->svcs);
+ }
+ pthread_mutex_unlock(&conf->attach_lock);
+
+ ret = glusterd_new_shd_svc_start(svc, flags);
+ if (!ret) {
+ volinfo->shd.attached = _gf_true;
+ }
+ return ret;
+}
+
+int
+glusterd_shdsvc_start(glusterd_svc_t *svc, int flags)
+{
+ int ret = -1;
+ glusterd_shdsvc_t *shd = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ GF_VALIDATE_OR_GOTO("glusterd", svc, out);
+ conf = THIS->private;
+ GF_VALIDATE_OR_GOTO("glusterd", conf, out);
+
+ /* Get volinfo->shd from svc object */
+ shd = cds_list_entry(svc, glusterd_shdsvc_t, svc);
+ if (!shd) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL,
+ "Failed to get shd object "
+ "from shd service");
+ return -1;
+ }
+
+ /* Get volinfo from shd */
+ volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd);
+ if (!volinfo) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+               "Failed to get volinfo from shd");
+ return -1;
+ }
+
+ if (volinfo->status != GLUSTERD_STATUS_STARTED)
+ return -1;
+
+ glusterd_volinfo_ref(volinfo);
+ if (!svc->inited) {
+ ret = glusterd_shd_svc_mux_init(volinfo, svc);
+ if (ret)
+ goto out;
+ }
+
+ if (shd->attached) {
+ ret = glusterd_attach_svc(svc, volinfo, flags);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+                   "Failed to attach shd svc (volume=%s) to pid=%d. "
+                   "Starting a new process",
+ volinfo->volname, glusterd_proc_get_pid(&svc->proc));
+ ret = glusterd_recover_shd_attach_failure(volinfo, svc, flags);
+ }
+ goto out;
+ }
+ ret = glusterd_new_shd_svc_start(svc, flags);
+ if (!ret) {
+ shd->attached = _gf_true;
+ }
+out:
+ if (volinfo)
+ glusterd_volinfo_unref(volinfo);
gf_msg_debug(THIS->name, 0, "Returning %d", ret);
return ret;
}
int
-glusterd_shdsvc_reconfigure()
+glusterd_shdsvc_reconfigure(glusterd_volinfo_t *volinfo)
{
int ret = -1;
xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
gf_boolean_t identical = _gf_false;
+ dict_t *mod_dict = NULL;
+ glusterd_svc_t *svc = NULL;
this = THIS;
GF_VALIDATE_OR_GOTO("glusterd", this, out);
- priv = this->private;
- GF_VALIDATE_OR_GOTO(this->name, priv, out);
+ if (!volinfo) {
+        /* reconfigure will be called separately */
+ ret = 0;
+ goto out;
+ }
- if (glusterd_all_shd_compatible_volumes_stopped())
+ glusterd_volinfo_ref(volinfo);
+ svc = &(volinfo->shd.svc);
+ if (glusterd_svcs_shd_compatible_volumes_stopped(svc))
goto manager;
/*
@@ -220,8 +500,42 @@ glusterd_shdsvc_reconfigure()
* and cksum i.e. "character-by-character". If YES, then
* NOTHING has been changed, just return.
*/
- ret = glusterd_svc_check_volfile_identical(priv->shd_svc.name,
- build_shd_graph, &identical);
+
+ if (!glusterd_is_shd_compatible_volume(volinfo)) {
+ if (svc->inited)
+ goto manager;
+
+ /* Nothing to do if not shd compatible */
+ ret = 0;
+ goto out;
+ }
+ mod_dict = dict_new();
+ if (!mod_dict)
+ goto out;
+
+ ret = dict_set_uint32(mod_dict, "cluster.background-self-heal-count", 0);
+ if (ret)
+ goto out;
+
+ ret = dict_set_str(mod_dict, "cluster.data-self-heal", "on");
+ if (ret)
+ goto out;
+
+ ret = dict_set_str(mod_dict, "cluster.metadata-self-heal", "on");
+ if (ret)
+ goto out;
+
+ ret = dict_set_int32(mod_dict, "graph-check", 1);
+ if (ret)
+ goto out;
+
+ ret = dict_set_str(mod_dict, "cluster.entry-self-heal", "on");
+ if (ret)
+ goto out;
+
+ ret = glusterd_volume_svc_check_volfile_identical(
+ "glustershd", mod_dict, volinfo, glusterd_shdsvc_generate_volfile,
+ &identical);
if (ret)
goto out;
@@ -236,8 +550,9 @@ glusterd_shdsvc_reconfigure()
* changed, then inform the xlator to reconfigure the options.
*/
identical = _gf_false; /* RESET the FLAG */
- ret = glusterd_svc_check_topology_identical(priv->shd_svc.name,
- build_shd_graph, &identical);
+ ret = glusterd_volume_svc_check_topology_identical(
+ "glustershd", mod_dict, volinfo, glusterd_shdsvc_generate_volfile,
+ &identical);
if (ret)
goto out;
@@ -245,7 +560,7 @@ glusterd_shdsvc_reconfigure()
* options to shd volfile, so that shd will be reconfigured.
*/
if (identical) {
- ret = glusterd_shdsvc_create_volfile();
+ ret = glusterd_shdsvc_create_volfile(volinfo);
if (ret == 0) { /* Only if above PASSES */
ret = glusterd_fetchspec_notify(THIS);
}
@@ -253,12 +568,129 @@ glusterd_shdsvc_reconfigure()
}
manager:
/*
- * shd volfile's topology has been changed. shd server needs
- * to be RESTARTED to ACT on the changed volfile.
+     * shd volfile's topology has been changed. The shd service needs
+     * to be RECONFIGURED to ACT on the changed volfile.
*/
- ret = priv->shd_svc.manager(&(priv->shd_svc), NULL, PROC_START_NO_WAIT);
+ ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
out:
+ if (volinfo)
+ glusterd_volinfo_unref(volinfo);
+ if (mod_dict)
+ dict_unref(mod_dict);
gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
return ret;
}
+
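+/* Walk all volumes on glusterd restart and run the shd manager for each
+ * started volume, dropping conf->volume_lock around the manager call. */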
+int
+glusterd_shdsvc_restart()
+{
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_volinfo_t *tmp = NULL;
+ int ret = -1;
+ xlator_t *this = THIS;
+ glusterd_conf_t *conf = NULL;
+ glusterd_svc_t *svc = NULL;
+
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+
+ pthread_mutex_lock(&conf->volume_lock);
+ cds_list_for_each_entry_safe(volinfo, tmp, &conf->volumes, vol_list)
+ {
+ glusterd_volinfo_ref(volinfo);
+ pthread_mutex_unlock(&conf->volume_lock);
+ /* Start per volume shd svc */
+ if (volinfo->status == GLUSTERD_STATUS_STARTED) {
+ svc = &(volinfo->shd.svc);
+ ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SHD_START_FAIL,
+ "Couldn't start shd for "
+ "vol: %s on restart",
+ volinfo->volname);
+ gf_event(EVENT_SVC_MANAGER_FAILED, "volume=%s;svc_name=%s",
+ volinfo->volname, svc->name);
+ glusterd_volinfo_unref(volinfo);
+ goto out;
+ }
+ }
+ glusterd_volinfo_unref(volinfo);
+ pthread_mutex_lock(&conf->volume_lock);
+ }
+ pthread_mutex_unlock(&conf->volume_lock);
+out:
+ return ret;
+}
+
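+/* Stop for a multiplexed shd: if this svc is the last one in its process,
+ * stop the whole process; otherwise send a detach request for this
+ * volume's graph and leave the process running for the other volumes. */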
+int
+glusterd_shdsvc_stop(glusterd_svc_t *svc, int sig)
+{
+ int ret = -1;
+ glusterd_svc_proc_t *svc_proc = NULL;
+ glusterd_shdsvc_t *shd = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ gf_boolean_t empty = _gf_false;
+ glusterd_conf_t *conf = NULL;
+ int pid = -1;
+
+ conf = THIS->private;
+ GF_VALIDATE_OR_GOTO("glusterd", svc, out);
+ svc_proc = svc->svc_proc;
+ GF_VALIDATE_OR_GOTO("glusterd", svc_proc, out);
+ GF_VALIDATE_OR_GOTO("glusterd", conf, out);
+
+ /* Get volinfo->shd from svc object */
+ shd = cds_list_entry(svc, glusterd_shdsvc_t, svc);
+ if (!shd) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL,
+ "Failed to get shd object "
+ "from shd service");
+ return -1;
+ }
+
+ /* Get volinfo from shd */
+ volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd);
+ if (!volinfo) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+               "Failed to get volinfo from shd");
+ return -1;
+ }
+
+ glusterd_volinfo_ref(volinfo);
+ pthread_mutex_lock(&conf->attach_lock);
+ {
+ gf_is_service_running(svc->proc.pidfile, &pid);
+ cds_list_del_init(&svc->mux_svc);
+ empty = cds_list_empty(&svc_proc->svcs);
+ }
+ pthread_mutex_unlock(&conf->attach_lock);
+ if (empty) {
+ /* Unref will happen when destroying the connection */
+ glusterd_volinfo_ref(volinfo);
+ svc_proc->data = volinfo;
+ ret = glusterd_svc_stop(svc, sig);
+ }
+ if (!empty && pid != -1) {
+ ret = glusterd_detach_svc(svc, volinfo, sig);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL,
+                   "Failed to detach shd of volume %s from pid %d",
+ volinfo->volname, glusterd_proc_get_pid(&svc->proc));
+ else
+ gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_SVC_STOP_SUCCESS,
+                   "shd of volume %s detached from pid %d",
+ volinfo->volname, glusterd_proc_get_pid(&svc->proc));
+ }
+ svc->online = _gf_false;
+ (void)glusterd_unlink_file((char *)svc->proc.pidfile);
+ glusterd_shd_svcproc_cleanup(shd);
+ ret = 0;
+ glusterd_volinfo_unref(volinfo);
+out:
+ gf_msg_debug(THIS->name, 0, "Returning %d", ret);
+ return ret;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.h b/xlators/mgmt/glusterd/src/glusterd-shd-svc.h
index 775a9d44a2c..55b409f4b69 100644
--- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.h
+++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.h
@@ -12,12 +12,20 @@
#define _GLUSTERD_SHD_SVC_H_
#include "glusterd-svc-mgmt.h"
+#include "glusterd.h"
+
+typedef struct glusterd_shdsvc_ glusterd_shdsvc_t;
+struct glusterd_shdsvc_ {
+ glusterd_svc_t svc;
+ gf_boolean_t attached;
+};
void
glusterd_shdsvc_build(glusterd_svc_t *svc);
int
-glusterd_shdsvc_init(glusterd_svc_t *svc);
+glusterd_shdsvc_init(void *data, glusterd_conn_t *mux_conn,
+ glusterd_svc_proc_t *svc_proc);
int
glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags);
@@ -27,4 +35,11 @@ glusterd_shdsvc_start(glusterd_svc_t *svc, int flags);
int
glusterd_shdsvc_reconfigure();
+
+int
+glusterd_shdsvc_restart();
+
+int
+glusterd_shdsvc_stop(glusterd_svc_t *svc, int sig);
+
#endif
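
Throughout this series, the owning volinfo is recovered from a bare
glusterd_svc_t by walking the embedding chain declared above:
glusterd_volinfo_t embeds glusterd_shdsvc_t shd, which embeds
glusterd_svc_t svc. A minimal sketch of that container-of chain
(shd_svc_to_volinfo is a hypothetical helper name; the patch open-codes
the two steps at each call site):

    static glusterd_volinfo_t *
    shd_svc_to_volinfo(glusterd_svc_t *svc)
    {
        /* cds_list_entry() is userspace-rcu's container_of() */
        glusterd_shdsvc_t *shd = cds_list_entry(svc, glusterd_shdsvc_t, svc);
        return cds_list_entry(shd, glusterd_volinfo_t, shd);
    }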
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c
index 73a11a3eace..f0fcd956d30 100644
--- a/xlators/mgmt/glusterd/src/glusterd-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.c
@@ -748,6 +748,16 @@ glusterd_peer_detach_cleanup(glusterd_conf_t *priv)
}
}
+ if (glusterd_is_shd_compatible_volume(volinfo)) {
+ svc = &(volinfo->shd.svc);
+ ret = svc->stop(svc, SIGTERM);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL,
+ "Failed "
+ "to stop shd daemon service");
+ }
+ }
+
if (glusterd_is_gfproxyd_enabled(volinfo)) {
svc = &(volinfo->gfproxyd.svc);
ret = svc->stop(svc, SIGTERM);
@@ -775,7 +785,7 @@ glusterd_peer_detach_cleanup(glusterd_conf_t *priv)
}
/*Reconfigure all daemon services upon peer detach*/
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(NULL);
if (ret) {
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL,
"Failed to reconfigure all daemon services.");
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c
index 56bab0717cd..1da4076573c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c
@@ -366,6 +366,7 @@ int
glusterd_snapdsvc_restart()
{
glusterd_volinfo_t *volinfo = NULL;
+ glusterd_volinfo_t *tmp = NULL;
int ret = 0;
xlator_t *this = THIS;
glusterd_conf_t *conf = NULL;
@@ -376,7 +377,7 @@ glusterd_snapdsvc_restart()
conf = this->private;
GF_ASSERT(conf);
- cds_list_for_each_entry(volinfo, &conf->volumes, vol_list)
+ cds_list_for_each_entry_safe(volinfo, tmp, &conf->volumes, vol_list)
{
/* Start per volume snapd svc */
if (volinfo->status == GLUSTERD_STATUS_STARTED) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-statedump.c b/xlators/mgmt/glusterd/src/glusterd-statedump.c
index f5ecde7ff35..69d4cf4aacb 100644
--- a/xlators/mgmt/glusterd/src/glusterd-statedump.c
+++ b/xlators/mgmt/glusterd/src/glusterd-statedump.c
@@ -202,9 +202,6 @@ glusterd_dump_priv(xlator_t *this)
gf_proc_dump_build_key(key, "glusterd", "ping-timeout");
gf_proc_dump_write(key, "%d", priv->ping_timeout);
- gf_proc_dump_build_key(key, "glusterd", "shd.online");
- gf_proc_dump_write(key, "%d", priv->shd_svc.online);
-
gf_proc_dump_build_key(key, "glusterd", "nfs.online");
gf_proc_dump_write(key, "%d", priv->nfs_svc.online);
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
index ca19a75ba82..e42703c0f41 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
@@ -7,6 +7,7 @@
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
+#include <signal.h>
#include <glusterfs/globals.h>
#include <glusterfs/run.h>
@@ -20,12 +21,14 @@
#include "glusterd-bitd-svc.h"
#include "glusterd-tierd-svc.h"
#include "glusterd-tierd-svc-helper.h"
+#include "glusterd-shd-svc-helper.h"
#include "glusterd-scrub-svc.h"
#include "glusterd-svc-helper.h"
#include <glusterfs/syscall.h>
+#include "glusterd-snapshot-utils.h"
int
-glusterd_svcs_reconfigure()
+glusterd_svcs_reconfigure(glusterd_volinfo_t *volinfo)
{
int ret = 0;
xlator_t *this = THIS;
@@ -43,9 +46,11 @@ glusterd_svcs_reconfigure()
goto out;
svc_name = "self-heald";
- ret = glusterd_shdsvc_reconfigure();
- if (ret)
- goto out;
+ if (volinfo) {
+ ret = glusterd_shdsvc_reconfigure(volinfo);
+ if (ret)
+ goto out;
+ }
if (conf->op_version == GD_OP_VERSION_MIN)
goto out;
@@ -69,7 +74,7 @@ out:
}
int
-glusterd_svcs_stop()
+glusterd_svcs_stop(glusterd_volinfo_t *volinfo)
{
int ret = 0;
xlator_t *this = NULL;
@@ -85,14 +90,16 @@ glusterd_svcs_stop()
if (ret)
goto out;
- ret = glusterd_svc_stop(&(priv->shd_svc), SIGTERM);
- if (ret)
- goto out;
-
ret = glusterd_svc_stop(&(priv->quotad_svc), SIGTERM);
if (ret)
goto out;
+ if (volinfo) {
+        ret = glusterd_svc_stop(&(volinfo->shd.svc), SIGTERM);
+ if (ret)
+ goto out;
+ }
+
ret = glusterd_svc_stop(&(priv->bitd_svc), SIGTERM);
if (ret)
goto out;
@@ -121,12 +128,6 @@ glusterd_svcs_manager(glusterd_volinfo_t *volinfo)
if (ret)
goto out;
- ret = conf->shd_svc.manager(&(conf->shd_svc), volinfo, PROC_START_NO_WAIT);
- if (ret == -EINVAL)
- ret = 0;
- if (ret)
- goto out;
-
if (conf->op_version == GD_OP_VERSION_MIN)
goto out;
@@ -143,6 +144,15 @@ glusterd_svcs_manager(glusterd_volinfo_t *volinfo)
if (ret)
goto out;
+ if (volinfo) {
+ ret = volinfo->shd.svc.manager(&(volinfo->shd.svc), volinfo,
+ PROC_START_NO_WAIT);
+ if (ret == -EINVAL)
+ ret = 0;
+ if (ret)
+ goto out;
+ }
+
ret = conf->scrub_svc.manager(&(conf->scrub_svc), NULL, PROC_START_NO_WAIT);
if (ret == -EINVAL)
ret = 0;
@@ -269,3 +279,678 @@ out:
GF_FREE(tmpvol);
return ret;
}
+
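+/* Per-volume analogue of glusterd_svc_check_volfile_identical(): render a
+ * candidate volfile through the given graph builder into a temp file and
+ * byte-compare it against the one on disk. */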
+int
+glusterd_volume_svc_check_volfile_identical(
+ char *svc_name, dict_t *mode_dict, glusterd_volinfo_t *volinfo,
+ glusterd_vol_graph_builder_t builder, gf_boolean_t *identical)
+{
+ char orgvol[PATH_MAX] = {
+ 0,
+ };
+ char *tmpvol = NULL;
+ xlator_t *this = NULL;
+ int ret = -1;
+ int need_unlink = 0;
+ int tmp_fd = -1;
+
+ this = THIS;
+
+ GF_VALIDATE_OR_GOTO(this->name, this, out);
+ GF_VALIDATE_OR_GOTO(this->name, identical, out);
+
+    /* This builds the volfile for volume-level daemons */
+ glusterd_volume_svc_build_volfile_path(svc_name, volinfo, orgvol,
+ sizeof(orgvol));
+
+ ret = gf_asprintf(&tmpvol, "/tmp/g%s-XXXXXX", svc_name);
+ if (ret < 0) {
+ goto out;
+ }
+
+ /* coverity[secure_temp] mkstemp uses 0600 as the mode and is safe */
+ tmp_fd = mkstemp(tmpvol);
+ if (tmp_fd < 0) {
+ gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
+ "Unable to create temp file"
+ " %s:(%s)",
+ tmpvol, strerror(errno));
+ ret = -1;
+ goto out;
+ }
+
+ need_unlink = 1;
+
+ ret = builder(volinfo, tmpvol, mode_dict);
+ if (ret)
+ goto out;
+
+ ret = glusterd_check_files_identical(orgvol, tmpvol, identical);
+out:
+ if (need_unlink)
+ sys_unlink(tmpvol);
+
+ if (tmpvol != NULL)
+ GF_FREE(tmpvol);
+
+ if (tmp_fd >= 0)
+ sys_close(tmp_fd);
+
+ return ret;
+}
+
+int
+glusterd_volume_svc_check_topology_identical(
+ char *svc_name, dict_t *mode_dict, glusterd_volinfo_t *volinfo,
+ glusterd_vol_graph_builder_t builder, gf_boolean_t *identical)
+{
+ char orgvol[PATH_MAX] = {
+ 0,
+ };
+ char *tmpvol = NULL;
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = THIS;
+ int ret = -1;
+ int tmpclean = 0;
+ int tmpfd = -1;
+
+ if ((!identical) || (!this) || (!this->private))
+ goto out;
+
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+
+    /* This builds the volfile for volume-level daemons */
+ glusterd_volume_svc_build_volfile_path(svc_name, volinfo, orgvol,
+ sizeof(orgvol));
+ /* Create the temporary volfile */
+ ret = gf_asprintf(&tmpvol, "/tmp/g%s-XXXXXX", svc_name);
+ if (ret < 0) {
+ goto out;
+ }
+
+ /* coverity[secure_temp] mkstemp uses 0600 as the mode and is safe */
+ tmpfd = mkstemp(tmpvol);
+ if (tmpfd < 0) {
+ gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
+ "Unable to create temp file"
+ " %s:(%s)",
+ tmpvol, strerror(errno));
+ ret = -1;
+ goto out;
+ }
+
+ tmpclean = 1; /* SET the flag to unlink() tmpfile */
+
+ ret = builder(volinfo, tmpvol, mode_dict);
+ if (ret)
+ goto out;
+
+ /* Compare the topology of volfiles */
+ ret = glusterd_check_topology_identical(orgvol, tmpvol, identical);
+out:
+ if (tmpfd >= 0)
+ sys_close(tmpfd);
+ if (tmpclean)
+ sys_unlink(tmpvol);
+ if (tmpvol != NULL)
+ GF_FREE(tmpvol);
+ return ret;
+}
+
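+/* Pick an existing shd process to multiplex into; called with
+ * conf->attach_lock held. */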
+void *
+__gf_find_compatible_svc(gd_node_type daemon)
+{
+ glusterd_svc_proc_t *svc_proc = NULL;
+ glusterd_svc_proc_t *return_proc = NULL;
+ glusterd_svc_t *parent_svc = NULL;
+ struct cds_list_head *svc_procs = NULL;
+ glusterd_conf_t *conf = NULL;
+ int pid = -1;
+
+ conf = THIS->private;
+ GF_VALIDATE_OR_GOTO("glusterd", conf, out);
+
+ if (daemon == GD_NODE_SHD) {
+ svc_procs = &conf->shd_procs;
+ if (!svc_procs)
+ goto out;
+ }
+
+ cds_list_for_each_entry(svc_proc, svc_procs, svc_proc_list)
+ {
+ parent_svc = cds_list_entry(svc_proc->svcs.next, glusterd_svc_t,
+ mux_svc);
+ if (!return_proc)
+ return_proc = svc_proc;
+
+        /* If there is an already running shd daemon, select it. Otherwise
+         * select the first one.
+ */
+ if (parent_svc && gf_is_service_running(parent_svc->proc.pidfile, &pid))
+ return (void *)svc_proc;
+ /*
+         * Logic to select one process goes here. Currently there is only
+         * one shd_proc, so we select the first one.
+ */
+ }
+out:
+ return return_proc;
+}
+
+glusterd_svc_proc_t *
+glusterd_svcprocess_new()
+{
+ glusterd_svc_proc_t *new_svcprocess = NULL;
+
+ new_svcprocess = GF_CALLOC(1, sizeof(*new_svcprocess),
+ gf_gld_mt_glusterd_svc_proc_t);
+
+ if (!new_svcprocess)
+ return NULL;
+
+ CDS_INIT_LIST_HEAD(&new_svcprocess->svc_proc_list);
+ CDS_INIT_LIST_HEAD(&new_svcprocess->svcs);
+ new_svcprocess->notify = glusterd_muxsvc_common_rpc_notify;
+ return new_svcprocess;
+}
+
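+/* Bind an shd svc to a mux process: prefer the process a live pidfile
+ * points at, then any compatible process, else allocate a new svc_proc;
+ * finally run glusterd_shdsvc_init() against the chosen process. */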
+int
+glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc)
+{
+ int ret = -1;
+ glusterd_svc_proc_t *mux_proc = NULL;
+ glusterd_conn_t *mux_conn = NULL;
+ glusterd_conf_t *conf = NULL;
+ glusterd_svc_t *parent_svc = NULL;
+ int pid = -1;
+
+ GF_VALIDATE_OR_GOTO("glusterd", svc, out);
+ GF_VALIDATE_OR_GOTO("glusterd", volinfo, out);
+ conf = THIS->private;
+ GF_VALIDATE_OR_GOTO("glusterd", conf, out);
+ GF_VALIDATE_OR_GOTO("glusterd", svc, out);
+
+ pthread_mutex_lock(&conf->attach_lock);
+ {
+ if (!svc->inited) {
+ if (gf_is_service_running(svc->proc.pidfile, &pid)) {
+                /* Only a connect is required, but we don't know what
+                 * happened during the disconnect, so it is safer to reattach.
+ */
+ mux_proc = __gf_find_compatible_svc_from_pid(GD_NODE_SHD, pid);
+ }
+
+ if (!mux_proc) {
+ if (pid != -1 && sys_access(svc->proc.pidfile, R_OK) == 0) {
+                    /* pid belongs to a process glusterd no longer tracks:
+                     * kill it and unlink the stale pidfile. */
+ kill(pid, SIGTERM);
+ sys_unlink(svc->proc.pidfile);
+ }
+ mux_proc = __gf_find_compatible_svc(GD_NODE_SHD);
+ }
+ if (mux_proc) {
+ /* Take first entry from the process */
+ parent_svc = cds_list_entry(mux_proc->svcs.next, glusterd_svc_t,
+ mux_svc);
+ sys_link(parent_svc->proc.pidfile, svc->proc.pidfile);
+ mux_conn = &parent_svc->conn;
+ if (volinfo)
+ volinfo->shd.attached = _gf_true;
+ } else {
+ mux_proc = glusterd_svcprocess_new();
+ if (!mux_proc) {
+ ret = -1;
+ goto unlock;
+ }
+ cds_list_add_tail(&mux_proc->svc_proc_list, &conf->shd_procs);
+ }
+ svc->svc_proc = mux_proc;
+ cds_list_del_init(&svc->mux_svc);
+ cds_list_add_tail(&svc->mux_svc, &mux_proc->svcs);
+ ret = glusterd_shdsvc_init(volinfo, mux_conn, mux_proc);
+ if (ret) {
+ pthread_mutex_unlock(&conf->attach_lock);
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_FAILED_INIT_SHDSVC,
+ "Failed to init shd "
+ "service");
+ goto out;
+ }
+ gf_msg_debug(THIS->name, 0, "shd service initialized");
+ svc->inited = _gf_true;
+ }
+ ret = 0;
+ }
+unlock:
+ pthread_mutex_unlock(&conf->attach_lock);
+out:
+ return ret;
+}
+
+void *
+__gf_find_compatible_svc_from_pid(gd_node_type daemon, pid_t pid)
+{
+ glusterd_svc_proc_t *svc_proc = NULL;
+ struct cds_list_head *svc_procs = NULL;
+ glusterd_svc_t *svc = NULL;
+ pid_t mux_pid = -1;
+ glusterd_conf_t *conf = NULL;
+
+ conf = THIS->private;
+ if (!conf)
+ return NULL;
+
+ if (daemon == GD_NODE_SHD) {
+ svc_procs = &conf->shd_procs;
+        if (!svc_procs)
+            return NULL;
+    } /* Can be moved to a switch when mux is implemented for other daemons */
+
+ cds_list_for_each_entry(svc_proc, svc_procs, svc_proc_list)
+ {
+ cds_list_for_each_entry(svc, &svc_proc->svcs, mux_svc)
+ {
+ if (gf_is_service_running(svc->proc.pidfile, &mux_pid)) {
+ if (mux_pid == pid) {
+ /*TODO
+ * inefficient loop, but at the moment, there is only
+ * one shd.
+ */
+ return svc_proc;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
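+/* Default callback (used for detach requests): nothing to decode, just
+ * drop the blocker ref taken at submit time and destroy the frame. */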
+static int32_t
+my_callback(struct rpc_req *req, struct iovec *iov, int count, void *v_frame)
+{
+ call_frame_t *frame = v_frame;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ GF_VALIDATE_OR_GOTO("glusterd", frame, out);
+ this = frame->this;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+
+ GF_ATOMIC_DEC(conf->blockers);
+
+ STACK_DESTROY(frame->root);
+out:
+ return 0;
+}
+
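+/* Attach callback: on success mark the svc online, re-copying the pidfile
+ * if the attached svc had not yet recorded the mux pid; on failure fall
+ * back to glusterd_recover_shd_attach_failure() to start a dedicated
+ * process. */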
+static int32_t
+glusterd_svc_attach_cbk(struct rpc_req *req, struct iovec *iov, int count,
+ void *v_frame)
+{
+ call_frame_t *frame = v_frame;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_shdsvc_t *shd = NULL;
+ glusterd_svc_t *svc = frame->cookie;
+ glusterd_svc_t *parent_svc = NULL;
+ glusterd_svc_proc_t *mux_proc = NULL;
+ glusterd_conf_t *conf = NULL;
+ int *flag = (int *)frame->local;
+ xlator_t *this = THIS;
+ int pid = -1;
+ int ret = -1;
+ gf_getspec_rsp rsp = {
+ 0,
+ };
+
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO("glusterd", conf, out);
+ GF_VALIDATE_OR_GOTO("glusterd", frame, out);
+ GF_VALIDATE_OR_GOTO("glusterd", svc, out);
+
+ frame->local = NULL;
+ frame->cookie = NULL;
+
+ if (!strcmp(svc->name, "glustershd")) {
+ /* Get volinfo->shd from svc object */
+ shd = cds_list_entry(svc, glusterd_shdsvc_t, svc);
+ if (!shd) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL,
+ "Failed to get shd object "
+ "from shd service");
+ goto out;
+ }
+
+ /* Get volinfo from shd */
+ volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd);
+ if (!volinfo) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+                   "Failed to get volinfo from shd");
+ goto out;
+ }
+ }
+
+ if (!iov) {
+ gf_msg(frame->this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "iov is NULL");
+ ret = -1;
+ goto out;
+ }
+
+ ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_getspec_rsp);
+ if (ret < 0) {
+ gf_msg(frame->this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "XDR decoding error");
+ ret = -1;
+ goto out;
+ }
+
+ if (rsp.op_ret == 0) {
+ pthread_mutex_lock(&conf->attach_lock);
+ {
+ if (!strcmp(svc->name, "glustershd")) {
+ mux_proc = svc->svc_proc;
+ if (mux_proc &&
+ !gf_is_service_running(svc->proc.pidfile, &pid)) {
+ /*
+                 * When svcs are restarting, there is a chance that the
+                 * attached svc might not have updated its pid, because it
+                 * was still at the connection stage. In that case we need
+                 * to retry the pidfile copy.
+ */
+ parent_svc = cds_list_entry(mux_proc->svcs.next,
+ glusterd_svc_t, mux_svc);
+ if (parent_svc)
+ sys_link(parent_svc->proc.pidfile, svc->proc.pidfile);
+ }
+ }
+ svc->online = _gf_true;
+ }
+ pthread_mutex_unlock(&conf->attach_lock);
+        gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_ATTACH_INFO,
+ "svc %s of volume %s attached successfully to pid %d", svc->name,
+ volinfo->volname, glusterd_proc_get_pid(&svc->proc));
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_ATTACH_FAIL,
+ "svc %s of volume %s failed to "
+ "attach to pid %d. Starting a new process",
+ svc->name, volinfo->volname, glusterd_proc_get_pid(&svc->proc));
+ if (!strcmp(svc->name, "glustershd")) {
+ glusterd_recover_shd_attach_failure(volinfo, svc, *flag);
+ }
+ }
+out:
+ if (flag) {
+ GF_FREE(flag);
+ }
+ GF_ATOMIC_DEC(conf->blockers);
+ STACK_DESTROY(frame->root);
+ return 0;
+}
+
+extern size_t
+build_volfile_path(char *volume_id, char *path, size_t path_len,
+ char *trusted_str);
+
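+/* Serialize an attach/detach brick-op request and submit it on the
+ * daemon's RPC connection. For GLUSTERD_SVC_ATTACH the volfile contents
+ * are read from disk and shipped inline in the request. Error handling
+ * uses computed gotos (errlbl) so each failure path unwinds only what
+ * has been allocated so far.
+ */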
+int
+__glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags,
+ struct rpc_clnt *rpc, char *volfile_id,
+ int op)
+{
+ int ret = -1;
+ struct iobuf *iobuf = NULL;
+ struct iobref *iobref = NULL;
+ struct iovec iov = {
+ 0,
+ };
+ char path[PATH_MAX] = {
+ '\0',
+ };
+ struct stat stbuf = {
+ 0,
+ };
+ int32_t spec_fd = -1;
+ size_t file_len = -1;
+ char *volfile_content = NULL;
+ ssize_t req_size = 0;
+ call_frame_t *frame = NULL;
+ gd1_mgmt_brick_op_req brick_req;
+ void *req = &brick_req;
+ void *errlbl = &&err;
+ struct rpc_clnt_connection *conn;
+ xlator_t *this = THIS;
+ glusterd_conf_t *conf = THIS->private;
+ extern struct rpc_clnt_program gd_brick_prog;
+ fop_cbk_fn_t cbkfn = my_callback;
+
+ if (!rpc) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_PARAM_NULL,
+ "called with null rpc");
+ return -1;
+ }
+
+ conn = &rpc->conn;
+ if (!conn->connected || conn->disconnected) {
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_CONNECT_RETURNED,
+ "not connected yet");
+ return -1;
+ }
+
+ brick_req.op = op;
+ brick_req.name = volfile_id;
+ brick_req.input.input_val = NULL;
+ brick_req.input.input_len = 0;
+
+ frame = create_frame(this, this->ctx->pool);
+ if (!frame) {
+ goto *errlbl;
+ }
+
+ if (op == GLUSTERD_SVC_ATTACH) {
+ (void)build_volfile_path(volfile_id, path, sizeof(path), NULL);
+
+ ret = sys_stat(path, &stbuf);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_ATTACH_FAIL,
+ "Unable to stat %s (%s)", path, strerror(errno));
+ ret = -EINVAL;
+ goto *errlbl;
+ }
+
+ file_len = stbuf.st_size;
+ volfile_content = GF_MALLOC(file_len + 1, gf_common_mt_char);
+ if (!volfile_content) {
+ ret = -ENOMEM;
+ goto *errlbl;
+ }
+ spec_fd = open(path, O_RDONLY);
+ if (spec_fd < 0) {
+ gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_SVC_ATTACH_FAIL,
+ "failed to read volfile %s", path);
+ ret = -EIO;
+ goto *errlbl;
+ }
+ ret = sys_read(spec_fd, volfile_content, file_len);
+ if (ret == file_len) {
+ brick_req.input.input_val = volfile_content;
+ brick_req.input.input_len = file_len;
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_ATTACH_FAIL,
+ "read failed on path %s. File size=%" GF_PRI_SIZET
+ "read size=%d",
+ path, file_len, ret);
+ ret = -EIO;
+ goto *errlbl;
+ }
+
+ frame->cookie = svc;
+        frame->local = GF_CALLOC(1, sizeof(int), gf_gld_mt_int);
+        if (!frame->local) {
+            ret = -ENOMEM;
+            goto *errlbl;
+        }
+        *((int *)frame->local) = flags;
+ cbkfn = glusterd_svc_attach_cbk;
+ }
+
+ req_size = xdr_sizeof((xdrproc_t)xdr_gd1_mgmt_brick_op_req, req);
+ iobuf = iobuf_get2(rpc->ctx->iobuf_pool, req_size);
+ if (!iobuf) {
+ goto *errlbl;
+ }
+ errlbl = &&maybe_free_iobuf;
+
+ iov.iov_base = iobuf->ptr;
+ iov.iov_len = iobuf_pagesize(iobuf);
+
+ iobref = iobref_new();
+ if (!iobref) {
+ goto *errlbl;
+ }
+ errlbl = &&free_iobref;
+
+ iobref_add(iobref, iobuf);
+    /*
+     * Drop our reference to the iobuf. The iobref should already have
+     * one after iobref_add, so when we unref the iobref the iobuf is
+     * freed along with it.
+     */
+ iobuf_unref(iobuf);
+ /* Set the pointer to null so we don't free it on a later error. */
+ iobuf = NULL;
+
+ /* Create the xdr payload */
+ ret = xdr_serialize_generic(iov, req, (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+ if (ret == -1) {
+ goto *errlbl;
+ }
+ iov.iov_len = ret;
+
+ /* Send the msg */
+ GF_ATOMIC_INC(conf->blockers);
+ ret = rpc_clnt_submit(rpc, &gd_brick_prog, op, cbkfn, &iov, 1, NULL, 0,
+ iobref, frame, NULL, 0, NULL, 0, NULL);
+ GF_FREE(volfile_content);
+ if (spec_fd >= 0)
+ sys_close(spec_fd);
+ return ret;
+
+free_iobref:
+ iobref_unref(iobref);
+maybe_free_iobuf:
+ if (iobuf) {
+ iobuf_unref(iobuf);
+ }
+err:
+ GF_FREE(volfile_content);
+ if (spec_fd >= 0)
+ sys_close(spec_fd);
+ if (frame)
+ STACK_DESTROY(frame->root);
+ return -1;
+}
+
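+/* Ask an already-running compatible daemon to adopt this volume's svc.
+ * The request is retried up to 15 times; between attempts the big lock
+ * is dropped for a second so the RPC connection can make progress.
+ */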
+int
+glusterd_attach_svc(glusterd_svc_t *svc, glusterd_volinfo_t *volinfo, int flags)
+{
+ glusterd_conf_t *conf = THIS->private;
+ int ret = -1;
+ int tries;
+ rpc_clnt_t *rpc = NULL;
+
+ GF_VALIDATE_OR_GOTO("glusterd", conf, out);
+ GF_VALIDATE_OR_GOTO("glusterd", svc, out);
+ GF_VALIDATE_OR_GOTO("glusterd", volinfo, out);
+
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_ATTACH_INFO,
+ "adding svc %s (volume=%s) to existing "
+ "process with pid %d",
+ svc->name, volinfo->volname, glusterd_proc_get_pid(&svc->proc));
+
+ rpc = rpc_clnt_ref(svc->conn.rpc);
+ for (tries = 15; tries > 0; --tries) {
+ if (rpc) {
+ pthread_mutex_lock(&conf->attach_lock);
+ {
+ ret = __glusterd_send_svc_configure_req(
+ svc, flags, rpc, svc->proc.volfileid, GLUSTERD_SVC_ATTACH);
+ }
+ pthread_mutex_unlock(&conf->attach_lock);
+ if (!ret) {
+ volinfo->shd.attached = _gf_true;
+ goto out;
+ }
+ }
+ /*
+ * It might not actually be safe to manipulate the lock
+ * like this, but if we don't then the connection can
+ * never actually complete and retries are useless.
+ * Unfortunately, all of the alternatives (e.g. doing
+ * all of this in a separate thread) are much more
+ * complicated and risky.
+ * TBD: see if there's a better way
+ */
+ synclock_unlock(&conf->big_lock);
+ sleep(1);
+ synclock_lock(&conf->big_lock);
+ }
+ ret = -1;
+ gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_SVC_ATTACH_FAIL,
+ "attach failed for %s(volume=%s)", svc->name, volinfo->volname);
+out:
+ if (rpc)
+ rpc_clnt_unref(rpc);
+ return ret;
+}
+
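+/* Counterpart of glusterd_attach_svc(): ask the muxed daemon to drop
+ * this volume's svc, with the same retry scheme as attach.
+ */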
+int
+glusterd_detach_svc(glusterd_svc_t *svc, glusterd_volinfo_t *volinfo, int sig)
+{
+ glusterd_conf_t *conf = THIS->private;
+ int ret = -1;
+ int tries;
+ rpc_clnt_t *rpc = NULL;
+
+ GF_VALIDATE_OR_GOTO(THIS->name, conf, out);
+ GF_VALIDATE_OR_GOTO(THIS->name, svc, out);
+ GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
+
+ gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_DETACH_INFO,
+ "removing svc %s (volume=%s) from existing "
+ "process with pid %d",
+ svc->name, volinfo->volname, glusterd_proc_get_pid(&svc->proc));
+
+ rpc = rpc_clnt_ref(svc->conn.rpc);
+ for (tries = 15; tries > 0; --tries) {
+ if (rpc) {
+            /* Detach takes no flags, and sig is unused. */
+ pthread_mutex_lock(&conf->attach_lock);
+ {
+                ret = __glusterd_send_svc_configure_req(
+                    svc, 0, rpc, svc->proc.volfileid, GLUSTERD_SVC_DETACH);
+ }
+ pthread_mutex_unlock(&conf->attach_lock);
+ if (!ret) {
+ goto out;
+ }
+ }
+ /*
+ * It might not actually be safe to manipulate the lock
+ * like this, but if we don't then the connection can
+ * never actually complete and retries are useless.
+ * Unfortunately, all of the alternatives (e.g. doing
+ * all of this in a separate thread) are much more
+ * complicated and risky.
+ * TBD: see if there's a better way
+ */
+ synclock_unlock(&conf->big_lock);
+ sleep(1);
+ synclock_lock(&conf->big_lock);
+ }
+ ret = -1;
+ gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_SVC_DETACH_FAIL,
+ "detach failed for %s(volume=%s)", svc->name, volinfo->volname);
+out:
+ if (rpc)
+ rpc_clnt_unref(rpc);
+ return ret;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.h b/xlators/mgmt/glusterd/src/glusterd-svc-helper.h
index cc98e788bbe..5def2468785 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.h
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.h
@@ -16,10 +16,10 @@
#include "glusterd-volgen.h"
int
-glusterd_svcs_reconfigure();
+glusterd_svcs_reconfigure(glusterd_volinfo_t *volinfo);
int
-glusterd_svcs_stop();
+glusterd_svcs_stop(glusterd_volinfo_t *vol);
int
glusterd_svcs_manager(glusterd_volinfo_t *volinfo);
@@ -41,5 +41,41 @@ int
glusterd_svc_check_tier_topology_identical(char *svc_name,
glusterd_volinfo_t *volinfo,
gf_boolean_t *identical);
+int
+glusterd_volume_svc_check_volfile_identical(char *svc_name, dict_t *mode_dict,
+ glusterd_volinfo_t *volinfo,
+ glusterd_vol_graph_builder_t,
+ gf_boolean_t *identical);
+int
+glusterd_volume_svc_check_topology_identical(char *svc_name, dict_t *mode_dict,
+ glusterd_volinfo_t *volinfo,
+ glusterd_vol_graph_builder_t,
+ gf_boolean_t *identical);
+void
+glusterd_volume_svc_build_volfile_path(char *server, glusterd_volinfo_t *vol,
+ char *volfile, size_t len);
+void *
+__gf_find_compatible_svc(gd_node_type daemon);
+
+glusterd_svc_proc_t *
+glusterd_svcprocess_new(void);
+
+int
+glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc);
+
+void *
+__gf_find_compatible_svc_from_pid(gd_node_type daemon, pid_t pid);
+
+int
+glusterd_attach_svc(glusterd_svc_t *svc, glusterd_volinfo_t *volinfo,
+ int flags);
+
+int
+glusterd_detach_svc(glusterd_svc_t *svc, glusterd_volinfo_t *volinfo, int sig);
+
+int
+__glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flag,
+ struct rpc_clnt *rpc, char *volfile_id,
+ int op);
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
index 6325f60f94a..63c6b609bbd 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
@@ -18,6 +18,7 @@
#include "glusterd-conn-mgmt.h"
#include "glusterd-messages.h"
#include <glusterfs/syscall.h>
+#include "glusterd-shd-svc-helper.h"
int
glusterd_svc_create_rundir(char *rundir)
@@ -167,72 +168,79 @@ glusterd_svc_start(glusterd_svc_t *svc, int flags, dict_t *cmdline)
GF_ASSERT(this);
priv = this->private;
- GF_ASSERT(priv);
+ GF_VALIDATE_OR_GOTO("glusterd", priv, out);
+ GF_VALIDATE_OR_GOTO("glusterd", svc, out);
+
+ pthread_mutex_lock(&priv->attach_lock);
+ {
+ if (glusterd_proc_is_running(&(svc->proc))) {
+ ret = 0;
+ goto unlock;
+ }
- if (glusterd_proc_is_running(&(svc->proc))) {
- ret = 0;
- goto out;
- }
+ ret = sys_access(svc->proc.volfile, F_OK);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_NOT_FOUND,
+ "Volfile %s is not present", svc->proc.volfile);
+ goto unlock;
+ }
- ret = sys_access(svc->proc.volfile, F_OK);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_NOT_FOUND,
- "Volfile %s is not present", svc->proc.volfile);
- goto out;
- }
+ runinit(&runner);
- runinit(&runner);
+ if (this->ctx->cmd_args.valgrind) {
+ len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-%s.log",
+ svc->proc.logfile, svc->name);
+ if ((len < 0) || (len >= PATH_MAX)) {
+ ret = -1;
+ goto unlock;
+ }
- if (this->ctx->cmd_args.valgrind) {
- len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-%s.log",
- svc->proc.logfile, svc->name);
- if ((len < 0) || (len >= PATH_MAX)) {
- ret = -1;
- goto out;
+ runner_add_args(&runner, "valgrind", "--leak-check=full",
+ "--trace-children=yes", "--track-origins=yes",
+ NULL);
+ runner_argprintf(&runner, "--log-file=%s", valgrind_logfile);
}
- runner_add_args(&runner, "valgrind", "--leak-check=full",
- "--trace-children=yes", "--track-origins=yes", NULL);
- runner_argprintf(&runner, "--log-file=%s", valgrind_logfile);
- }
-
- runner_add_args(&runner, SBIN_DIR "/glusterfs", "-s",
- svc->proc.volfileserver, "--volfile-id",
- svc->proc.volfileid, "-p", svc->proc.pidfile, "-l",
- svc->proc.logfile, "-S", svc->conn.sockpath, NULL);
+ runner_add_args(&runner, SBIN_DIR "/glusterfs", "-s",
+ svc->proc.volfileserver, "--volfile-id",
+ svc->proc.volfileid, "-p", svc->proc.pidfile, "-l",
+ svc->proc.logfile, "-S", svc->conn.sockpath, NULL);
- if (dict_get_strn(priv->opts, GLUSTERD_LOCALTIME_LOGGING_KEY,
- SLEN(GLUSTERD_LOCALTIME_LOGGING_KEY),
- &localtime_logging) == 0) {
- if (strcmp(localtime_logging, "enable") == 0)
- runner_add_arg(&runner, "--localtime-logging");
- }
- if (dict_get_strn(priv->opts, GLUSTERD_DAEMON_LOG_LEVEL_KEY,
- SLEN(GLUSTERD_DAEMON_LOG_LEVEL_KEY), &log_level) == 0) {
- snprintf(daemon_log_level, 30, "--log-level=%s", log_level);
- runner_add_arg(&runner, daemon_log_level);
- }
+ if (dict_get_strn(priv->opts, GLUSTERD_LOCALTIME_LOGGING_KEY,
+ SLEN(GLUSTERD_LOCALTIME_LOGGING_KEY),
+ &localtime_logging) == 0) {
+ if (strcmp(localtime_logging, "enable") == 0)
+ runner_add_arg(&runner, "--localtime-logging");
+ }
+ if (dict_get_strn(priv->opts, GLUSTERD_DAEMON_LOG_LEVEL_KEY,
+ SLEN(GLUSTERD_DAEMON_LOG_LEVEL_KEY),
+ &log_level) == 0) {
+ snprintf(daemon_log_level, 30, "--log-level=%s", log_level);
+ runner_add_arg(&runner, daemon_log_level);
+ }
- if (this->ctx->cmd_args.global_threading) {
- runner_add_arg(&runner, "--global-threading");
- }
+ if (this->ctx->cmd_args.global_threading) {
+ runner_add_arg(&runner, "--global-threading");
+ }
- if (cmdline)
- dict_foreach(cmdline, svc_add_args, (void *)&runner);
+ if (cmdline)
+ dict_foreach(cmdline, svc_add_args, (void *)&runner);
- gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SVC_START_SUCCESS,
- "Starting %s service", svc->name);
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SVC_START_SUCCESS,
+ "Starting %s service", svc->name);
- if (flags == PROC_START_NO_WAIT) {
- ret = runner_run_nowait(&runner);
- } else {
- synclock_unlock(&priv->big_lock);
- {
- ret = runner_run(&runner);
+ if (flags == PROC_START_NO_WAIT) {
+ ret = runner_run_nowait(&runner);
+ } else {
+ synclock_unlock(&priv->big_lock);
+ {
+ ret = runner_run(&runner);
+ }
+ synclock_lock(&priv->big_lock);
}
- synclock_lock(&priv->big_lock);
}
-
+unlock:
+ pthread_mutex_unlock(&priv->attach_lock);
out:
gf_msg_debug(this->name, 0, "Returning %d", ret);
@@ -285,7 +293,8 @@ glusterd_svc_build_volfile_path(char *server, char *workdir, char *volfile,
glusterd_svc_build_svcdir(server, workdir, dir, sizeof(dir));
- if (!strcmp(server, "quotad")) /*quotad has different volfile name*/
+ if (!strcmp(server, "quotad"))
+ /*quotad has different volfile name*/
snprintf(volfile, len, "%s/%s.vol", dir, server);
else
snprintf(volfile, len, "%s/%s-server.vol", dir, server);
@@ -370,3 +379,138 @@ glusterd_svc_common_rpc_notify(glusterd_conn_t *conn, rpc_clnt_event_t event)
return ret;
}
+
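+/* Per-volume variant of glusterd_svc_build_volfile_path(); currently
+ * only glustershd has per-volume volfiles.
+ */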
+void
+glusterd_volume_svc_build_volfile_path(char *server, glusterd_volinfo_t *vol,
+ char *volfile, size_t len)
+{
+ GF_ASSERT(len == PATH_MAX);
+
+ if (!strcmp(server, "glustershd")) {
+ glusterd_svc_build_shd_volfile_path(vol, volfile, len);
+ }
+}
+
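+/* RPC-event handler shared by all svcs multiplexed into one process:
+ * CONNECT marks every attached svc online, DISCONNECT marks them
+ * offline and logs only when at least one svc actually changed state.
+ */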
+int
+glusterd_muxsvc_common_rpc_notify(glusterd_svc_proc_t *mux_proc,
+ rpc_clnt_event_t event)
+{
+ int ret = 0;
+ glusterd_svc_t *svc = NULL;
+ glusterd_svc_t *tmp = NULL;
+ xlator_t *this = NULL;
+ gf_boolean_t need_logging = _gf_false;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ if (!mux_proc) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_GET_FAIL,
+ "Failed to get the svc proc data");
+ return -1;
+ }
+
+    /* Currently this function is used only for the shd svc. If it is
+     * reused for another svc, change the glustershd references below.
+     * The svc name can be fetched from any of the attached svcs.
+     */
+ switch (event) {
+ case RPC_CLNT_CONNECT:
+ gf_msg_debug(this->name, 0,
+ "glustershd has connected with glusterd.");
+ gf_event(EVENT_SVC_CONNECTED, "svc_name=glustershd");
+ cds_list_for_each_entry_safe(svc, tmp, &mux_proc->svcs, mux_svc)
+ {
+ if (svc->online)
+ continue;
+ svc->online = _gf_true;
+ }
+ break;
+
+ case RPC_CLNT_DISCONNECT:
+ cds_list_for_each_entry_safe(svc, tmp, &mux_proc->svcs, mux_svc)
+ {
+ if (svc->online) {
+                    need_logging = _gf_true;
+ svc->online = _gf_false;
+ }
+ }
+ if (need_logging) {
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_NODE_DISCONNECTED,
+ "glustershd has disconnected from glusterd.");
+ gf_event(EVENT_SVC_DISCONNECTED, "svc_name=glustershd");
+ }
+ break;
+
+ default:
+ gf_msg_trace(this->name, 0, "got some other RPC event %d", event);
+ break;
+ }
+
+ return ret;
+}
+
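+/* Initialize the shared unix-socket RPC connection for a muxed service
+ * process. Events are delivered to glusterd_muxsvc_conn_common_notify()
+ * with the svc proc (not an individual svc) as context and fanned out
+ * through mux_proc->notify.
+ */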
+int
+glusterd_muxsvc_conn_init(glusterd_conn_t *conn, glusterd_svc_proc_t *mux_proc,
+ char *sockpath, int frame_timeout,
+ glusterd_muxsvc_conn_notify_t notify)
+{
+ int ret = -1;
+ dict_t *options = NULL;
+ struct rpc_clnt *rpc = NULL;
+ xlator_t *this = THIS;
+ glusterd_svc_t *svc = NULL;
+
+ options = dict_new();
+ if (!this || !options)
+ goto out;
+
+ svc = cds_list_entry(conn, glusterd_svc_t, conn);
+ if (!svc) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_GET_FAIL,
+ "Failed to get the service");
+ goto out;
+ }
+
+ ret = rpc_transport_unix_options_build(options, sockpath, frame_timeout);
+ if (ret)
+ goto out;
+
+ ret = dict_set_int32n(options, "transport.socket.ignore-enoent",
+ SLEN("transport.socket.ignore-enoent"), 1);
+ if (ret)
+ goto out;
+
+    /* @options is freed by rpc_transport when destroyed */
+ rpc = rpc_clnt_new(options, this, (char *)svc->name, 16);
+ if (!rpc) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = rpc_clnt_register_notify(rpc, glusterd_muxsvc_conn_common_notify,
+ mux_proc);
+ if (ret)
+ goto out;
+
+ ret = snprintf(conn->sockpath, sizeof(conn->sockpath), "%s", sockpath);
+ if (ret < 0)
+ goto out;
+ else
+ ret = 0;
+
+ conn->frame_timeout = frame_timeout;
+ conn->rpc = rpc;
+ mux_proc->notify = notify;
+out:
+ if (options)
+ dict_unref(options);
+ if (ret) {
+ if (rpc) {
+ rpc_clnt_unref(rpc);
+ rpc = NULL;
+ }
+ }
+ return ret;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h
index c850bfda68f..fbc522549e0 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h
@@ -13,9 +13,12 @@
#include "glusterd-proc-mgmt.h"
#include "glusterd-conn-mgmt.h"
+#include "glusterd-rcu.h"
struct glusterd_svc_;
+
typedef struct glusterd_svc_ glusterd_svc_t;
+typedef struct glusterd_svc_proc_ glusterd_svc_proc_t;
typedef void (*glusterd_svc_build_t)(glusterd_svc_t *svc);
@@ -25,6 +28,17 @@ typedef int (*glusterd_svc_start_t)(glusterd_svc_t *svc, int flags);
typedef int (*glusterd_svc_stop_t)(glusterd_svc_t *svc, int sig);
typedef int (*glusterd_svc_reconfigure_t)(void *data);
+typedef int (*glusterd_muxsvc_conn_notify_t)(glusterd_svc_proc_t *mux_proc,
+ rpc_clnt_event_t event);
+
+struct glusterd_svc_proc_ {
+ struct cds_list_head svc_proc_list;
+ struct cds_list_head svcs;
+ glusterd_muxsvc_conn_notify_t notify;
+ rpc_clnt_t *rpc;
+ void *data;
+};
+
struct glusterd_svc_ {
char name[NAME_MAX];
glusterd_conn_t conn;
@@ -35,6 +49,8 @@ struct glusterd_svc_ {
gf_boolean_t online;
gf_boolean_t inited;
glusterd_svc_reconfigure_t reconfigure;
+ glusterd_svc_proc_t *svc_proc;
+ struct cds_list_head mux_svc;
};
int
@@ -69,4 +85,15 @@ glusterd_svc_reconfigure(int (*create_volfile)());
int
glusterd_svc_common_rpc_notify(glusterd_conn_t *conn, rpc_clnt_event_t event);
+int
+glusterd_muxsvc_common_rpc_notify(glusterd_svc_proc_t *conn,
+ rpc_clnt_event_t event);
+
+int
+glusterd_proc_get_pid(glusterd_proc_t *proc);
+
+int
+glusterd_muxsvc_conn_init(glusterd_conn_t *conn, glusterd_svc_proc_t *mux_proc,
+ char *sockpath, int frame_timeout,
+ glusterd_muxsvc_conn_notify_t notify);
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-tier.c b/xlators/mgmt/glusterd/src/glusterd-tier.c
index dd86cf504ff..e4b79dfc862 100644
--- a/xlators/mgmt/glusterd/src/glusterd-tier.c
+++ b/xlators/mgmt/glusterd/src/glusterd-tier.c
@@ -27,6 +27,7 @@
#include "glusterd-messages.h"
#include "glusterd-mgmt.h"
#include "glusterd-syncop.h"
+#include "glusterd-shd-svc-helper.h"
#include <sys/wait.h>
#include <dlfcn.h>
@@ -615,7 +616,7 @@ glusterd_op_remove_tier_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if (cmd == GF_DEFRAG_CMD_DETACH_START &&
volinfo->status == GLUSTERD_STATUS_STARTED) {
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_RECONF_FAIL,
"Unable to reconfigure NFS-Server");
diff --git a/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c b/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c
index 04ceec5d85e..ab463f19425 100644
--- a/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-tierd-svc.c
@@ -83,7 +83,6 @@ glusterd_tierdsvc_init(void *data)
goto out;
notify = glusterd_svc_common_rpc_notify;
- glusterd_store_perform_node_state_store(volinfo);
volinfo->type = GF_CLUSTER_TYPE_TIER;
@@ -395,6 +394,7 @@ int
glusterd_tierdsvc_restart()
{
glusterd_volinfo_t *volinfo = NULL;
+ glusterd_volinfo_t *tmp = NULL;
int ret = 0;
xlator_t *this = THIS;
glusterd_conf_t *conf = NULL;
@@ -405,7 +405,7 @@ glusterd_tierdsvc_restart()
conf = this->private;
GF_VALIDATE_OR_GOTO(this->name, conf, out);
- cds_list_for_each_entry(volinfo, &conf->volumes, vol_list)
+ cds_list_for_each_entry_safe(volinfo, tmp, &conf->volumes, vol_list)
{
/* Start per volume tierd svc */
if (volinfo->status == GLUSTERD_STATUS_STARTED &&
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index e95bf9665c9..3570b018eeb 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -61,6 +61,7 @@
#include "glusterd-server-quorum.h"
#include <glusterfs/quota-common-utils.h>
#include <glusterfs/common-utils.h>
+#include "glusterd-shd-svc-helper.h"
#include "xdr-generic.h"
#include <sys/resource.h>
@@ -580,13 +581,17 @@ glusterd_volinfo_t *
glusterd_volinfo_unref(glusterd_volinfo_t *volinfo)
{
int refcnt = -1;
+ glusterd_conf_t *conf = THIS->private;
- pthread_mutex_lock(&volinfo->reflock);
+ pthread_mutex_lock(&conf->volume_lock);
{
- refcnt = --volinfo->refcnt;
+ pthread_mutex_lock(&volinfo->reflock);
+ {
+ refcnt = --volinfo->refcnt;
+ }
+ pthread_mutex_unlock(&volinfo->reflock);
}
- pthread_mutex_unlock(&volinfo->reflock);
-
+ pthread_mutex_unlock(&conf->volume_lock);
if (!refcnt) {
glusterd_volinfo_delete(volinfo);
return NULL;
@@ -658,6 +663,7 @@ glusterd_volinfo_new(glusterd_volinfo_t **volinfo)
glusterd_snapdsvc_build(&new_volinfo->snapd.svc);
glusterd_tierdsvc_build(&new_volinfo->tierd.svc);
glusterd_gfproxydsvc_build(&new_volinfo->gfproxyd.svc);
+ glusterd_shdsvc_build(&new_volinfo->shd.svc);
pthread_mutex_init(&new_volinfo->reflock, NULL);
*volinfo = glusterd_volinfo_ref(new_volinfo);
@@ -1023,11 +1029,11 @@ glusterd_volinfo_delete(glusterd_volinfo_t *volinfo)
gf_store_handle_destroy(volinfo->snapd.handle);
glusterd_auth_cleanup(volinfo);
+ glusterd_shd_svcproc_cleanup(&volinfo->shd);
pthread_mutex_destroy(&volinfo->reflock);
GF_FREE(volinfo);
ret = 0;
-
out:
gf_msg_debug(THIS->name, 0, "Returning %d", ret);
return ret;
@@ -3553,6 +3559,7 @@ glusterd_spawn_daemons(void *opaque)
ret = glusterd_snapdsvc_restart();
ret = glusterd_tierdsvc_restart();
ret = glusterd_gfproxydsvc_restart();
+ ret = glusterd_shdsvc_restart();
return ret;
}
@@ -4503,6 +4510,9 @@ glusterd_delete_stale_volume(glusterd_volinfo_t *stale_volinfo,
svc = &(stale_volinfo->snapd.svc);
(void)svc->manager(svc, stale_volinfo, PROC_START_NO_WAIT);
}
+ svc = &(stale_volinfo->shd.svc);
+ (void)svc->manager(svc, stale_volinfo, PROC_START_NO_WAIT);
+
(void)glusterd_volinfo_remove(stale_volinfo);
return 0;
@@ -4617,6 +4627,15 @@ glusterd_import_friend_volume(dict_t *peer_data, int count)
glusterd_volinfo_unref(old_volinfo);
}
+ ret = glusterd_store_volinfo(new_volinfo, GLUSTERD_VOLINFO_VER_AC_NONE);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_STORE_FAIL,
+ "Failed to store "
+ "volinfo for volume %s",
+ new_volinfo->volname);
+ goto out;
+ }
+
if (glusterd_is_volume_started(new_volinfo)) {
(void)glusterd_start_bricks(new_volinfo);
if (glusterd_is_snapd_enabled(new_volinfo)) {
@@ -4625,15 +4644,10 @@ glusterd_import_friend_volume(dict_t *peer_data, int count)
gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
}
}
- }
-
- ret = glusterd_store_volinfo(new_volinfo, GLUSTERD_VOLINFO_VER_AC_NONE);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_STORE_FAIL,
- "Failed to store "
- "volinfo for volume %s",
- new_volinfo->volname);
- goto out;
+ svc = &(new_volinfo->shd.svc);
+ if (svc->manager(svc, new_volinfo, PROC_START_NO_WAIT)) {
+ gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
+ }
}
ret = glusterd_create_volfiles_and_notify_services(new_volinfo);
@@ -5108,9 +5122,7 @@ glusterd_add_node_to_dict(char *server, dict_t *dict, int count,
glusterd_svc_build_pidfile_path(server, priv->rundir, pidfile,
sizeof(pidfile));
- if (strcmp(server, priv->shd_svc.name) == 0)
- svc = &(priv->shd_svc);
- else if (strcmp(server, priv->nfs_svc.name) == 0)
+ if (strcmp(server, priv->nfs_svc.name) == 0)
svc = &(priv->nfs_svc);
else if (strcmp(server, priv->quotad_svc.name) == 0)
svc = &(priv->quotad_svc);
@@ -5141,9 +5153,6 @@ glusterd_add_node_to_dict(char *server, dict_t *dict, int count,
if (!strcmp(server, priv->nfs_svc.name))
ret = dict_set_nstrn(dict, key, keylen, "NFS Server",
SLEN("NFS Server"));
- else if (!strcmp(server, priv->shd_svc.name))
- ret = dict_set_nstrn(dict, key, keylen, "Self-heal Daemon",
- SLEN("Self-heal Daemon"));
else if (!strcmp(server, priv->quotad_svc.name))
ret = dict_set_nstrn(dict, key, keylen, "Quota Daemon",
SLEN("Quota Daemon"));
@@ -8709,6 +8718,21 @@ glusterd_friend_remove_cleanup_vols(uuid_t uuid)
"to stop snapd daemon service");
}
}
+
+ if (glusterd_is_shd_compatible_volume(volinfo)) {
+            /*
+             * We are sending a stop request for all volumes, so it is
+             * fine to send stop for the muxed shd as well.
+             */
+ svc = &(volinfo->shd.svc);
+ ret = svc->stop(svc, SIGTERM);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL,
+ "Failed "
+ "to stop shd daemon service");
+ }
+ }
+
if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
svc = &(volinfo->tierd.svc);
ret = svc->stop(svc, SIGTERM);
@@ -8734,7 +8758,7 @@ glusterd_friend_remove_cleanup_vols(uuid_t uuid)
}
/* Reconfigure all daemon services upon peer detach */
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(NULL);
if (ret) {
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL,
"Failed to reconfigure all daemon services.");
@@ -14286,3 +14310,74 @@ glusterd_is_profile_on(glusterd_volinfo_t *volinfo)
return _gf_true;
return _gf_false;
}
+
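+/* Populate the status-output dict entries for the self-heal daemon,
+ * presented as a pseudo brick: "hostname" is the literal string
+ * "Self-heal Daemon", "path" is this node's UUID and the port is
+ * always 0.
+ */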
+int32_t
+glusterd_add_shd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
+ int32_t count)
+{
+ int ret = -1;
+ int32_t pid = -1;
+ int32_t brick_online = -1;
+ char key[64] = {0};
+ int keylen;
+ char *pidfile = NULL;
+ xlator_t *this = NULL;
+ char *uuid_str = NULL;
+
+ this = THIS;
+    GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ GF_VALIDATE_OR_GOTO(this->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO(this->name, dict, out);
+
+ keylen = snprintf(key, sizeof(key), "brick%d.hostname", count);
+ ret = dict_set_nstrn(dict, key, keylen, "Self-heal Daemon",
+ SLEN("Self-heal Daemon"));
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "brick%d.path", count);
+ uuid_str = gf_strdup(uuid_utoa(MY_UUID));
+ if (!uuid_str) {
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_dynstrn(dict, key, keylen, uuid_str);
+ if (ret)
+ goto out;
+ uuid_str = NULL;
+
+    /* shd doesn't have a port, but the CLI needs a port key with
+     * a zero value to parse.
+     */
+
+ keylen = snprintf(key, sizeof(key), "brick%d.port", count);
+ ret = dict_set_int32n(dict, key, keylen, 0);
+ if (ret)
+ goto out;
+
+ pidfile = volinfo->shd.svc.proc.pidfile;
+
+ brick_online = gf_is_service_running(pidfile, &pid);
+
+ /* If shd is not running, then don't print the pid */
+ if (!brick_online)
+ pid = -1;
+ keylen = snprintf(key, sizeof(key), "brick%d.pid", count);
+ ret = dict_set_int32n(dict, key, keylen, pid);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "brick%d.status", count);
+ ret = dict_set_int32n(dict, key, keylen, brick_online);
+
+out:
+ if (uuid_str)
+ GF_FREE(uuid_str);
+ if (ret)
+ gf_msg(this ? this->name : "glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_DICT_SET_FAILED,
+ "Returning %d. adding values to dict failed", ret);
+
+ return ret;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index 9bf19a67528..3647c343b47 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -876,4 +876,8 @@ glusterd_is_profile_on(glusterd_volinfo_t *volinfo);
char *
search_brick_path_from_proc(pid_t brick_pid, char *brickpath);
+
+int32_t
+glusterd_add_shd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
+ int32_t count);
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
index a0a9c3b6644..9f3496b03e2 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
@@ -36,6 +36,7 @@
#include "glusterd-svc-mgmt.h"
#include "glusterd-svc-helper.h"
#include "glusterd-snapd-svc-helper.h"
+#include "glusterd-shd-svc-helper.h"
#include "glusterd-gfproxyd-svc-helper.h"
struct gd_validate_reconf_opts {
@@ -4780,7 +4781,7 @@ volgen_get_shd_key(int type)
static int
volgen_set_shd_key_enable(dict_t *set_dict, const int type)
{
- int ret = -1;
+ int ret = 0;
switch (type) {
case GF_CLUSTER_TYPE_REPLICATE:
@@ -5064,24 +5065,15 @@ out:
static int
build_shd_volume_graph(xlator_t *this, volgen_graph_t *graph,
glusterd_volinfo_t *volinfo, dict_t *mod_dict,
- dict_t *set_dict, gf_boolean_t graph_check,
- gf_boolean_t *valid_config)
+ dict_t *set_dict, gf_boolean_t graph_check)
{
volgen_graph_t cgraph = {0};
int ret = 0;
int clusters = -1;
- if (!graph_check && (volinfo->status != GLUSTERD_STATUS_STARTED))
- goto out;
-
if (!glusterd_is_shd_compatible_volume(volinfo))
goto out;
- /* Shd graph is valid only when there is at least one
- * replica/disperse volume is present
- */
- *valid_config = _gf_true;
-
ret = prepare_shd_volume_options(volinfo, mod_dict, set_dict);
if (ret)
goto out;
@@ -5111,19 +5103,16 @@ out:
}
int
-build_shd_graph(volgen_graph_t *graph, dict_t *mod_dict)
+build_shd_graph(glusterd_volinfo_t *volinfo, volgen_graph_t *graph,
+ dict_t *mod_dict)
{
- glusterd_volinfo_t *voliter = NULL;
xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
dict_t *set_dict = NULL;
int ret = 0;
- gf_boolean_t valid_config = _gf_false;
xlator_t *iostxl = NULL;
gf_boolean_t graph_check = _gf_false;
this = THIS;
- priv = this->private;
set_dict = dict_new();
if (!set_dict) {
@@ -5133,26 +5122,18 @@ build_shd_graph(volgen_graph_t *graph, dict_t *mod_dict)
if (mod_dict)
graph_check = dict_get_str_boolean(mod_dict, "graph-check", 0);
- iostxl = volgen_graph_add_as(graph, "debug/io-stats", "glustershd");
+ iostxl = volgen_graph_add_as(graph, "debug/io-stats", volinfo->volname);
if (!iostxl) {
ret = -1;
goto out;
}
- cds_list_for_each_entry(voliter, &priv->volumes, vol_list)
- {
- ret = build_shd_volume_graph(this, graph, voliter, mod_dict, set_dict,
- graph_check, &valid_config);
- ret = dict_reset(set_dict);
- if (ret)
- goto out;
- }
+ ret = build_shd_volume_graph(this, graph, volinfo, mod_dict, set_dict,
+ graph_check);
out:
if (set_dict)
dict_unref(set_dict);
- if (!valid_config)
- ret = -EINVAL;
return ret;
}
@@ -6469,6 +6450,10 @@ glusterd_create_volfiles(glusterd_volinfo_t *volinfo)
if (ret)
gf_log(this->name, GF_LOG_ERROR, "Could not generate gfproxy volfiles");
+ ret = glusterd_shdsvc_create_volfile(volinfo);
+ if (ret)
+ gf_log(this->name, GF_LOG_ERROR, "Could not generate shd volfiles");
+
dict_del_sizen(volinfo->dict, "skip-CLIOT");
out:
@@ -6549,7 +6534,7 @@ validate_shdopts(glusterd_volinfo_t *volinfo, dict_t *val_dict,
ret = dict_set_int32_sizen(val_dict, "graph-check", 1);
if (ret)
goto out;
- ret = build_shd_graph(&graph, val_dict);
+ ret = build_shd_graph(volinfo, &graph, val_dict);
if (!ret)
ret = graph_reconf_validateopt(&graph.graph, op_errstr);
@@ -6926,3 +6911,22 @@ gd_is_boolean_option(char *key)
return _gf_false;
}
+
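+/* Build the shd graph for a single volume and write it to 'filename'.
+ * The signature matches the glusterd_vol_graph_builder_t typedef
+ * introduced in glusterd-volgen.h.
+ */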
+int
+glusterd_shdsvc_generate_volfile(glusterd_volinfo_t *volinfo, char *filename,
+ dict_t *mode_dict)
+{
+ int ret = -1;
+ volgen_graph_t graph = {
+ 0,
+ };
+
+ graph.type = GF_SHD;
+ ret = build_shd_graph(volinfo, &graph, mode_dict);
+ if (!ret)
+ ret = volgen_write_volfile(&graph, filename);
+
+ volgen_graph_free(&graph);
+
+ return ret;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.h b/xlators/mgmt/glusterd/src/glusterd-volgen.h
index 37eecc04bef..5c4bfe0db0b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.h
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.h
@@ -69,6 +69,7 @@ typedef enum {
GF_REBALANCED = 1,
GF_QUOTAD,
GF_SNAPD,
+ GF_SHD,
} glusterd_graph_type_t;
struct volgen_graph {
@@ -80,6 +81,8 @@ typedef struct volgen_graph volgen_graph_t;
typedef int (*glusterd_graph_builder_t)(volgen_graph_t *graph,
dict_t *mod_dict);
+typedef int (*glusterd_vol_graph_builder_t)(glusterd_volinfo_t *,
+ char *filename, dict_t *mod_dict);
#define COMPLETE_OPTION(key, completion, ret) \
do { \
@@ -204,7 +207,8 @@ void
glusterd_get_shd_filepath(char *filename);
int
-build_shd_graph(volgen_graph_t *graph, dict_t *mod_dict);
+build_shd_graph(glusterd_volinfo_t *volinfo, volgen_graph_t *graph,
+ dict_t *mod_dict);
int
build_nfs_graph(volgen_graph_t *graph, dict_t *mod_dict);
@@ -316,4 +320,9 @@ glusterd_generate_gfproxyd_volfile(glusterd_volinfo_t *volinfo);
int
glusterd_build_gfproxyd_volfile(glusterd_volinfo_t *volinfo, char *filename);
+
+int
+glusterd_shdsvc_generate_volfile(glusterd_volinfo_t *volinfo, char *filename,
+ dict_t *mode_dict);
+
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index 317cb04dd00..f9c7a2d70e9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -1799,7 +1799,7 @@ static int
glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo,
dict_t *dict, char **op_errstr)
{
- glusterd_conf_t *priv = NULL;
+ glusterd_svc_t *svc = NULL;
gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID;
int ret = 0;
char msg[2408] = {
@@ -1809,7 +1809,6 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo,
"Self-heal daemon is not running. "
"Check self-heal daemon log file.";
- priv = this->private;
ret = dict_get_int32n(dict, "heal-op", SLEN("heal-op"),
(int32_t *)&heal_op);
if (ret) {
@@ -1818,6 +1817,7 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo,
goto out;
}
+ svc = &(volinfo->shd.svc);
switch (heal_op) {
case GF_SHD_OP_INVALID:
case GF_SHD_OP_HEAL_ENABLE: /* This op should be handled in volume-set*/
@@ -1847,7 +1847,7 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo,
goto out;
}
- if (!priv->shd_svc.online) {
+ if (!svc->online) {
ret = -1;
*op_errstr = gf_strdup(offline_msg);
goto out;
@@ -1868,7 +1868,7 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo,
goto out;
}
- if (!priv->shd_svc.online) {
+ if (!svc->online) {
ret = -1;
*op_errstr = gf_strdup(offline_msg);
goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
index 280c9f3640e..d4ab63095e2 100644
--- a/xlators/mgmt/glusterd/src/glusterd.c
+++ b/xlators/mgmt/glusterd/src/glusterd.c
@@ -1537,14 +1537,6 @@ init(xlator_t *this)
exit(1);
}
- ret = glusterd_init_var_run_dirs(this, rundir, GLUSTERD_GLUSTERSHD_RUN_DIR);
- if (ret) {
- gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_CREATE_DIR_FAILED,
- "Unable to create "
- "glustershd running directory");
- exit(1);
- }
-
ret = glusterd_init_var_run_dirs(this, rundir, GLUSTERD_NFS_RUN_DIR);
if (ret) {
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_CREATE_DIR_FAILED,
@@ -1819,6 +1811,9 @@ init(xlator_t *this)
CDS_INIT_LIST_HEAD(&conf->snapshots);
CDS_INIT_LIST_HEAD(&conf->missed_snaps_list);
CDS_INIT_LIST_HEAD(&conf->brick_procs);
+ CDS_INIT_LIST_HEAD(&conf->shd_procs);
+ pthread_mutex_init(&conf->attach_lock, NULL);
+ pthread_mutex_init(&conf->volume_lock, NULL);
pthread_mutex_init(&conf->mutex, NULL);
conf->rpc = rpc;
@@ -1899,7 +1894,6 @@ init(xlator_t *this)
glusterd_mgmt_v3_lock_timer_init();
glusterd_txn_opinfo_dict_init();
- glusterd_shdsvc_build(&conf->shd_svc);
glusterd_nfssvc_build(&conf->nfs_svc);
glusterd_quotadsvc_build(&conf->quotad_svc);
glusterd_bitdsvc_build(&conf->bitd_svc);
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index 0a2fffdbe63..eb89e3be93e 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -28,6 +28,7 @@
#include "glusterd-sm.h"
#include "glusterd-snapd-svc.h"
#include "glusterd-tierd-svc.h"
+#include "glusterd-shd-svc.h"
#include "glusterd-bitd-svc.h"
#include "glusterd1-xdr.h"
#include "protocol-common.h"
@@ -167,7 +168,6 @@ typedef struct {
char workdir[VALID_GLUSTERD_PATHMAX];
char rundir[VALID_GLUSTERD_PATHMAX];
rpcsvc_t *rpc;
- glusterd_svc_t shd_svc;
glusterd_svc_t nfs_svc;
glusterd_svc_t bitd_svc;
glusterd_svc_t scrub_svc;
@@ -176,6 +176,7 @@ typedef struct {
struct cds_list_head volumes;
struct cds_list_head snapshots; /*List of snap volumes */
struct cds_list_head brick_procs; /* List of brick processes */
+ struct cds_list_head shd_procs; /* List of shd processes */
pthread_mutex_t xprt_lock;
struct list_head xprt_list;
pthread_mutex_t import_volumes;
@@ -216,6 +217,11 @@ typedef struct {
gf_atomic_t blockers;
uint32_t mgmt_v3_lock_timeout;
gf_boolean_t restart_bricks;
+ pthread_mutex_t attach_lock; /* Lock can be per process or a common one */
+    pthread_mutex_t volume_lock; /* We release the big_lock from a lot of
+                                    places, which can lead to concurrent
+                                    modification of the volinfo list.
+                                  */
} glusterd_conf_t;
typedef enum gf_brick_status {
@@ -495,6 +501,7 @@ struct glusterd_volinfo_ {
glusterd_snapdsvc_t snapd;
glusterd_tierdsvc_t tierd;
+ glusterd_shdsvc_t shd;
glusterd_gfproxydsvc_t gfproxyd;
int32_t quota_xattr_version;
gf_boolean_t stage_deleted; /* volume has passed staging
@@ -621,7 +628,6 @@ typedef enum {
#define GLUSTERD_DEFAULT_SNAPS_BRICK_DIR "/gluster/snaps"
#define GLUSTERD_BITD_RUN_DIR "/bitd"
#define GLUSTERD_SCRUB_RUN_DIR "/scrub"
-#define GLUSTERD_GLUSTERSHD_RUN_DIR "/glustershd"
#define GLUSTERD_NFS_RUN_DIR "/nfs"
#define GLUSTERD_QUOTAD_RUN_DIR "/quotad"
#define GLUSTER_SHARED_STORAGE_BRICK_DIR GLUSTERD_DEFAULT_WORKDIR "/ss_brick"
@@ -677,6 +683,26 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args);
} \
} while (0)
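+/* Both macros below write into a PATH_MAX buffer and reset it to an
+ * empty string on truncation. A minimal usage sketch (hypothetical
+ * variable names):
+ *     char pidfile[PATH_MAX];
+ *     GLUSTERD_GET_SHD_PID_FILE(pidfile, volinfo, priv);
+ * yields "<rundir>/shd/<volname>-shd.pid".
+ */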
+#define GLUSTERD_GET_SHD_RUNDIR(path, volinfo, priv) \
+ do { \
+ int32_t _shd_dir_len; \
+ _shd_dir_len = snprintf(path, PATH_MAX, "%s/shd/%s", priv->rundir, \
+ volinfo->volname); \
+ if ((_shd_dir_len < 0) || (_shd_dir_len >= PATH_MAX)) { \
+ path[0] = 0; \
+ } \
+ } while (0)
+
+#define GLUSTERD_GET_SHD_PID_FILE(path, volinfo, priv) \
+ do { \
+ int32_t _shd_pid_len; \
+ _shd_pid_len = snprintf(path, PATH_MAX, "%s/shd/%s-shd.pid", \
+ priv->rundir, volinfo->volname); \
+ if ((_shd_pid_len < 0) || (_shd_pid_len >= PATH_MAX)) { \
+ path[0] = 0; \
+ } \
+ } while (0)
+
#define GLUSTERD_GET_VOLUME_PID_DIR(path, volinfo, priv) \
do { \
int32_t _vol_pid_len; \