Diffstat (limited to 'xlators/mgmt')
-rw-r--r--  xlators/mgmt/glusterd/src/Makefile.am                 |    8
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-brick-ops.c        |  129
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-ganesha.c          |   58
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c     |   11
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c          |    2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-hooks.c            |   16
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-log-ops.c          |    2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mem-types.h        |    1
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-messages.h         |    5
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c     |  139
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mgmt.c             |  323
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mgmt.h             |   10
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rebalance.c        |   18
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapd-svc.c        |   11
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c   |    5
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot.c         |   70
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-store.c            |  237
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-store.h            |   15
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c         |   19
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c            |  213
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.h            |    1
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volgen.c           |   52
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-ops.c       |    6
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-set.c       |    7
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.c                  |   28
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.h                  |    5
26 files changed, 1150 insertions(+), 241 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am
index eaa61c435e5..685beb42d27 100644
--- a/xlators/mgmt/glusterd/src/Makefile.am
+++ b/xlators/mgmt/glusterd/src/Makefile.am
@@ -25,13 +25,14 @@ glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c \
glusterd-conn-helper.c glusterd-snapd-svc.c glusterd-snapd-svc-helper.c \
glusterd-bitd-svc.c glusterd-scrub-svc.c glusterd-server-quorum.c \
glusterd-reset-brick.c glusterd-shd-svc.c glusterd-shd-svc-helper.c \
- glusterd-gfproxyd-svc.c glusterd-gfproxyd-svc-helper.c glusterd-ganesha.c
+ glusterd-gfproxyd-svc.c glusterd-gfproxyd-svc-helper.c glusterd-ganesha.c \
+ $(CONTRIBDIR)/mount/mntent.c
glusterd_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
$(top_builddir)/libglusterd/src/libglusterd.la \
$(top_builddir)/rpc/xdr/src/libgfxdr.la \
$(top_builddir)/rpc/rpc-lib/src/libgfrpc.la \
- $(XML_LIBS) -lcrypto $(URCU_LIBS) $(URCU_CDS_LIBS) $(LIB_DL)
+ $(XML_LIBS) -lcrypto $(URCU_LIBS) $(URCU_CDS_LIBS) $(LIB_DL) $(GF_XLATOR_MGNT_LIBADD)
noinst_HEADERS = glusterd.h glusterd-utils.h glusterd-op-sm.h \
glusterd-sm.h glusterd-store.h glusterd-mem-types.h \
@@ -46,7 +47,8 @@ noinst_HEADERS = glusterd.h glusterd-utils.h glusterd-op-sm.h \
glusterd-scrub-svc.h glusterd-server-quorum.h glusterd-errno.h \
glusterd-shd-svc.h glusterd-shd-svc-helper.h \
glusterd-gfproxyd-svc.h glusterd-gfproxyd-svc-helper.h \
- $(CONTRIBDIR)/userspace-rcu/rculist-extra.h
+ $(CONTRIBDIR)/userspace-rcu/rculist-extra.h \
+ $(CONTRIBDIR)/mount/mntent_compat.h
AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
-I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src \
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
index 6d1a1e98848..e56cd0e6c74 100644
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
@@ -1359,14 +1359,14 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
"Unable to get volume name");
goto out;
}
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
"Unable to find volume: %s", volname);
goto out;
}
@@ -1378,13 +1378,7 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
&replica_count);
if (ret) {
- gf_msg_debug(THIS->name, 0, "Unable to get replica count");
- }
-
- ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"),
- &arbiter_count);
- if (ret) {
- gf_msg_debug(THIS->name, 0, "No arbiter count present in the dict");
+ gf_msg_debug(this->name, 0, "Unable to get replica count");
}
if (replica_count > 0) {
@@ -1400,18 +1394,18 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
glusterd_add_peers_to_auth_list(volname);
- if (glusterd_is_volume_replicate(volinfo)) {
+ if (replica_count && glusterd_is_volume_replicate(volinfo)) {
/* Do not allow add-brick for stopped volumes when replica-count
* is being increased.
*/
- if (conf->op_version >= GD_OP_VERSION_3_7_10 && replica_count &&
- GLUSTERD_STATUS_STOPPED == volinfo->status) {
+ if (GLUSTERD_STATUS_STOPPED == volinfo->status &&
+ conf->op_version >= GD_OP_VERSION_3_7_10) {
ret = -1;
snprintf(msg, sizeof(msg),
" Volume must not be in"
" stopped state when replica-count needs to "
" be increased.");
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
msg);
*op_errstr = gf_strdup(msg);
goto out;
@@ -1419,25 +1413,31 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
/* op-version check for replica 2 to arbiter conversion. If we
* don't have this check, an older peer added as arbiter brick
* will not have the arbiter xlator in its volfile. */
- if ((conf->op_version < GD_OP_VERSION_3_8_0) && (arbiter_count == 1) &&
- (replica_count == 3)) {
- ret = -1;
- snprintf(msg, sizeof(msg),
- "Cluster op-version must "
- "be >= 30800 to add arbiter brick to a "
- "replica 2 volume.");
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
- msg);
- *op_errstr = gf_strdup(msg);
- goto out;
+ if ((replica_count == 3) && (conf->op_version < GD_OP_VERSION_3_8_0)) {
+ ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"),
+ &arbiter_count);
+ if (ret) {
+ gf_msg_debug(this->name, 0,
+ "No arbiter count present in the dict");
+ } else if (arbiter_count == 1) {
+ ret = -1;
+ snprintf(msg, sizeof(msg),
+ "Cluster op-version must "
+ "be >= 30800 to add arbiter brick to a "
+ "replica 2 volume.");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
+ msg);
+ *op_errstr = gf_strdup(msg);
+ goto out;
+ }
}
/* Do not allow increasing replica count for arbiter volumes. */
- if (replica_count && volinfo->arbiter_count) {
+ if (volinfo->arbiter_count) {
ret = -1;
snprintf(msg, sizeof(msg),
"Increasing replica count "
"for arbiter volumes is not supported.");
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
msg);
*op_errstr = gf_strdup(msg);
goto out;
@@ -1451,7 +1451,7 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
* doing this check at the originator node is sufficient.
*/
- if (is_origin_glusterd(dict) && !is_force) {
+ if (!is_force && is_origin_glusterd(dict)) {
ret = 0;
if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) {
gf_msg_debug(this->name, 0,
@@ -1459,15 +1459,18 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
"found. Checking brick order.");
if (replica_count)
ret = glusterd_check_brick_order(dict, msg, volinfo->type,
+ &volname, &bricks, &count,
replica_count);
else
ret = glusterd_check_brick_order(dict, msg, volinfo->type,
+ &volname, &bricks, &count,
volinfo->replica_count);
} else if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
gf_msg_debug(this->name, 0,
"Disperse cluster type"
" found. Checking brick order.");
- ret = glusterd_check_brick_order(dict, msg, volinfo->type,
+ ret = glusterd_check_brick_order(dict, msg, volinfo->type, &volname,
+ &bricks, &count,
volinfo->disperse_count);
}
if (ret) {
@@ -1496,7 +1499,7 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if (len < 0) {
strcpy(msg, "<error>");
}
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
msg);
*op_errstr = gf_strdup(msg);
goto out;
@@ -1528,7 +1531,7 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
"Volume name %s rebalance is in "
"progress. Please retry after completion",
volname);
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_OIP_RETRY_LATER, "%s", msg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OIP_RETRY_LATER, "%s", msg);
*op_errstr = gf_strdup(msg);
ret = -1;
goto out;
@@ -1546,18 +1549,22 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
msg[0] = '\0';
}
- ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
- if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Unable to get count");
- goto out;
+ if (!count) {
+ ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get count");
+ goto out;
+ }
}
- ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks);
- if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Unable to get bricks");
- goto out;
+ if (!bricks) {
+ ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get bricks");
+ goto out;
+ }
}
if (bricks) {
@@ -1576,7 +1583,7 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
"brick path %s is "
"too long",
brick);
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRKPATH_TOO_LONG, "%s",
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRKPATH_TOO_LONG, "%s",
msg);
*op_errstr = gf_strdup(msg);
@@ -1587,7 +1594,7 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
ret = glusterd_brickinfo_new_from_brick(brick, &brickinfo, _gf_true,
NULL);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_NOT_FOUND,
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_NOT_FOUND,
"Add-brick: Unable"
" to get brickinfo");
goto out;
@@ -1657,7 +1664,7 @@ out:
GF_FREE(str_ret);
GF_FREE(all_bricks);
- gf_msg_debug(THIS->name, 0, "Returning %d", ret);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
return ret;
}
@@ -2227,6 +2234,42 @@ out:
}
int
+glusterd_post_commit_add_brick(dict_t *dict, char **op_errstr)
+{
+ int ret = 0;
+ char *volname = NULL;
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+ ret = glusterd_replace_old_auth_allow_list(volname);
+out:
+ return ret;
+}
+
+int
+glusterd_post_commit_replace_brick(dict_t *dict, char **op_errstr)
+{
+ int ret = 0;
+ char *volname = NULL;
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+ ret = glusterd_replace_old_auth_allow_list(volname);
+out:
+ return ret;
+}
+
+int
glusterd_set_rebalance_id_for_remove_brick(dict_t *req_dict, dict_t *rsp_dict)
{
int ret = -1;
diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
index 2d60daf180a..f08bd6cebee 100644
--- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c
+++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c
@@ -421,6 +421,35 @@ check_host_list(void)
}
int
+gd_ganesha_send_dbus(char *volname, char *value)
+{
+ runner_t runner = {
+ 0,
+ };
+ int ret = -1;
+ runinit(&runner);
+
+ GF_VALIDATE_OR_GOTO("glusterd-ganesha", volname, out);
+ GF_VALIDATE_OR_GOTO("glusterd-ganesha", value, out);
+
+ ret = 0;
+ if (check_host_list()) {
+ /* Check whether ganesha is running on this node */
+ if (manage_service("status")) {
+ gf_msg("glusterd-ganesha", GF_LOG_WARNING, 0,
+ GD_MSG_GANESHA_NOT_RUNNING,
+ "Export failed, NFS-Ganesha is not running");
+ } else {
+ runner_add_args(&runner, GANESHA_PREFIX "/dbus-send.sh", CONFDIR,
+ value, volname, NULL);
+ ret = runner_run(&runner);
+ }
+ }
+out:
+ return ret;
+}
+
+int
manage_export_config(char *volname, char *value, char **op_errstr)
{
runner_t runner = {
@@ -447,9 +476,6 @@ int
ganesha_manage_export(dict_t *dict, char *value,
gf_boolean_t update_cache_invalidation, char **op_errstr)
{
- runner_t runner = {
- 0,
- };
int ret = -1;
glusterd_volinfo_t *volinfo = NULL;
dict_t *vol_opts = NULL;
@@ -458,7 +484,6 @@ ganesha_manage_export(dict_t *dict, char *value,
glusterd_conf_t *priv = NULL;
gf_boolean_t option = _gf_false;
- runinit(&runner);
this = THIS;
GF_ASSERT(this);
priv = this->private;
@@ -538,26 +563,13 @@ ganesha_manage_export(dict_t *dict, char *value,
goto out;
}
}
-
- if (check_host_list()) {
- /* Check whether ganesha is running on this node */
- if (manage_service("status")) {
- gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_GANESHA_NOT_RUNNING,
- "Export failed, NFS-Ganesha is not running");
- } else {
- runner_add_args(&runner, GANESHA_PREFIX "/dbus-send.sh", CONFDIR,
- value, volname, NULL);
- ret = runner_run(&runner);
- if (ret) {
- gf_asprintf(op_errstr,
- "Dynamic export"
- " addition/deletion failed."
- " Please see log file for details");
- goto out;
- }
- }
+ ret = gd_ganesha_send_dbus(volname, value);
+ if (ret) {
+ gf_asprintf(op_errstr,
+ "Dynamic export addition/deletion failed."
+ " Please see log file for details");
+ goto out;
}
-
if (update_cache_invalidation) {
vol_opts = volinfo->dict;
ret = dict_set_dynstr_with_alloc(vol_opts,
diff --git a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c
index b01fd4da24b..a0bfea41f0f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c
@@ -310,7 +310,7 @@ glusterd_gfproxydsvc_start(glusterd_svc_t *svc, int flags)
}
runinit(&runner);
- if (this->ctx->cmd_args.valgrind) {
+ if (this->ctx->cmd_args.vgtool != _gf_none) {
len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-%s",
svc->proc.logdir, svc->proc.logfile);
if ((len < 0) || (len >= PATH_MAX)) {
@@ -318,8 +318,13 @@ glusterd_gfproxydsvc_start(glusterd_svc_t *svc, int flags)
goto out;
}
- runner_add_args(&runner, "valgrind", "--leak-check=full",
- "--trace-children=yes", "--track-origins=yes", NULL);
+ if (this->ctx->cmd_args.vgtool == _gf_memcheck)
+ runner_add_args(&runner, "valgrind", "--leak-check=full",
+ "--trace-children=yes", "--track-origins=yes",
+ NULL);
+ else
+ runner_add_args(&runner, "valgrind", "--tool=drd", NULL);
+
runner_argprintf(&runner, "--log-file=%s", valgrind_logfile);
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 7d488ffd87a..1b21c40596d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -5593,7 +5593,7 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
ret = dict_get_strn(dict, "filename", SLEN("filename"), &tmp_str);
if (ret) {
- now = time(NULL);
+ now = gf_time();
strftime(timestamp, sizeof(timestamp), "%Y%m%d_%H%M%S",
localtime(&now));
gf_asprintf(&filename, "%s_%s", "glusterd_state", timestamp);
diff --git a/xlators/mgmt/glusterd/src/glusterd-hooks.c b/xlators/mgmt/glusterd/src/glusterd-hooks.c
index d18eb6b2f5e..61c0f1c946f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-hooks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-hooks.c
@@ -206,11 +206,13 @@ glusterd_hooks_set_volume_args(dict_t *dict, runner_t *runner)
int i = 0;
int count = 0;
int ret = -1;
+ int flag = 0;
char query[1024] = {
0,
};
char *key = NULL;
char *value = NULL;
+ char *inet_family = NULL;
xlator_t *this = NULL;
this = THIS;
GF_ASSERT(this);
@@ -243,9 +245,23 @@ glusterd_hooks_set_volume_args(dict_t *dict, runner_t *runner)
continue;
runner_argprintf(runner, "%s=%s", key, value);
+ if ((strncmp(key, "cluster.enable-shared-storage",
+ SLEN("cluster.enable-shared-storage")) == 0 ||
+ strncmp(key, "enable-shared-storage",
+ SLEN("enable-shared-storage")) == 0) &&
+ strncmp(value, "enable", SLEN("enable")) == 0)
+ flag = 1;
}
glusterd_hooks_add_custom_args(dict, runner);
+ if (flag == 1) {
+ ret = dict_get_str_sizen(this->options, "transport.address-family",
+ &inet_family);
+ if (!ret) {
+ runner_argprintf(runner, "transport.address-family=%s",
+ inet_family);
+ }
+ }
ret = 0;
out:
diff --git a/xlators/mgmt/glusterd/src/glusterd-log-ops.c b/xlators/mgmt/glusterd/src/glusterd-log-ops.c
index a800d9543cf..34abf35cb00 100644
--- a/xlators/mgmt/glusterd/src/glusterd-log-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-log-ops.c
@@ -76,7 +76,7 @@ __glusterd_handle_log_rotate(rpcsvc_request_t *req)
"for volume %s",
volname);
- ret = dict_set_uint64(dict, "rotate-key", (uint64_t)time(NULL));
+ ret = dict_set_uint64(dict, "rotate-key", (uint64_t)gf_time());
if (ret)
goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-mem-types.h b/xlators/mgmt/glusterd/src/glusterd-mem-types.h
index 17052cee263..d7257e1a7b5 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mem-types.h
+++ b/xlators/mgmt/glusterd/src/glusterd-mem-types.h
@@ -27,6 +27,7 @@ typedef enum gf_gld_mem_types_ {
gf_gld_mt_mop_stage_req_t,
gf_gld_mt_probe_ctx_t,
gf_gld_mt_glusterd_volinfo_t,
+ gf_gld_mt_volinfo_dict_data_t,
gf_gld_mt_glusterd_brickinfo_t,
gf_gld_mt_peer_hostname_t,
gf_gld_mt_defrag_info,
diff --git a/xlators/mgmt/glusterd/src/glusterd-messages.h b/xlators/mgmt/glusterd/src/glusterd-messages.h
index c0891797fdf..3a1e600fb03 100644
--- a/xlators/mgmt/glusterd/src/glusterd-messages.h
+++ b/xlators/mgmt/glusterd/src/glusterd-messages.h
@@ -319,7 +319,8 @@ GLFS_MSGID(
GD_MSG_SNAPSHOT_NOT_THIN_PROVISIONED, GD_MSG_VOL_STOP_ARGS_GET_FAILED,
GD_MSG_LSTAT_FAIL, GD_MSG_VOLUME_NOT_IMPORTED,
GD_MSG_ADD_BRICK_MNT_INFO_FAIL, GD_MSG_GET_MNT_ENTRY_INFO_FAIL,
- GD_MSG_QUORUM_CLUSTER_COUNT_GET_FAIL);
+ GD_MSG_QUORUM_CLUSTER_COUNT_GET_FAIL, GD_MSG_POST_COMMIT_OP_FAIL,
+ GD_MSG_POST_COMMIT_FROM_UUID_REJCT, GD_MSG_POST_COMMIT_REQ_SEND_FAIL);
#define GD_MSG_INVALID_ENTRY_STR "Invalid data entry"
#define GD_MSG_INVALID_ARGUMENT_STR \
@@ -447,4 +448,4 @@ GLFS_MSGID(
"Failed to allocate memory or get serialized length of dict"
#define GD_MSG_GET_XATTR_FAIL_STR "Failed to get extended attribute"
-#endif /* !_GLUSTERD_MESSAGES_H_ */
\ No newline at end of file
+#endif /* !_GLUSTERD_MESSAGES_H_ */
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
index c170827eec0..1069688a89d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
@@ -626,6 +626,136 @@ out:
}
static int
+glusterd_mgmt_v3_post_commit_send_resp(rpcsvc_request_t *req, int32_t op,
+ int32_t status, char *op_errstr,
+ uint32_t op_errno, dict_t *rsp_dict)
+{
+ gd1_mgmt_v3_post_commit_rsp rsp = {
+ {0},
+ };
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+
+ rsp.op_ret = status;
+ glusterd_get_uuid(&rsp.uuid);
+ rsp.op = op;
+ rsp.op_errno = op_errno;
+ if (op_errstr)
+ rsp.op_errstr = op_errstr;
+ else
+ rsp.op_errstr = "";
+
+ ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
+ goto out;
+ }
+
+ ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_post_commit_rsp);
+
+ GF_FREE(rsp.dict.dict_val);
+out:
+ gf_msg_debug(this->name, 0, "Responded to post commit, ret: %d", ret);
+ return ret;
+}
+
+static int
+glusterd_handle_post_commit_fn(rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gd1_mgmt_v3_post_commit_req op_req = {
+ {0},
+ };
+ xlator_t *this = NULL;
+ char *op_errstr = NULL;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+ uint32_t op_errno = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+
+ ret = xdr_to_generic(req->msg[0], &op_req,
+ (xdrproc_t)xdr_gd1_mgmt_v3_post_commit_req);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode post commit "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
+ "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa(op_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ dict = dict_new();
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
+ goto out;
+ }
+
+ ret = dict_unserialize(op_req.dict.dict_val, op_req.dict.dict_len, &dict);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ NULL);
+ goto out;
+ }
+
+ rsp_dict = dict_new();
+ if (!rsp_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
+ return -1;
+ }
+
+ ret = gd_mgmt_v3_post_commit_fn(op_req.op, dict, &op_errstr, &op_errno,
+ rsp_dict);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_COMMIT_OP_FAIL,
+ "post commit failed on operation %s", gd_op_list[op_req.op]);
+ }
+
+ ret = glusterd_mgmt_v3_post_commit_send_resp(req, op_req.op, ret, op_errstr,
+ op_errno, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_OP_RESP_FAIL,
+ "Failed to send post commit "
+ "response for operation %s",
+ gd_op_list[op_req.op]);
+ goto out;
+ }
+
+out:
+ if (op_errstr && (strcmp(op_errstr, "")))
+ GF_FREE(op_errstr);
+
+ free(op_req.dict.dict_val);
+
+ if (dict)
+ dict_unref(dict);
+
+ if (rsp_dict)
+ dict_unref(rsp_dict);
+
+ /* Return 0 from handler to avoid double deletion of req obj */
+ return 0;
+}
+
+static int
glusterd_mgmt_v3_post_validate_send_resp(rpcsvc_request_t *req, int32_t op,
int32_t status, char *op_errstr,
dict_t *rsp_dict)
@@ -963,6 +1093,12 @@ glusterd_handle_commit(rpcsvc_request_t *req)
}
static int
+glusterd_handle_post_commit(rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler(req, glusterd_handle_post_commit_fn);
+}
+
+static int
glusterd_handle_post_validate(rpcsvc_request_t *req)
{
return glusterd_big_locked_handler(req, glusterd_handle_post_validate_fn);
@@ -986,6 +1122,9 @@ static rpcsvc_actor_t gd_svc_mgmt_v3_actors[GLUSTERD_MGMT_V3_MAXVALUE] = {
GLUSTERD_MGMT_V3_BRICK_OP, DRC_NA, 0},
[GLUSTERD_MGMT_V3_COMMIT] = {"COMMIT", glusterd_handle_commit, NULL,
GLUSTERD_MGMT_V3_COMMIT, DRC_NA, 0},
+ [GLUSTERD_MGMT_V3_POST_COMMIT] = {"POST_COMMIT",
+ glusterd_handle_post_commit, NULL,
+ GLUSTERD_MGMT_V3_POST_COMMIT, DRC_NA, 0},
[GLUSTERD_MGMT_V3_POST_VALIDATE] = {"POST_VAL",
glusterd_handle_post_validate, NULL,
GLUSTERD_MGMT_V3_POST_VALIDATE, DRC_NA,
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index b2128e10a04..bca7221062b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -86,6 +86,11 @@ gd_mgmt_v3_collate_errors(struct syncargs *args, int op_ret, int op_errno,
peer_str, err_string);
break;
}
+ case GLUSTERD_MGMT_V3_POST_COMMIT: {
+ snprintf(op_err, sizeof(op_err), "Post commit failed on %s. %s",
+ peer_str, err_string);
+ break;
+ }
case GLUSTERD_MGMT_V3_POST_VALIDATE: {
snprintf(op_err, sizeof(op_err),
"Post Validation failed on %s. %s", peer_str,
@@ -405,6 +410,47 @@ out:
}
int32_t
+gd_mgmt_v3_post_commit_fn(glusterd_op_t op, dict_t *dict, char **op_errstr,
+ uint32_t *op_errno, dict_t *rsp_dict)
+{
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(dict);
+ GF_ASSERT(op_errstr);
+ GF_VALIDATE_OR_GOTO(this->name, op_errno, out);
+ GF_ASSERT(rsp_dict);
+
+ switch (op) {
+ case GD_OP_ADD_BRICK:
+ ret = glusterd_post_commit_add_brick(dict, op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_COMMIT_OP_FAIL,
+ "Add-brick post commit failed.");
+ goto out;
+ }
+ break;
+ case GD_OP_REPLACE_BRICK:
+ ret = glusterd_post_commit_replace_brick(dict, op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_COMMIT_OP_FAIL,
+ "Replace-brick post commit failed.");
+ goto out;
+ }
+ break;
+ default:
+ break;
+ }
+
+ ret = 0;
+out:
+ gf_msg_debug(this->name, 0, "OP = %d. Returning %d", op, ret);
+ return ret;
+}
+
+int32_t
gd_mgmt_v3_post_validate_fn(glusterd_op_t op, int32_t op_ret, dict_t *dict,
char **op_errstr, dict_t *rsp_dict)
{
@@ -1720,6 +1766,274 @@ out:
}
int32_t
+gd_mgmt_v3_post_commit_cbk_fn(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ int32_t ret = -1;
+ struct syncargs *args = NULL;
+ gd1_mgmt_v3_post_commit_rsp rsp = {
+ {0},
+ };
+ call_frame_t *frame = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ dict_t *rsp_dict = NULL;
+ xlator_t *this = NULL;
+ uuid_t *peerid = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+ GF_ASSERT(myframe);
+
+ frame = myframe;
+ args = frame->local;
+ peerid = frame->cookie;
+ frame->local = NULL;
+ frame->cookie = NULL;
+
+ if (-1 == req->rpc_status) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+ GF_VALIDATE_OR_GOTO_WITH_ERROR(this->name, iov, out, op_errno, EINVAL);
+
+ ret = xdr_to_generic(*iov, &rsp,
+ (xdrproc_t)xdr_gd1_mgmt_v3_post_commit_rsp);
+ if (ret < 0)
+ goto out;
+
+ if (rsp.dict.dict_len) {
+ /* Unserialize the dictionary */
+ rsp_dict = dict_new();
+
+ ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict);
+ if (ret < 0) {
+ free(rsp.dict.dict_val);
+ goto out;
+ } else {
+ rsp_dict->extra_stdfree = rsp.dict.dict_val;
+ }
+ }
+
+ gf_uuid_copy(args->uuid, rsp.uuid);
+ pthread_mutex_lock(&args->lock_dict);
+ {
+ ret = glusterd_syncop_aggr_rsp_dict(rsp.op, args->dict, rsp_dict);
+ }
+ pthread_mutex_unlock(&args->lock_dict);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RESP_AGGR_FAIL, "%s",
+ "Failed to aggregate response from "
+ " node/brick");
+ if (!rsp.op_ret)
+ op_ret = ret;
+ else {
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+ }
+ } else {
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+ }
+
+out:
+ if (rsp_dict)
+ dict_unref(rsp_dict);
+
+ gd_mgmt_v3_collate_errors(args, op_ret, op_errno, rsp.op_errstr,
+ GLUSTERD_MGMT_V3_POST_COMMIT, *peerid, rsp.uuid);
+ GF_FREE(peerid);
+
+ if (rsp.op_errstr)
+ free(rsp.op_errstr);
+
+ /* req->rpc_status set to -1 means, STACK_DESTROY will be called from
+ * the caller function.
+ */
+ if (req->rpc_status != -1)
+ STACK_DESTROY(frame->root);
+ synctask_barrier_wake(args);
+ return 0;
+}
+
+int32_t
+gd_mgmt_v3_post_commit_cbk(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ return glusterd_big_locked_cbk(req, iov, count, myframe,
+ gd_mgmt_v3_post_commit_cbk_fn);
+}
+
+int
+gd_mgmt_v3_post_commit_req(glusterd_op_t op, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo, struct syncargs *args,
+ uuid_t my_uuid, uuid_t recv_uuid)
+{
+ int32_t ret = -1;
+ gd1_mgmt_v3_post_commit_req req = {
+ {0},
+ };
+ xlator_t *this = NULL;
+ uuid_t *peerid = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(op_ctx);
+ GF_ASSERT(peerinfo);
+ GF_ASSERT(args);
+
+ ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
+ goto out;
+ }
+
+ gf_uuid_copy(req.uuid, my_uuid);
+ req.op = op;
+
+ GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_ALLOC_AND_COPY_UUID_FAIL, NULL);
+ goto out;
+ }
+
+ ret = gd_syncop_submit_request(
+ peerinfo->rpc, &req, args, peerid, &gd_mgmt_v3_prog,
+ GLUSTERD_MGMT_V3_POST_COMMIT, gd_mgmt_v3_post_commit_cbk,
+ (xdrproc_t)xdr_gd1_mgmt_v3_post_commit_req);
+out:
+ GF_FREE(req.dict.dict_val);
+ gf_msg_trace(this->name, 0, "Returning %d", ret);
+ return ret;
+}
+
+int
+glusterd_mgmt_v3_post_commit(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
+ char **op_errstr, uint32_t *op_errno,
+ uint32_t txn_generation)
+{
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
+ dict_t *rsp_dict = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ struct syncargs args = {0};
+ uuid_t peer_uuid = {0};
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ GF_ASSERT(op_ctx);
+ GF_ASSERT(req_dict);
+ GF_ASSERT(op_errstr);
+ GF_VALIDATE_OR_GOTO(this->name, op_errno, out);
+
+ rsp_dict = dict_new();
+ if (!rsp_dict) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
+ "Failed to create response dictionary");
+ goto out;
+ }
+
+ /* Post commit on local node */
+ ret = gd_mgmt_v3_post_commit_fn(op, req_dict, op_errstr, op_errno,
+ rsp_dict);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_COMMIT_OP_FAIL,
+ "Post commit failed for "
+ "operation %s on local node",
+ gd_op_list[op]);
+
+ if (*op_errstr == NULL) {
+ ret = gf_asprintf(op_errstr,
+ "Post commit failed "
+ "on localhost. Please "
+ "check log file for details.");
+ if (ret == -1)
+ *op_errstr = NULL;
+
+ ret = -1;
+ }
+ goto out;
+ }
+
+ ret = glusterd_syncop_aggr_rsp_dict(op, op_ctx, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RESP_AGGR_FAIL, "%s",
+ "Failed to aggregate response from "
+ " node/brick");
+ goto out;
+ }
+
+ dict_unref(rsp_dict);
+ rsp_dict = NULL;
+
+ /* Sending post commit req to other nodes in the cluster */
+ gd_syncargs_init(&args, op_ctx);
+ ret = synctask_barrier_init((&args));
+ if (ret)
+ goto out;
+ peer_cnt = 0;
+
+ RCU_READ_LOCK;
+ cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
+ {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > txn_generation)
+ continue;
+ if (!peerinfo->connected)
+ continue;
+
+ if (op != GD_OP_SYNC_VOLUME &&
+ peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
+ gd_mgmt_v3_post_commit_req(op, req_dict, peerinfo, &args, MY_UUID,
+ peer_uuid);
+ peer_cnt++;
+ }
+ RCU_READ_UNLOCK;
+
+ if (0 == peer_cnt) {
+ ret = 0;
+ goto out;
+ }
+
+ gd_synctask_barrier_wait((&args), peer_cnt);
+
+ if (args.op_ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_COMMIT_OP_FAIL,
+ "Post commit failed on peers");
+
+ if (args.errstr)
+ *op_errstr = gf_strdup(args.errstr);
+ }
+
+ ret = args.op_ret;
+ *op_errno = args.op_errno;
+
+ gf_msg_debug(this->name, 0,
+ "Sent post commit req for %s to %d "
+ "peers. Returning %d",
+ gd_op_list[op], peer_cnt, ret);
+out:
+ glusterd_op_modify_op_ctx(op, op_ctx);
+ return ret;
+}
+
+int32_t
gd_mgmt_v3_post_validate_cbk_fn(struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
@@ -2408,6 +2722,15 @@ glusterd_mgmt_v3_initiate_all_phases(rpcsvc_request_t *req, glusterd_op_t op,
goto out;
}
+ /* POST COMMIT OP PHASE */
+ ret = glusterd_mgmt_v3_post_commit(op, dict, req_dict, &op_errstr,
+ &op_errno, txn_generation);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_COMMIT_OP_FAIL,
+ "Post commit Op Failed");
+ goto out;
+ }
+
/* POST-COMMIT VALIDATE PHASE */
/* As of now, post_validate is not trying to cleanup any failed
commands. So as of now, I am sending 0 (op_ret as 0).
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-mgmt.h
index 71f793d0397..27dd1849519 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.h
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.h
@@ -28,6 +28,10 @@ gd_mgmt_v3_commit_fn(glusterd_op_t op, dict_t *dict, char **op_errstr,
uint32_t *op_errno, dict_t *rsp_dict);
int32_t
+gd_mgmt_v3_post_commit_fn(glusterd_op_t op, dict_t *dict, char **op_errstr,
+ uint32_t *op_errno, dict_t *rsp_dict);
+
+int32_t
gd_mgmt_v3_post_validate_fn(glusterd_op_t op, int32_t op_ret, dict_t *dict,
char **op_errstr, dict_t *rsp_dict);
@@ -84,4 +88,10 @@ glusterd_reset_brick_prevalidate(dict_t *dict, char **op_errstr,
dict_t *rsp_dict);
int
glusterd_op_reset_brick(dict_t *dict, dict_t *rsp_dict);
+
+int
+glusterd_post_commit_add_brick(dict_t *dict, char **op_errstr);
+
+int
+glusterd_post_commit_replace_brick(dict_t *dict, char **op_errstr);
#endif /* _GLUSTERD_MGMT_H_ */
diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
index 2afd0fe1b74..458bf168ede 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c
@@ -219,6 +219,9 @@ glusterd_handle_defrag_start(glusterd_volinfo_t *volinfo, char *op_errstr,
char valgrind_logfile[PATH_MAX] = {
0,
};
+ char msg[1024] = {
+ 0,
+ };
char *volfileserver = NULL;
char *localtime_logging = NULL;
@@ -270,12 +273,17 @@ glusterd_handle_defrag_start(glusterd_volinfo_t *volinfo, char *op_errstr,
"rebalance");
runinit(&runner);
- if (this->ctx->cmd_args.valgrind) {
+ if (this->ctx->cmd_args.vgtool != _gf_none) {
snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-%s-rebalance.log",
priv->logdir, volinfo->volname);
- runner_add_args(&runner, "valgrind", "--leak-check=full",
- "--trace-children=yes", "--track-origins=yes", NULL);
+ if (this->ctx->cmd_args.vgtool == _gf_memcheck)
+ runner_add_args(&runner, "valgrind", "--leak-check=full",
+ "--trace-children=yes", "--track-origins=yes",
+ NULL);
+ else
+ runner_add_args(&runner, "valgrind", "--tool=drd", NULL);
+
runner_argprintf(&runner, "--log-file=%s", valgrind_logfile);
}
@@ -316,6 +324,10 @@ glusterd_handle_defrag_start(glusterd_volinfo_t *volinfo, char *op_errstr,
runner_add_arg(&runner, "--localtime-logging");
}
+ snprintf(msg, sizeof(msg), "Starting the rebalance service for volume %s",
+ volinfo->volname);
+ runner_log(&runner, this->name, GF_LOG_DEBUG, msg);
+
ret = runner_run_nowait(&runner);
if (ret) {
gf_msg_debug("glusterd", 0, "rebalance command failed");
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c
index 1f3f4909cbb..d75f249b29e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c
@@ -304,7 +304,7 @@ glusterd_snapdsvc_start(glusterd_svc_t *svc, int flags)
}
runinit(&runner);
- if (this->ctx->cmd_args.valgrind) {
+ if (this->ctx->cmd_args.vgtool != _gf_none) {
len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-snapd.log",
svc->proc.logdir);
if ((len < 0) || (len >= PATH_MAX)) {
@@ -313,8 +313,13 @@ glusterd_snapdsvc_start(glusterd_svc_t *svc, int flags)
goto out;
}
- runner_add_args(&runner, "valgrind", "--leak-check=full",
- "--trace-children=yes", "--track-origins=yes", NULL);
+ if (this->ctx->cmd_args.vgtool == _gf_memcheck)
+ runner_add_args(&runner, "valgrind", "--leak-check=full",
+ "--trace-children=yes", "--track-origins=yes",
+ NULL);
+ else
+ runner_add_args(&runner, "valgrind", "--tool=drd", NULL);
+
runner_argprintf(&runner, "--log-file=%s", valgrind_logfile);
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
index d96d5dd2cfc..995268b796d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
@@ -2037,8 +2037,9 @@ glusterd_update_snaps_synctask(void *opaque)
"Failed to remove snap %s", snap->snapname);
goto out;
}
- if (dict)
- dict_unref(dict);
+
+ dict_unref(dict);
+ dict = NULL;
}
snprintf(buf, sizeof(buf), "%s.accept_peer_data", prefix);
ret = dict_get_int32(peer_data, buf, &val);
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index c2428dc0de0..aeaa8d15214 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -3930,7 +3930,8 @@ glusterd_handle_snapshot_create(rpcsvc_request_t *req, glusterd_op_t op,
goto out;
}
- ret = dict_set_int64(dict, "snap-time", (int64_t)time(&snap_time));
+ snap_time = gf_time();
+ ret = dict_set_int64(dict, "snap-time", (int64_t)snap_time);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"Unable to set snap-time");
@@ -5322,6 +5323,48 @@ glusterd_do_snap_vol(glusterd_volinfo_t *origin_vol, glusterd_snap_t *snap,
dict_deln(snap_vol->dict, "features.barrier", SLEN("features.barrier"));
gd_update_volume_op_versions(snap_vol);
+ /* *
+ * Create the export file from the node where ganesha.enable "on"
+ * is executed
+ * */
+ if (glusterd_is_ganesha_cluster() &&
+ glusterd_check_ganesha_export(snap_vol)) {
+ if (is_origin_glusterd(dict)) {
+ ret = manage_export_config(clonename, "on", NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_EXPORT_FILE_CREATE_FAIL,
+ "Failed to create"
+ "export file for NFS-Ganesha\n");
+ goto out;
+ }
+ }
+
+ ret = dict_set_dynstr_with_alloc(snap_vol->dict,
+ "features.cache-invalidation", "on");
+ ret = gd_ganesha_send_dbus(clonename, "on");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_EXPORT_FILE_CREATE_FAIL,
+ "Dynamic export addition/deletion failed."
+ " Please see log file for details. Clone name = %s",
+ clonename);
+ goto out;
+ }
+ }
+ if (!glusterd_is_ganesha_cluster() &&
+ glusterd_check_ganesha_export(snap_vol)) {
+ /* This happens when a snapshot was created when Ganesha was
+ * enabled globally. Then Ganesha disabled from the cluster.
+ * In such cases, we will have the volume level option set
+ * on dict, So we have to disable it as it doesn't make sense
+ * to keep the option.
+ */
+
+ ret = dict_set_dynstr(snap_vol->dict, "ganesha.enable", "off");
+ if (ret)
+ goto out;
+ }
+
ret = glusterd_store_volinfo(snap_vol, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_SET_FAIL,
@@ -5393,8 +5436,31 @@ out:
for (i = 0; unsupported_opt[i].key; i++)
GF_FREE(unsupported_opt[i].value);
- if (snap_vol)
+ if (snap_vol) {
+ if (glusterd_is_ganesha_cluster() &&
+ glusterd_check_ganesha_export(snap_vol)) {
+ if (is_origin_glusterd(dict)) {
+ ret = manage_export_config(clonename, "on", NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_EXPORT_FILE_CREATE_FAIL,
+ "Failed to create"
+ "export file for NFS-Ganesha\n");
+ }
+ }
+
+ ret = gd_ganesha_send_dbus(clonename, "off");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_EXPORT_FILE_CREATE_FAIL,
+ "Dynamic export addition/deletion failed."
+ " Please see log file for details. Clone name = %s",
+ clonename);
+ }
+ }
+
glusterd_snap_volume_remove(rsp_dict, snap_vol, _gf_true, _gf_true);
+ }
snap_vol = NULL;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index 465e41ef00b..d94dceb10b7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -74,7 +74,7 @@ glusterd_replace_slash_with_hyphen(char *str)
while (ptr) {
*ptr = '-';
- ptr = strchr(str, '/');
+ ptr = strchr(ptr, '/');
}
}
@@ -660,85 +660,72 @@ out:
}
static int
-_storeslaves(dict_t *this, char *key, data_t *value, void *data)
-{
- int32_t ret = 0;
- gf_store_handle_t *shandle = NULL;
- xlator_t *xl = NULL;
-
- xl = THIS;
- GF_ASSERT(xl);
-
- shandle = (gf_store_handle_t *)data;
-
- GF_ASSERT(shandle);
- GF_ASSERT(shandle->fd > 0);
- GF_ASSERT(shandle->path);
- GF_ASSERT(key);
- GF_ASSERT(value);
- GF_ASSERT(value->data);
-
- gf_msg_debug(xl->name, 0, "Storing in volinfo:key= %s, val=%s", key,
- value->data);
-
- ret = gf_store_save_value(shandle->fd, key, (char *)value->data);
- if (ret) {
- gf_msg(xl->name, GF_LOG_ERROR, 0, GD_MSG_STORE_HANDLE_WRITE_FAIL,
- "Unable to write into store"
- " handle for path: %s",
- shandle->path);
- return -1;
- }
- return 0;
-}
-
-int
-_storeopts(dict_t *this, char *key, data_t *value, void *data)
+_storeopts(dict_t *dict_value, char *key, data_t *value, void *data)
{
int32_t ret = 0;
int32_t exists = 0;
+ int32_t option_len = 0;
gf_store_handle_t *shandle = NULL;
- xlator_t *xl = NULL;
+ glusterd_volinfo_data_store_t *dict_data = NULL;
+ xlator_t *this = NULL;
- xl = THIS;
- GF_ASSERT(xl);
+ this = THIS;
+ GF_ASSERT(this);
- shandle = (gf_store_handle_t *)data;
+ dict_data = (glusterd_volinfo_data_store_t *)data;
+ shandle = dict_data->shandle;
GF_ASSERT(shandle);
GF_ASSERT(shandle->fd > 0);
- GF_ASSERT(shandle->path);
GF_ASSERT(key);
GF_ASSERT(value);
GF_ASSERT(value->data);
- if (is_key_glusterd_hooks_friendly(key)) {
- exists = 1;
+ if (dict_data->key_check == 1) {
+ if (is_key_glusterd_hooks_friendly(key)) {
+ exists = 1;
- } else {
- exists = glusterd_check_option_exists(key, NULL);
+ } else {
+ exists = glusterd_check_option_exists(key, NULL);
+ }
}
-
- if (1 == exists) {
- gf_msg_debug(xl->name, 0,
- "Storing in volinfo:key= %s, "
+ if (exists == 1 || dict_data->key_check == 0) {
+ gf_msg_debug(this->name, 0,
+ "Storing in buffer for volinfo:key= %s, "
"val=%s",
key, value->data);
-
} else {
- gf_msg_debug(xl->name, 0, "Discarding:key= %s, val=%s", key,
+ gf_msg_debug(this->name, 0, "Discarding:key= %s, val=%s", key,
value->data);
return 0;
}
- ret = gf_store_save_value(shandle->fd, key, (char *)value->data);
- if (ret) {
- gf_msg(xl->name, GF_LOG_ERROR, 0, GD_MSG_STORE_HANDLE_WRITE_FAIL,
- "Unable to write into store"
- " handle for path: %s",
- shandle->path);
+ /*
+ * The option_len considers the length of the key value
+ * pair and along with that '=' and '\n', but as value->len
+ * already considers a NULL at the end of the data, adding
+ * just 1.
+ */
+ option_len = strlen(key) + value->len + 1;
+
+ if ((VOLINFO_BUFFER_SIZE - dict_data->buffer_len - 1) < option_len) {
+ ret = gf_store_save_items(shandle->fd, dict_data->buffer);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED, NULL);
+ return -1;
+ }
+ dict_data->buffer_len = 0;
+ dict_data->buffer[0] = '\0';
+ }
+ ret = snprintf(dict_data->buffer + dict_data->buffer_len, option_len + 1,
+ "%s=%s\n", key, value->data);
+ if (ret < 0 || ret > option_len + 1) {
+ gf_smsg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_COPY_FAIL, NULL);
return -1;
}
+
+ dict_data->buffer_len += ret;
+
return 0;
}
@@ -1013,7 +1000,7 @@ glusterd_store_create_snap_dir(glusterd_snap_t *snap)
return ret;
}
-int32_t
+static int32_t
glusterd_store_volinfo_write(int fd, glusterd_volinfo_t *volinfo)
{
int32_t ret = -1;
@@ -1021,19 +1008,47 @@ glusterd_store_volinfo_write(int fd, glusterd_volinfo_t *volinfo)
GF_ASSERT(fd > 0);
GF_ASSERT(volinfo);
GF_ASSERT(volinfo->shandle);
+ xlator_t *this = NULL;
+ glusterd_volinfo_data_store_t *dict_data = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
shandle = volinfo->shandle;
+
+ dict_data = GF_CALLOC(1, sizeof(glusterd_volinfo_data_store_t),
+ gf_gld_mt_volinfo_dict_data_t);
+ if (dict_data == NULL) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_MEMORY, NULL);
+ return -1;
+ }
+
ret = glusterd_volume_exclude_options_write(fd, volinfo);
- if (ret)
+ if (ret) {
goto out;
+ }
+
+ dict_data->shandle = shandle;
+ dict_data->key_check = 1;
shandle->fd = fd;
- dict_foreach(volinfo->dict, _storeopts, shandle);
+ dict_foreach(volinfo->dict, _storeopts, (void *)dict_data);
+
+ dict_data->key_check = 0;
+ dict_foreach(volinfo->gsync_slaves, _storeopts, (void *)dict_data);
+
+ if (dict_data->buffer_len > 0) {
+ ret = gf_store_save_items(fd, dict_data->buffer);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED, NULL);
+ goto out;
+ }
+ }
- dict_foreach(volinfo->gsync_slaves, _storeslaves, shandle);
shandle->fd = 0;
out:
- gf_msg_debug(THIS->name, 0, "Returning %d", ret);
+ GF_FREE(dict_data);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
return ret;
}
@@ -1274,14 +1289,6 @@ out:
return ret;
}
-static int
-_gd_store_rebalance_dict(dict_t *dict, char *key, data_t *value, void *data)
-{
- int fd = *(int *)data;
-
- return gf_store_save_value(fd, key, value->data);
-}
-
int32_t
glusterd_store_node_state_write(int fd, glusterd_volinfo_t *volinfo)
{
@@ -1289,6 +1296,12 @@ glusterd_store_node_state_write(int fd, glusterd_volinfo_t *volinfo)
char buf[PATH_MAX];
char uuid[UUID_SIZE + 1];
uint total_len = 0;
+ glusterd_volinfo_data_store_t *dict_data = NULL;
+ gf_store_handle_t shandle;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
GF_ASSERT(fd > 0);
GF_ASSERT(volinfo);
@@ -1328,14 +1341,33 @@ glusterd_store_node_state_write(int fd, glusterd_volinfo_t *volinfo)
}
ret = gf_store_save_items(fd, buf);
- if (ret)
+ if (ret) {
goto out;
+ }
if (volinfo->rebal.dict) {
- dict_foreach(volinfo->rebal.dict, _gd_store_rebalance_dict, &fd);
+ dict_data = GF_CALLOC(1, sizeof(glusterd_volinfo_data_store_t),
+ gf_gld_mt_volinfo_dict_data_t);
+ if (dict_data == NULL) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_MEMORY, NULL);
+ return -1;
+ }
+ dict_data->shandle = &shandle;
+ shandle.fd = fd;
+ dict_foreach(volinfo->rebal.dict, _storeopts, (void *)dict_data);
+ if (dict_data->buffer_len > 0) {
+ ret = gf_store_save_items(fd, dict_data->buffer);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED,
+ NULL);
+ goto out;
+ ;
+ }
+ }
}
out:
- gf_msg_debug(THIS->name, 0, "Returning %d", ret);
+ GF_FREE(dict_data);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
return ret;
}
@@ -2309,7 +2341,7 @@ glusterd_store_retrieve_snapd(glusterd_volinfo_t *volinfo)
ret = 0;
out:
- if (gf_store_iter_destroy(iter)) {
+ if (gf_store_iter_destroy(&iter)) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
"Failed to destroy store iter");
ret = -1;
@@ -2642,7 +2674,7 @@ glusterd_store_retrieve_bricks(glusterd_volinfo_t *volinfo)
brick_count++;
}
- if (gf_store_iter_destroy(tmpiter)) {
+ if (gf_store_iter_destroy(&tmpiter)) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
"Failed to destroy store iter");
ret = -1;
@@ -2823,13 +2855,13 @@ glusterd_store_retrieve_bricks(glusterd_volinfo_t *volinfo)
ret = 0;
out:
- if (gf_store_iter_destroy(tmpiter)) {
+ if (gf_store_iter_destroy(&tmpiter)) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
"Failed to destroy store iter");
ret = -1;
}
- if (gf_store_iter_destroy(iter)) {
+ if (gf_store_iter_destroy(&iter)) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
"Failed to destroy store iter");
ret = -1;
@@ -2962,7 +2994,7 @@ glusterd_store_retrieve_node_state(glusterd_volinfo_t *volinfo)
ret = 0;
out:
- if (gf_store_iter_destroy(iter)) {
+ if (gf_store_iter_destroy(&iter)) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
"Failed to destroy store iter");
ret = -1;
@@ -3238,7 +3270,7 @@ glusterd_store_update_volinfo(glusterd_volinfo_t *volinfo)
ret = 0;
out:
- if (gf_store_iter_destroy(iter)) {
+ if (gf_store_iter_destroy(&iter)) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
"Failed to destroy store iter");
ret = -1;
@@ -3343,20 +3375,6 @@ glusterd_store_set_options_path(glusterd_conf_t *conf, char *path, size_t len)
snprintf(path, len, "%s/options", conf->workdir);
}
-int
-_store_global_opts(dict_t *this, char *key, data_t *value, void *data)
-{
- gf_store_handle_t *shandle = data;
-
- if (gf_store_save_value(shandle->fd, key, (char *)value->data)) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_STORE_HANDLE_WRITE_FAIL,
- "Unable to write into store handle for key : %s, value %s", key,
- (char *)value->data);
- }
-
- return 0;
-}
-
int32_t
glusterd_store_options(xlator_t *this, dict_t *opts)
{
@@ -3365,13 +3383,15 @@ glusterd_store_options(xlator_t *this, dict_t *opts)
char path[PATH_MAX] = {0};
int fd = -1;
int32_t ret = -1;
+ glusterd_volinfo_data_store_t *dict_data = NULL;
conf = this->private;
glusterd_store_set_options_path(conf, path, sizeof(path));
ret = gf_store_handle_new(path, &shandle);
- if (ret)
+ if (ret) {
goto out;
+ }
fd = gf_store_mkstemp(shandle);
if (fd <= 0) {
@@ -3379,15 +3399,30 @@ glusterd_store_options(xlator_t *this, dict_t *opts)
goto out;
}
+ dict_data = GF_CALLOC(1, sizeof(glusterd_volinfo_data_store_t),
+ gf_gld_mt_volinfo_dict_data_t);
+ if (dict_data == NULL) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_MEMORY, NULL);
+ return -1;
+ }
+ dict_data->shandle = shandle;
shandle->fd = fd;
- dict_foreach(opts, _store_global_opts, shandle);
- shandle->fd = 0;
+ dict_foreach(opts, _storeopts, (void *)dict_data);
+ if (dict_data->buffer_len > 0) {
+ ret = gf_store_save_items(fd, dict_data->buffer);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED, NULL);
+ goto out;
+ }
+ }
+
ret = gf_store_rename_tmppath(shandle);
- if (ret)
- goto out;
out:
- if ((ret < 0) && (fd > 0))
+ shandle->fd = 0;
+ GF_FREE(dict_data);
+ if ((ret < 0) && (fd > 0)) {
gf_store_unlink_tmppath(shandle);
+ }
gf_store_handle_destroy(shandle);
return ret;
}
@@ -3433,7 +3468,7 @@ glusterd_store_retrieve_options(xlator_t *this)
goto out;
ret = 0;
out:
- (void)gf_store_iter_destroy(iter);
+ (void)gf_store_iter_destroy(&iter);
gf_store_handle_destroy(shandle);
return ret;
}
@@ -3883,7 +3918,7 @@ glusterd_store_update_snap(glusterd_snap_t *snap)
ret = 0;
out:
- if (gf_store_iter_destroy(iter)) {
+ if (gf_store_iter_destroy(&iter)) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL,
"Failed to destroy store iter");
ret = -1;
@@ -4625,7 +4660,7 @@ glusterd_store_retrieve_peers(xlator_t *this)
is_ok = _gf_true;
next:
- (void)gf_store_iter_destroy(iter);
+ (void)gf_store_iter_destroy(&iter);
if (!is_ok) {
gf_log(this->name, GF_LOG_WARNING,
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.h b/xlators/mgmt/glusterd/src/glusterd-store.h
index 04070549678..83f4df0783e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.h
+++ b/xlators/mgmt/glusterd/src/glusterd-store.h
@@ -29,7 +29,7 @@ typedef enum glusterd_store_ver_ac_ {
} glusterd_volinfo_ver_ac_t;
#define UUID_SIZE 36
-
+#define VOLINFO_BUFFER_SIZE 4093
#define GLUSTERD_STORE_UUID_KEY "UUID"
#define GLUSTERD_STORE_KEY_VOL_TYPE "type"
@@ -112,6 +112,19 @@ typedef enum glusterd_store_ver_ac_ {
#define GLUSTERD_STORE_KEY_GANESHA_GLOBAL "nfs-ganesha"
+/*
+ * The structure is responsible for handling the parameter for writes into
+ * the buffer before it is finally written to the file. The writes will be
+ * of the form of key-value pairs.
+ */
+struct glusterd_volinfo_data_store_ {
+ gf_store_handle_t *shandle; /*Contains fd and path of the file */
+ int16_t buffer_len;
+ char key_check; /* flag to check if key is to be validated before write*/
+ char buffer[VOLINFO_BUFFER_SIZE];
+};
+typedef struct glusterd_volinfo_data_store_ glusterd_volinfo_data_store_t;
+
int32_t
glusterd_store_volinfo(glusterd_volinfo_t *volinfo,
glusterd_volinfo_ver_ac_t ac);
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
index 99119d69e45..18b3fb13630 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c
@@ -162,6 +162,9 @@ glusterd_svc_start(glusterd_svc_t *svc, int flags, dict_t *cmdline)
char *localtime_logging = NULL;
char *log_level = NULL;
char daemon_log_level[30] = {0};
+ char msg[1024] = {
+ 0,
+ };
int32_t len = 0;
this = THIS;
@@ -187,7 +190,7 @@ glusterd_svc_start(glusterd_svc_t *svc, int flags, dict_t *cmdline)
runinit(&runner);
- if (this->ctx->cmd_args.valgrind) {
+ if (this->ctx->cmd_args.vgtool != _gf_none) {
len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-%s.log",
svc->proc.logdir, svc->name);
if ((len < 0) || (len >= PATH_MAX)) {
@@ -195,9 +198,13 @@ glusterd_svc_start(glusterd_svc_t *svc, int flags, dict_t *cmdline)
goto unlock;
}
- runner_add_args(&runner, "valgrind", "--leak-check=full",
- "--trace-children=yes", "--track-origins=yes",
- NULL);
+ if (this->ctx->cmd_args.vgtool == _gf_memcheck)
+ runner_add_args(&runner, "valgrind", "--leak-check=full",
+ "--trace-children=yes", "--track-origins=yes",
+ NULL);
+ else
+ runner_add_args(&runner, "valgrind", "--tool=drd", NULL);
+
runner_argprintf(&runner, "--log-file=%s", valgrind_logfile);
}
@@ -226,8 +233,8 @@ glusterd_svc_start(glusterd_svc_t *svc, int flags, dict_t *cmdline)
if (cmdline)
dict_foreach(cmdline, svc_add_args, (void *)&runner);
- gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SVC_START_SUCCESS,
- "Starting %s service", svc->name);
+ snprintf(msg, sizeof(msg), "Starting %s service", svc->name);
+ runner_log(&runner, this->name, GF_LOG_DEBUG, msg);
if (flags == PROC_START_NO_WAIT) {
ret = runner_run_nowait(&runner);
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 7d38b0a42d7..90ef2cf4c9c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -79,6 +79,14 @@
#include <sys/sockio.h>
#endif
+#ifdef __FreeBSD__
+#include <sys/sysctl.h>
+#include <sys/param.h>
+#include <sys/queue.h>
+#include <libprocstat.h>
+#include <libutil.h>
+#endif
+
#define NFS_PROGRAM 100003
#define NFSV3_VERSION 3
@@ -1117,7 +1125,8 @@ glusterd_get_brick_mount_dir(char *brickpath, char *hostname, char *mount_dir)
}
brick_dir = &brickpath[strlen(mnt_pt)];
- brick_dir++;
+ if (brick_dir[0] == '/')
+ brick_dir++;
snprintf(mount_dir, VALID_GLUSTERD_PATHMAX, "/%s", brick_dir);
}
@@ -2068,8 +2077,8 @@ glusterd_volume_start_glusterfs(glusterd_volinfo_t *volinfo,
retry:
runinit(&runner);
- if (this->ctx->cmd_args.valgrind) {
- /* Run bricks with valgrind */
+ if (this->ctx->cmd_args.vgtool != _gf_none) {
+ /* Run bricks with valgrind. */
if (volinfo->logdir) {
len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-%s-%s.log",
volinfo->logdir, volinfo->volname, exp_path);
@@ -2083,8 +2092,13 @@ retry:
goto out;
}
- runner_add_args(&runner, "valgrind", "--leak-check=full",
- "--trace-children=yes", "--track-origins=yes", NULL);
+ if (this->ctx->cmd_args.vgtool == _gf_memcheck)
+ runner_add_args(&runner, "valgrind", "--leak-check=full",
+ "--trace-children=yes", "--track-origins=yes",
+ NULL);
+ else
+ runner_add_args(&runner, "valgrind", "--tool=drd", NULL);
+
runner_argprintf(&runner, "--log-file=%s", valgrind_logfile);
}
@@ -2197,7 +2211,7 @@ retry:
if (is_brick_mx_enabled())
runner_add_arg(&runner, "--brick-mux");
- runner_log(&runner, "", 0, "Starting GlusterFS");
+ runner_log(&runner, "", GF_LOG_DEBUG, "Starting GlusterFS");
brickinfo->port = port;
brickinfo->rdma_port = rdma_port;
@@ -2206,7 +2220,10 @@ retry:
if (wait) {
synclock_unlock(&priv->big_lock);
+ errno = 0;
ret = runner_run(&runner);
+ if (errno != 0)
+ ret = errno;
synclock_lock(&priv->big_lock);
if (ret == EADDRINUSE) {
@@ -2788,6 +2805,15 @@ glusterd_volume_compute_cksum(glusterd_volinfo_t *volinfo, char *cksum_path,
ret = -1;
goto out;
}
+ } else if (priv->op_version < GD_OP_VERSION_7_0) {
+ ret = get_checksum_for_path(filepath, &cksum, priv->op_version);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CKSUM_GET_FAIL,
+ "unable to get "
+ "checksum for path: %s",
+ filepath);
+ goto out;
+ }
}
ret = get_checksum_for_file(fd, &cksum, priv->op_version);
@@ -6083,7 +6109,6 @@ send_attach_req(xlator_t *this, struct rpc_clnt *rpc, char *path,
GF_ATOMIC_INC(conf->blockers);
ret = rpc_clnt_submit(rpc, &gd_brick_prog, op, cbkfn, &iov, 1, NULL, 0,
iobref, frame, NULL, 0, NULL, 0, NULL);
- return ret;
free_iobref:
iobref_unref(iobref);
@@ -6092,7 +6117,7 @@ maybe_free_iobuf:
iobuf_unref(iobuf);
}
err:
- return -1;
+ return ret;
}
extern size_t
@@ -6420,7 +6445,6 @@ find_compatible_brick(glusterd_conf_t *conf, glusterd_volinfo_t *volinfo,
int
glusterd_get_sock_from_brick_pid(int pid, char *sockpath, size_t len)
{
- char fname[128] = "";
char buf[1024] = "";
char cmdline[2048] = "";
xlator_t *this = NULL;
@@ -6435,6 +6459,22 @@ glusterd_get_sock_from_brick_pid(int pid, char *sockpath, size_t len)
this = THIS;
GF_ASSERT(this);
+#ifdef __FreeBSD__
+ blen = sizeof(buf);
+ int mib[4];
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC;
+ mib[2] = KERN_PROC_ARGS;
+ mib[3] = pid;
+
+ if (sys_sysctl(mib, 4, buf, &blen, NULL, blen) != 0) {
+ gf_log(this->name, GF_LOG_ERROR, "brick process %d is not running",
+ pid);
+ return ret;
+ }
+#else
+ char fname[128] = "";
snprintf(fname, sizeof(fname), "/proc/%d/cmdline", pid);
if (sys_access(fname, R_OK) != 0) {
@@ -6451,6 +6491,7 @@ glusterd_get_sock_from_brick_pid(int pid, char *sockpath, size_t len)
strerror(errno), fname);
return ret;
}
+#endif
/* convert cmdline to single string */
for (i = 0, j = 0; i < blen; i++) {
@@ -6499,6 +6540,43 @@ glusterd_get_sock_from_brick_pid(int pid, char *sockpath, size_t len)
char *
search_brick_path_from_proc(pid_t brick_pid, char *brickpath)
{
+ char *brick_path = NULL;
+#ifdef __FreeBSD__
+ struct filestat *fst;
+ struct procstat *ps;
+ struct kinfo_proc *kp;
+ struct filestat_list *head;
+
+ ps = procstat_open_sysctl();
+ if (ps == NULL)
+ goto out;
+
+ kp = kinfo_getproc(brick_pid);
+ if (kp == NULL)
+ goto out;
+
+ head = procstat_getfiles(ps, (void *)kp, 0);
+ if (head == NULL)
+ goto out;
+
+ STAILQ_FOREACH(fst, head, next)
+ {
+ if (fst->fs_fd < 0)
+ continue;
+
+ if (!strcmp(fst->fs_path, brickpath)) {
+ brick_path = gf_strdup(fst->fs_path);
+ break;
+ }
+ }
+
+out:
+ if (head != NULL)
+ procstat_freefiles(ps, head);
+ if (kp != NULL)
+ free(kp);
+ procstat_close(ps);
+#else
struct dirent *dp = NULL;
DIR *dirp = NULL;
size_t len = 0;
@@ -6509,7 +6587,6 @@ search_brick_path_from_proc(pid_t brick_pid, char *brickpath)
0,
},
};
- char *brick_path = NULL;
if (!brickpath)
goto out;
@@ -6547,6 +6624,7 @@ search_brick_path_from_proc(pid_t brick_pid, char *brickpath)
out:
if (dirp)
sys_closedir(dirp);
+#endif
return brick_path;
}
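
The FreeBSD branch above walks the brick process's open files through libprocstat rather than /proc/<pid>/fd. A standalone sketch of the same walk (compile with -lprocstat -lutil); in this sketch the handles are initialized to NULL up front so the cleanup path never tests uninitialized pointers:

    /* FreeBSD-only sketch: list the paths of files a process has open. */
    #include <sys/param.h>
    #include <sys/queue.h>
    #include <sys/socket.h>
    #include <sys/sysctl.h>
    #include <sys/user.h>
    #include <libprocstat.h>
    #include <libutil.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int
    main(int argc, char **argv)
    {
        pid_t pid = argc > 1 ? atoi(argv[1]) : getpid();
        struct procstat *ps = NULL;
        struct kinfo_proc *kp = NULL;
        struct filestat_list *head = NULL;
        struct filestat *fst;

        ps = procstat_open_sysctl();
        if (ps == NULL)
            goto out;
        kp = kinfo_getproc(pid);
        if (kp == NULL)
            goto out;
        head = procstat_getfiles(ps, kp, 0);
        if (head == NULL)
            goto out;

        STAILQ_FOREACH(fst, head, next)
        {
            if (fst->fs_fd < 0 || fst->fs_path == NULL)
                continue;
            printf("%d: %s\n", fst->fs_fd, fst->fs_path);
        }

    out:
        if (head != NULL)
            procstat_freefiles(ps, head);
        free(kp);
        if (ps != NULL)
            procstat_close(ps);
        return 0;
    }
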
@@ -8417,7 +8495,8 @@ glusterd_sm_tr_log_transition_add(glusterd_sm_tr_log_t *log, int old_state,
transitions[next].old_state = old_state;
transitions[next].new_state = new_state;
transitions[next].event = event;
- time(&transitions[next].time);
+ transitions[next].time = gf_time();
+
log->current = next;
if (log->count < log->size)
log->count++;
@@ -14632,7 +14711,8 @@ glusterd_compare_addrinfo(struct addrinfo *first, struct addrinfo *next)
*/
int32_t
glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type,
- int32_t sub_count)
+ char **volname, char **brick_list,
+ int32_t *brick_count, int32_t sub_count)
{
int ret = -1;
int i = 0;
@@ -14643,12 +14723,9 @@ glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type,
addrinfo_list_t *ai_list_tmp1 = NULL;
addrinfo_list_t *ai_list_tmp2 = NULL;
char *brick = NULL;
- char *brick_list = NULL;
char *brick_list_dup = NULL;
char *brick_list_ptr = NULL;
char *tmpptr = NULL;
- char *volname = NULL;
- int32_t brick_count = 0;
struct addrinfo *ai_info = NULL;
char brick_addr[128] = {
0,
@@ -14676,32 +14753,38 @@ glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type,
ai_list->info = NULL;
CDS_INIT_LIST_HEAD(&ai_list->list);
- ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to get volume name");
- goto out;
+ if (!(*volname)) {
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &(*volname));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
}
- ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &brick_list);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Bricks check : Could not "
- "retrieve bricks list");
- goto out;
+ if (!(*brick_list)) {
+ ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &(*brick_list));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Bricks check : Could not "
+ "retrieve bricks list");
+ goto out;
+ }
}
- ret = dict_get_int32n(dict, "count", SLEN("count"), &brick_count);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Bricks check : Could not "
- "retrieve brick count");
- goto out;
+ if (!(*brick_count)) {
+ ret = dict_get_int32n(dict, "count", SLEN("count"), &(*brick_count));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Bricks check : Could not "
+ "retrieve brick count");
+ goto out;
+ }
}
- brick_list_dup = brick_list_ptr = gf_strdup(brick_list);
+ brick_list_dup = brick_list_ptr = gf_strdup(*brick_list);
/* Resolve hostnames and get addrinfo */
- while (i < brick_count) {
+ while (i < *brick_count) {
++i;
brick = strtok_r(brick_list_dup, " \n", &tmpptr);
brick_list_dup = tmpptr;
@@ -14737,8 +14820,12 @@ glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type,
i = 0;
ai_list_tmp1 = cds_list_entry(ai_list->list.next, addrinfo_list_t, list);
+ if (*brick_count < sub_count) {
+ sub_count = *brick_count;
+ }
+
/* Check for bad brick order */
- while (i < brick_count) {
+ while (i < *brick_count) {
++i;
ai_info = ai_list_tmp1->info;
ai_list_tmp1 = cds_list_entry(ai_list_tmp1->list.next, addrinfo_list_t,
@@ -14901,3 +14988,59 @@ out:
GF_FREE(new_auth_allow_list);
return;
}
+
+int
+glusterd_replace_old_auth_allow_list(char *volname)
+{
+ int ret = 0;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+ char *old_auth_allow_list = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_VALIDATE_OR_GOTO(this->name, volname, out);
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ "Unable to find volume: %s", volname);
+ goto out;
+ }
+
+ ret = dict_get_str_sizen(volinfo->dict, "old.auth.allow",
+ &old_auth_allow_list);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_FAILED,
+ "old auth allow list is not set, no need to replace the list");
+ ret = 0;
+ goto out;
+ }
+
+ dict_del_sizen(volinfo->dict, "auth.allow");
+ ret = dict_set_strn(volinfo->dict, "auth.allow", SLEN("auth.allow"),
+ old_auth_allow_list);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Unable to replace auth.allow list");
+ goto out;
+ }
+
+ dict_del_sizen(volinfo->dict, "old.auth.allow");
+
+ ret = glusterd_create_volfiles_and_notify_services(volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLFILE_CREATE_FAIL,
+ "failed to create volfiles");
+ goto out;
+ }
+ ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLINFO_STORE_FAIL,
+ "failed to store volinfo");
+ goto out;
+ }
+out:
+ return ret;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index 05346916968..bf6ac295e26 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -859,6 +859,7 @@ glusterd_add_shd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
int32_t count);
int32_t
glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type,
+ char **volname, char **bricks, int32_t *brick_count,
int32_t sub_count);
#endif
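
With the prototype above, glusterd_check_brick_order() now takes volname, bricks, and brick_count by reference; the staging code in glusterd-volume-ops.c (below) can pass in values it already holds, and the function only falls back to the dict lookups when a value has not been filled in yet. A minimal sketch of that "fill only if empty" out-parameter pattern, with a hypothetical lookup helper standing in for the dict_get_* calls:

    #include <stdio.h>

    /* Hypothetical lookup standing in for dict_get_strn() and friends. */
    static int
    lookup_volname(char **out)
    {
        *out = "testvol";
        return 0;
    }

    /* Fetch the value only if the caller has not already cached it. */
    static int
    get_volname_cached(char **volname)
    {
        if (*volname != NULL)
            return 0;          /* caller already has it; skip the lookup */
        return lookup_volname(volname);
    }

    int
    main(void)
    {
        char *volname = NULL;

        get_volname_cached(&volname);  /* first call performs the lookup */
        get_volname_cached(&volname);  /* second call reuses the cached value */
        printf("%s\n", volname);
        return 0;
    }
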
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
index 087be916c23..8d6fb5e0fac 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
@@ -3361,11 +3361,20 @@ volgen_link_bricks(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
j);
j++;
}
+
if (!xl) {
ret = -1;
goto out;
}
+ if (strncmp(xl_type, "performance/readdir-ahead",
+ SLEN("performance/readdir-ahead")) == 0) {
+ ret = xlator_set_fixed_option(xl, "performance.readdir-ahead",
+ "on");
+ if (ret)
+ goto out;
+ }
+
ret = volgen_xlator_link(xl, trav);
if (ret)
goto out;
@@ -3593,13 +3602,13 @@ volgen_graph_build_readdir_ahead(volgen_graph_t *graph,
int32_t clusters = 0;
if (graph->type == GF_QUOTAD || graph->type == GF_SNAPD ||
- !glusterd_volinfo_get_boolean(volinfo, VKEY_PARALLEL_READDIR) ||
- !glusterd_volinfo_get_boolean(volinfo, VKEY_READDIR_AHEAD))
+ !glusterd_volinfo_get_boolean(volinfo, VKEY_PARALLEL_READDIR))
goto out;
clusters = volgen_link_bricks_from_list_tail(
graph, volinfo, "performance/readdir-ahead", "%s-readdir-ahead-%d",
child_count, 1);
+
out:
return clusters;
}
@@ -3801,6 +3810,38 @@ out:
}
static int
+set_volfile_id_option(volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
+ int clusters)
+{
+ xlator_t *xlator = NULL;
+ int i = 0;
+ int ret = -1;
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+
+ if (conf->op_version < GD_OP_VERSION_9_0)
+ return 0;
+ xlator = first_of(graph);
+
+ for (i = 0; i < clusters; i++) {
+ ret = xlator_set_fixed_option(xlator, "volume-id",
+ uuid_utoa(volinfo->volume_id));
+ if (ret)
+ goto out;
+
+ xlator = xlator->next;
+ }
+
+out:
+ return ret;
+}
+
+static int
volgen_graph_build_afr_clusters(volgen_graph_t *graph,
glusterd_volinfo_t *volinfo)
{
@@ -3842,6 +3883,13 @@ volgen_graph_build_afr_clusters(volgen_graph_t *graph,
clusters = -1;
goto out;
}
+
+ ret = set_volfile_id_option(graph, volinfo, clusters);
+ if (ret) {
+ clusters = -1;
+ goto out;
+ }
+
if (!volinfo->arbiter_count && !volinfo->thin_arbiter_count)
goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index cafdffb63c4..814ab14fb27 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -1002,7 +1002,8 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr,
gf_msg_debug(this->name, 0,
"Replicate cluster type "
"found. Checking brick order.");
- ret = glusterd_check_brick_order(dict, msg, type,
+ ret = glusterd_check_brick_order(dict, msg, type, &volname,
+ &bricks, &brick_count,
replica_count);
} else if (type == GF_CLUSTER_TYPE_DISPERSE) {
ret = dict_get_int32n(dict, "disperse-count",
@@ -1016,7 +1017,8 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr,
gf_msg_debug(this->name, 0,
"Disperse cluster type"
" found. Checking brick order.");
- ret = glusterd_check_brick_order(dict, msg, type,
+ ret = glusterd_check_brick_order(dict, msg, type, &volname,
+ &bricks, &brick_count,
disperse_count);
}
if (ret) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
index 3ac8e2a29d7..398b4d76f52 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c
@@ -1813,7 +1813,7 @@ struct volopt_map_entry glusterd_volopt_map[] = {
{.key = "performance.readdir-ahead",
.voltype = "performance/readdir-ahead",
.option = "!perf",
- .value = "on",
+ .value = "off",
.op_version = 3,
.description = "enable/disable readdir-ahead translator in the volume.",
.flags = VOLOPT_FLAG_CLIENT_OPT | VOLOPT_FLAG_XLATOR_OPT},
@@ -3138,4 +3138,9 @@ struct volopt_map_entry glusterd_volopt_map[] = {
.type = NO_DOC,
},
+ {.key = "cluster.use-anonymous-inode",
+ .voltype = "cluster/replicate",
+ .op_version = GD_OP_VERSION_9_0,
+ .value = "yes",
+ .flags = VOLOPT_FLAG_CLIENT_OPT},
{.key = NULL}};
diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c
index 91c5f9ec5e3..7a86c2997b1 100644
--- a/xlators/mgmt/glusterd/src/glusterd.c
+++ b/xlators/mgmt/glusterd/src/glusterd.c
@@ -1423,7 +1423,7 @@ init(xlator_t *this)
char *mountbroker_root = NULL;
int i = 0;
int total_transport = 0;
- gf_boolean_t valgrind = _gf_false;
+ gf_valgrind_tool vgtool;
char *valgrind_str = NULL;
char *transport_type = NULL;
char var_run_dir[PATH_MAX] = {
@@ -1436,6 +1436,14 @@ init(xlator_t *this)
int32_t len = 0;
int op_version = 0;
+#if defined(RUN_WITH_MEMCHECK)
+ vgtool = _gf_memcheck;
+#elif defined(RUN_WITH_DRD)
+ vgtool = _gf_drd;
+#else
+ vgtool = _gf_none;
+#endif
+
#ifndef GF_DARWIN_HOST_OS
{
struct rlimit lim;
@@ -1925,18 +1933,24 @@ init(xlator_t *this)
}
/* Set option to run bricks on valgrind if enabled in glusterd.vol */
- this->ctx->cmd_args.valgrind = valgrind;
+ this->ctx->cmd_args.vgtool = vgtool;
ret = dict_get_str(this->options, "run-with-valgrind", &valgrind_str);
if (ret < 0) {
gf_msg_debug(this->name, 0, "cannot get run-with-valgrind value");
}
if (valgrind_str) {
- if (gf_string2boolean(valgrind_str, &valgrind)) {
+ gf_boolean_t vg = _gf_false;
+
+ if (!strcmp(valgrind_str, "memcheck"))
+ this->ctx->cmd_args.vgtool = _gf_memcheck;
+ else if (!strcmp(valgrind_str, "drd"))
+ this->ctx->cmd_args.vgtool = _gf_drd;
+ else if (!gf_string2boolean(valgrind_str, &vg))
+ this->ctx->cmd_args.vgtool = (vg ? _gf_memcheck : _gf_none);
+ else
gf_msg(this->name, GF_LOG_WARNING, EINVAL, GD_MSG_INVALID_ENTRY,
- "run-with-valgrind value not a boolean string");
- } else {
- this->ctx->cmd_args.valgrind = valgrind;
- }
+ "run-with-valgrind is neither boolean"
+ " nor one of 'memcheck' or 'drd'");
}
/* Store ping-timeout in conf */
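
With the boolean flag gone, the glusterd.vol option run-with-valgrind accepts "memcheck", "drd", or a boolean, and the boolean form maps to memcheck for backward compatibility. A standalone sketch of that parse, with a simplified boolean helper in place of gf_string2boolean() and illustrative enum names:

    #include <stdio.h>
    #include <string.h>

    typedef enum { VG_NONE, VG_MEMCHECK, VG_DRD } vg_tool_t;

    /* Simplified stand-in for gf_string2boolean(): 0 on success, -1 otherwise. */
    static int
    parse_bool(const char *s, int *out)
    {
        if (!strcmp(s, "yes") || !strcmp(s, "true") || !strcmp(s, "on"))
            return (*out = 1), 0;
        if (!strcmp(s, "no") || !strcmp(s, "false") || !strcmp(s, "off"))
            return (*out = 0), 0;
        return -1;
    }

    static vg_tool_t
    parse_run_with_valgrind(const char *value)
    {
        int b = 0;

        if (!strcmp(value, "memcheck"))
            return VG_MEMCHECK;
        if (!strcmp(value, "drd"))
            return VG_DRD;
        if (parse_bool(value, &b) == 0)
            return b ? VG_MEMCHECK : VG_NONE;   /* legacy boolean form */

        fprintf(stderr, "run-with-valgrind: unrecognised value '%s'\n", value);
        return VG_NONE;
    }

    int
    main(void)
    {
        printf("%d %d %d\n", parse_run_with_valgrind("drd"),
               parse_run_with_valgrind("yes"), parse_run_with_valgrind("bogus"));
        return 0;
    }
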
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index 2c8fab8f0e7..cc4f98ecf47 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -1197,6 +1197,8 @@ glusterd_op_set_ganesha(dict_t *dict, char **errstr);
int
ganesha_manage_export(dict_t *dict, char *value,
gf_boolean_t update_cache_invalidation, char **op_errstr);
+int
+gd_ganesha_send_dbus(char *volname, char *value);
gf_boolean_t
glusterd_is_ganesha_cluster();
gf_boolean_t
@@ -1367,4 +1369,7 @@ glusterd_recreate_volfiles(glusterd_conf_t *conf);
void
glusterd_add_peers_to_auth_list(char *volname);
+int
+glusterd_replace_old_auth_allow_list(char *volname);
+
#endif