commit e16868dede6455cab644805af6fe1ac312775e13 (patch)
tree 15aebdb4fff2d87cf8a72f836816b3aa634da58d /xlators/mgmt/glusterd/src
parent 45a71c0548b6fd2c757aa2e7b7671a1411948894 (diff)
Land part 2 of clang-format changes
Change-Id: Ia84cc24c8924e6d22d02ac15f611c10e26db99b4
Signed-off-by: Nigel Babu <nigelb@redhat.com>
Diffstat (limited to 'xlators/mgmt/glusterd/src')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-bitd-svc.c            283
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-bitrot.c             1209
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-brick-ops.c          5931
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-conn-helper.c           4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c           169
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-geo-rep.c           11556
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc-helper.c 288
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c        770
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c           11146
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handshake.c          3973
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-hooks.c               904
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-locks.c              1389
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-log-ops.c             450
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c       1647
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mgmt.c               4550
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mountbroker.c        1224
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-nfs-svc.c             317
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c             14005
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-peer-utils.c         1488
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-pmap.c                953
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c           180
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-quota.c              3671
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-quotad-svc.c          311
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rebalance.c          1862
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-replace-brick.c      1195
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-reset-brick.c         653
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rpc-ops.c            4101
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-scrub-svc.c           280
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-server-quorum.c       732
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-shd-svc.c             379
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-sm.c                 2515
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapd-svc-helper.c     69
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapd-svc.c           749
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c     6987
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot.c          17855
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-statedump.c           388
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-store.c              8385
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-svc-helper.c          408
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c            523
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-syncop.c             3299
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-tier.c               2377
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-tierd-svc-helper.c    284
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-tierd-svc.c           815
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c             23563
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volgen.c            10876
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-ops.c         5688
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-set.c         6986
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.c                    3714
48 files changed, 84470 insertions, 86631 deletions
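
Every hunk below makes the same mechanical change: the old GNU-style layout (a space before the call parenthesis, 8-column indents, continuation arguments aligned under the opening paren, indented goto labels) is replaced by clang-format output (no space before calls, 4-column indents, labels at column 0, lines packed up to roughly 80 columns). A minimal, self-contained C sketch of the two shapes, using a hypothetical demo function rather than any real glusterd symbol:

#include <stdio.h>

/* Old shape (pre clang-format): space before the call parenthesis,
 * 8-column indents, continuation argument aligned under the paren. */
static int
demo_old (const char *name)
{
        int ret = -1;

        ret = printf ("name=%s\n",
                      name);
        return ret;
}

/* New shape (clang-format output): no space before '(', 4-column
 * indents, arguments packed onto one line when they fit. */
static int
demo_new(const char *name)
{
    int ret = -1;

    ret = printf("name=%s\n", name);
    return ret;
}

int
main(void)
{
    demo_old("before");
    demo_new("after");
    return 0;
}

Both variants are valid C; only the layout differs, which is why the diffstat above shows insertions and deletions of the same order of magnitude with no functional change.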
diff --git a/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c b/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c
index 69c70754731..b01c2599dfb 100644
--- a/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c
@@ -17,193 +17,190 @@
#include "glusterd-svc-helper.h"
void
-glusterd_bitdsvc_build (glusterd_svc_t *svc)
+glusterd_bitdsvc_build(glusterd_svc_t *svc)
{
- svc->manager = glusterd_bitdsvc_manager;
- svc->start = glusterd_bitdsvc_start;
- svc->stop = glusterd_bitdsvc_stop;
+ svc->manager = glusterd_bitdsvc_manager;
+ svc->start = glusterd_bitdsvc_start;
+ svc->stop = glusterd_bitdsvc_stop;
}
int
-glusterd_bitdsvc_init (glusterd_svc_t *svc)
+glusterd_bitdsvc_init(glusterd_svc_t *svc)
{
- return glusterd_svc_init (svc, bitd_svc_name);
+ return glusterd_svc_init(svc, bitd_svc_name);
}
static int
-glusterd_bitdsvc_create_volfile ()
+glusterd_bitdsvc_create_volfile()
{
- char filepath[PATH_MAX] = {0,};
- int ret = -1;
- glusterd_conf_t *conf = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- conf = this->private;
- GF_ASSERT (conf);
-
-
- glusterd_svc_build_volfile_path (bitd_svc_name, conf->workdir,
- filepath, sizeof (filepath));
-
- ret = glusterd_create_global_volfile (build_bitd_graph,
- filepath, NULL);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLFILE_CREATE_FAIL,
- "Failed to create volfile");
- goto out;
- }
+ char filepath[PATH_MAX] = {
+ 0,
+ };
+ int ret = -1;
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ glusterd_svc_build_volfile_path(bitd_svc_name, conf->workdir, filepath,
+ sizeof(filepath));
+
+ ret = glusterd_create_global_volfile(build_bitd_graph, filepath, NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
+ "Failed to create volfile");
+ goto out;
+ }
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
- return ret;
+ return ret;
}
int
-glusterd_bitdsvc_manager (glusterd_svc_t *svc, void *data, int flags)
+glusterd_bitdsvc_manager(glusterd_svc_t *svc, void *data, int flags)
{
- int ret = 0;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- if (!svc->inited) {
- ret = glusterd_bitdsvc_init (svc);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BITD_INIT_FAIL, "Failed to init "
- "bitd service");
- goto out;
- } else {
- svc->inited = _gf_true;
- gf_msg_debug (this->name, 0, "BitD service "
- "initialized");
- }
- }
+ int ret = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
- if (glusterd_should_i_stop_bitd ()) {
- ret = svc->stop (svc, SIGTERM);
+ if (!svc->inited) {
+ ret = glusterd_bitdsvc_init(svc);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BITD_INIT_FAIL,
+ "Failed to init "
+ "bitd service");
+ goto out;
} else {
- ret = glusterd_bitdsvc_create_volfile ();
- if (ret)
- goto out;
+ svc->inited = _gf_true;
+ gf_msg_debug(this->name, 0,
+ "BitD service "
+ "initialized");
+ }
+ }
+
+ if (glusterd_should_i_stop_bitd()) {
+ ret = svc->stop(svc, SIGTERM);
+ } else {
+ ret = glusterd_bitdsvc_create_volfile();
+ if (ret)
+ goto out;
- ret = svc->stop (svc, SIGKILL);
- if (ret)
- goto out;
+ ret = svc->stop(svc, SIGKILL);
+ if (ret)
+ goto out;
- ret = svc->start (svc, flags);
- if (ret)
- goto out;
+ ret = svc->start(svc, flags);
+ if (ret)
+ goto out;
- ret = glusterd_conn_connect (&(svc->conn));
- if (ret)
- goto out;
- }
+ ret = glusterd_conn_connect(&(svc->conn));
+ if (ret)
+ goto out;
+ }
out:
- if (ret)
- gf_event (EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
+ if (ret)
+ gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
- gf_msg_debug (THIS->name, 0, "Returning %d", ret);
+ gf_msg_debug(THIS->name, 0, "Returning %d", ret);
- return ret;
+ return ret;
}
int
-glusterd_bitdsvc_start (glusterd_svc_t *svc, int flags)
+glusterd_bitdsvc_start(glusterd_svc_t *svc, int flags)
{
- int ret = -1;
- dict_t *cmdict = NULL;
+ int ret = -1;
+ dict_t *cmdict = NULL;
- cmdict = dict_new ();
- if (!cmdict)
- goto error_return;
+ cmdict = dict_new();
+ if (!cmdict)
+ goto error_return;
- ret = dict_set_str (cmdict, "cmdarg0", "--global-timer-wheel");
- if (ret)
- goto dealloc_dict;
+ ret = dict_set_str(cmdict, "cmdarg0", "--global-timer-wheel");
+ if (ret)
+ goto dealloc_dict;
- ret = glusterd_svc_start (svc, flags, cmdict);
+ ret = glusterd_svc_start(svc, flags, cmdict);
- dealloc_dict:
- dict_unref (cmdict);
- error_return:
- return ret;
+dealloc_dict:
+ dict_unref(cmdict);
+error_return:
+ return ret;
}
int
-glusterd_bitdsvc_stop (glusterd_svc_t *svc, int sig)
+glusterd_bitdsvc_stop(glusterd_svc_t *svc, int sig)
{
- return glusterd_svc_stop (svc, sig);
+ return glusterd_svc_stop(svc, sig);
}
int
-glusterd_bitdsvc_reconfigure ()
+glusterd_bitdsvc_reconfigure()
{
- int ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- gf_boolean_t identical = _gf_false;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO (this->name, this, out);
-
- priv = this->private;
- GF_VALIDATE_OR_GOTO (this->name, priv, out);
-
- if (glusterd_should_i_stop_bitd ())
- goto manager;
- /*
- * Check both OLD and NEW volfiles, if they are SAME by size
- * and cksum i.e. "character-by-character". If YES, then
- * NOTHING has been changed, just return.
- */
- ret = glusterd_svc_check_volfile_identical (priv->bitd_svc.name,
- build_bitd_graph,
- &identical);
- if (ret)
- goto out;
- if (identical) {
- ret = 0;
- goto out;
- }
-
- /*
- * They are not identical. Find out if the topology is changed
- * OR just the volume options. If just the options which got
- * changed, then inform the xlator to reconfigure the options.
- */
- identical = _gf_false; /* RESET the FLAG */
- ret = glusterd_svc_check_topology_identical (priv->bitd_svc.name,
- build_bitd_graph,
- &identical);
- if (ret)
- goto out; /*not able to compare due to some corruption */
-
- /* Topology is not changed, but just the options. But write the
- * options to bitd volfile, so that bitd will be reconfigured.
- */
- if (identical) {
- ret = glusterd_bitdsvc_create_volfile ();
- if (ret == 0) {/* Only if above PASSES */
- ret = glusterd_fetchspec_notify (THIS);
- }
- goto out;
+ int ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ gf_boolean_t identical = _gf_false;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO(this->name, this, out);
+
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, priv, out);
+
+ if (glusterd_should_i_stop_bitd())
+ goto manager;
+ /*
+ * Check both OLD and NEW volfiles, if they are SAME by size
+ * and cksum i.e. "character-by-character". If YES, then
+ * NOTHING has been changed, just return.
+ */
+ ret = glusterd_svc_check_volfile_identical(priv->bitd_svc.name,
+ build_bitd_graph, &identical);
+ if (ret)
+ goto out;
+ if (identical) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * They are not identical. Find out if the topology is changed
+ * OR just the volume options. If just the options which got
+ * changed, then inform the xlator to reconfigure the options.
+ */
+ identical = _gf_false; /* RESET the FLAG */
+ ret = glusterd_svc_check_topology_identical(priv->bitd_svc.name,
+ build_bitd_graph, &identical);
+ if (ret)
+ goto out; /*not able to compare due to some corruption */
+
+ /* Topology is not changed, but just the options. But write the
+ * options to bitd volfile, so that bitd will be reconfigured.
+ */
+ if (identical) {
+ ret = glusterd_bitdsvc_create_volfile();
+ if (ret == 0) { /* Only if above PASSES */
+ ret = glusterd_fetchspec_notify(THIS);
}
+ goto out;
+ }
manager:
- /*
- * bitd volfile's topology has been changed. bitd server needs
- * to be RESTARTED to ACT on the changed volfile.
- */
- ret = priv->bitd_svc.manager (&(priv->bitd_svc), NULL,
- PROC_START_NO_WAIT);
+ /*
+ * bitd volfile's topology has been changed. bitd server needs
+ * to be RESTARTED to ACT on the changed volfile.
+ */
+ ret = priv->bitd_svc.manager(&(priv->bitd_svc), NULL, PROC_START_NO_WAIT);
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-bitrot.c b/xlators/mgmt/glusterd/src/glusterd-bitrot.c
index 10babdb08fa..0608badb91d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-bitrot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-bitrot.c
@@ -27,712 +27,711 @@
#include <dlfcn.h>
const char *gd_bitrot_op_list[GF_BITROT_OPTION_TYPE_MAX] = {
- [GF_BITROT_OPTION_TYPE_NONE] = "none",
- [GF_BITROT_OPTION_TYPE_ENABLE] = "enable",
- [GF_BITROT_OPTION_TYPE_DISABLE] = "disable",
- [GF_BITROT_OPTION_TYPE_SCRUB_THROTTLE] = "scrub-throttle",
- [GF_BITROT_OPTION_TYPE_SCRUB_FREQ] = "scrub-frequency",
- [GF_BITROT_OPTION_TYPE_SCRUB] = "scrub",
- [GF_BITROT_OPTION_TYPE_EXPIRY_TIME] = "expiry-time",
+ [GF_BITROT_OPTION_TYPE_NONE] = "none",
+ [GF_BITROT_OPTION_TYPE_ENABLE] = "enable",
+ [GF_BITROT_OPTION_TYPE_DISABLE] = "disable",
+ [GF_BITROT_OPTION_TYPE_SCRUB_THROTTLE] = "scrub-throttle",
+ [GF_BITROT_OPTION_TYPE_SCRUB_FREQ] = "scrub-frequency",
+ [GF_BITROT_OPTION_TYPE_SCRUB] = "scrub",
+ [GF_BITROT_OPTION_TYPE_EXPIRY_TIME] = "expiry-time",
};
int
-__glusterd_handle_bitrot (rpcsvc_request_t *req)
+__glusterd_handle_bitrot(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf_cli_req cli_req = { {0,} };
- dict_t *dict = NULL;
- glusterd_op_t cli_op = GD_OP_BITROT;
- char *volname = NULL;
- char *scrub = NULL;
- int32_t type = 0;
- char msg[256] = {0,};
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
-
- GF_ASSERT (req);
-
- this = THIS;
- GF_ASSERT (this);
-
- conf = this->private;
- GF_ASSERT (conf);
-
- ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{
+ 0,
+ }};
+ dict_t *dict = NULL;
+ glusterd_op_t cli_op = GD_OP_BITROT;
+ char *volname = NULL;
+ char *scrub = NULL;
+ int32_t type = 0;
+ char msg[256] = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ GF_ASSERT(req);
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new();
+
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
if (ret < 0) {
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- if (cli_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new ();
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL, "failed to "
- "unserialize req-buffer to dictionary");
- snprintf (msg, sizeof (msg), "Unable to decode the "
- "command");
- goto out;
- } else {
- dict->extra_stdfree = cli_req.dict.dict_val;
- }
- }
-
- ret = dict_get_str (dict, "volname", &volname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf(msg, sizeof(msg),
+ "Unable to decode the "
+ "command");
+ goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
+ }
+ }
+
+ ret = dict_get_str(dict, "volname", &volname);
+ if (ret) {
+ snprintf(msg, sizeof(msg), "Unable to get volume name");
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name, "
+ "while handling bitrot command");
+ goto out;
+ }
+
+ ret = dict_get_int32(dict, "type", &type);
+ if (ret) {
+ snprintf(msg, sizeof(msg), "Unable to get type of command");
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get type of cmd, "
+ "while handling bitrot command");
+ goto out;
+ }
+
+ if (conf->op_version < GD_OP_VERSION_3_7_0) {
+ snprintf(msg, sizeof(msg),
+ "Cannot execute command. The "
+ "cluster is operating at version %d. Bitrot command "
+ "%s is unavailable in this version",
+ conf->op_version, gd_bitrot_op_list[type]);
+ ret = -1;
+ goto out;
+ }
+
+ if (type == GF_BITROT_CMD_SCRUB_STATUS) {
+ /* Backward compatibility handling for scrub status command*/
+ if (conf->op_version < GD_OP_VERSION_3_7_7) {
+ snprintf(msg, sizeof(msg),
+ "Cannot execute command. "
+ "The cluster is operating at version %d. "
+ "Bitrot scrub status command unavailable in "
+ "this version",
+ conf->op_version);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "scrub-value", &scrub);
if (ret) {
- snprintf (msg, sizeof (msg), "Unable to get volume name");
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to get volume name, "
- "while handling bitrot command");
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Failed to get scrub value.");
+ ret = -1;
+ goto out;
}
- ret = dict_get_int32 (dict, "type", &type);
- if (ret) {
- snprintf (msg, sizeof (msg), "Unable to get type of command");
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to get type of cmd, "
- "while handling bitrot command");
- goto out;
+ if (!strncmp(scrub, "status", SLEN("status"))) {
+ ret = glusterd_op_begin_synctask(req, GD_OP_SCRUB_STATUS, dict);
+ goto out;
}
+ }
- if (conf->op_version < GD_OP_VERSION_3_7_0) {
- snprintf (msg, sizeof (msg), "Cannot execute command. The "
- "cluster is operating at version %d. Bitrot command "
- "%s is unavailable in this version", conf->op_version,
- gd_bitrot_op_list[type]);
- ret = -1;
- goto out;
+ if (type == GF_BITROT_CMD_SCRUB_ONDEMAND) {
+ /* Backward compatibility handling for scrub status command*/
+ if (conf->op_version < GD_OP_VERSION_3_9_0) {
+ snprintf(msg, sizeof(msg),
+ "Cannot execute command. "
+ "The cluster is operating at version %d. "
+ "Bitrot scrub ondemand command unavailable in "
+ "this version",
+ conf->op_version);
+ ret = -1;
+ goto out;
}
- if (type == GF_BITROT_CMD_SCRUB_STATUS) {
- /* Backward compatibility handling for scrub status command*/
- if (conf->op_version < GD_OP_VERSION_3_7_7) {
- snprintf (msg, sizeof (msg), "Cannot execute command. "
- "The cluster is operating at version %d. "
- "Bitrot scrub status command unavailable in "
- "this version", conf->op_version);
- ret = -1;
- goto out;
- }
-
- ret = dict_get_str (dict, "scrub-value", &scrub);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Failed to get scrub value.");
- ret = -1;
- goto out;
- }
-
- if (!strncmp (scrub, "status", SLEN ("status"))) {
- ret = glusterd_op_begin_synctask (req,
- GD_OP_SCRUB_STATUS,
- dict);
- goto out;
- }
+ ret = dict_get_str(dict, "scrub-value", &scrub);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Failed to get scrub value.");
+ ret = -1;
+ goto out;
}
- if (type == GF_BITROT_CMD_SCRUB_ONDEMAND) {
- /* Backward compatibility handling for scrub status command*/
- if (conf->op_version < GD_OP_VERSION_3_9_0) {
- snprintf (msg, sizeof (msg), "Cannot execute command. "
- "The cluster is operating at version %d. "
- "Bitrot scrub ondemand command unavailable in "
- "this version", conf->op_version);
- ret = -1;
- goto out;
- }
-
- ret = dict_get_str (dict, "scrub-value", &scrub);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Failed to get scrub value.");
- ret = -1;
- goto out;
- }
-
- if (!strncmp (scrub, "ondemand", SLEN ("ondemand"))) {
- ret = glusterd_op_begin_synctask (req,
- GD_OP_SCRUB_ONDEMAND,
- dict);
- goto out;
- }
+ if (!strncmp(scrub, "ondemand", SLEN("ondemand"))) {
+ ret = glusterd_op_begin_synctask(req, GD_OP_SCRUB_ONDEMAND, dict);
+ goto out;
}
+ }
- ret = glusterd_op_begin_synctask (req, GD_OP_BITROT, dict);
+ ret = glusterd_op_begin_synctask(req, GD_OP_BITROT, dict);
out:
- if (ret) {
- if (msg[0] == '\0')
- snprintf (msg, sizeof (msg), "Bitrot operation failed");
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- dict, msg);
- }
+ if (ret) {
+ if (msg[0] == '\0')
+ snprintf(msg, sizeof(msg), "Bitrot operation failed");
+ ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, msg);
+ }
- return ret;
+ return ret;
}
int
-glusterd_handle_bitrot (rpcsvc_request_t *req)
+glusterd_handle_bitrot(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __glusterd_handle_bitrot);
+ return glusterd_big_locked_handler(req, __glusterd_handle_bitrot);
}
static int
-glusterd_bitrot_scrub_throttle (glusterd_volinfo_t *volinfo, dict_t *dict,
- char *key, char **op_errstr)
+glusterd_bitrot_scrub_throttle(glusterd_volinfo_t *volinfo, dict_t *dict,
+ char *key, char **op_errstr)
{
- int32_t ret = -1;
- char *scrub_throttle = NULL;
- char *option = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- ret = dict_get_str (dict, "scrub-throttle-value", &scrub_throttle);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to fetch scrub-"
- "throttle value");
- goto out;
- }
-
- option = gf_strdup (scrub_throttle);
- ret = dict_set_dynstr (volinfo->dict, key, option);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED, "Failed to set option %s",
- key);
- goto out;
- }
-
- ret = glusterd_scrubsvc_reconfigure ();
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SCRUBSVC_RECONF_FAIL,
- "Failed to reconfigure scrub "
- "services");
- goto out;
- }
+ int32_t ret = -1;
+ char *scrub_throttle = NULL;
+ char *option = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = dict_get_str(dict, "scrub-throttle-value", &scrub_throttle);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to fetch scrub-"
+ "throttle value");
+ goto out;
+ }
+
+ option = gf_strdup(scrub_throttle);
+ ret = dict_set_dynstr(volinfo->dict, key, option);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Failed to set option %s", key);
+ goto out;
+ }
+
+ ret = glusterd_scrubsvc_reconfigure();
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SCRUBSVC_RECONF_FAIL,
+ "Failed to reconfigure scrub "
+ "services");
+ goto out;
+ }
out:
- return ret;
+ return ret;
}
static int
-glusterd_bitrot_scrub_freq (glusterd_volinfo_t *volinfo, dict_t *dict,
- char *key, char **op_errstr)
+glusterd_bitrot_scrub_freq(glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
+ char **op_errstr)
{
- int32_t ret = -1;
- char *scrub_freq = NULL;
- xlator_t *this = NULL;
- char *option = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- ret = dict_get_str (dict, "scrub-frequency-value", &scrub_freq);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to fetch scrub-"
- "freq value");
- goto out;
- }
-
- option = gf_strdup (scrub_freq);
- ret = dict_set_dynstr (volinfo->dict, key, option);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED, "Failed to set option %s",
- key);
- goto out;
- }
-
- ret = glusterd_scrubsvc_reconfigure ();
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SCRUBSVC_RECONF_FAIL,
- "Failed to reconfigure scrub "
- "services");
- goto out;
- }
+ int32_t ret = -1;
+ char *scrub_freq = NULL;
+ xlator_t *this = NULL;
+ char *option = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = dict_get_str(dict, "scrub-frequency-value", &scrub_freq);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to fetch scrub-"
+ "freq value");
+ goto out;
+ }
+
+ option = gf_strdup(scrub_freq);
+ ret = dict_set_dynstr(volinfo->dict, key, option);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Failed to set option %s", key);
+ goto out;
+ }
+
+ ret = glusterd_scrubsvc_reconfigure();
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SCRUBSVC_RECONF_FAIL,
+ "Failed to reconfigure scrub "
+ "services");
+ goto out;
+ }
out:
- return ret;
+ return ret;
}
static int
-glusterd_bitrot_scrub (glusterd_volinfo_t *volinfo, dict_t *dict,
- char *key, char **op_errstr)
+glusterd_bitrot_scrub(glusterd_volinfo_t *volinfo, dict_t *dict, char *key,
+ char **op_errstr)
{
- int32_t ret = -1;
- char *scrub_value = NULL;
- xlator_t *this = NULL;
- char *option = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- ret = dict_get_str (dict, "scrub-value", &scrub_value);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to fetch scrub"
- "value");
- goto out;
- }
-
- if (!strcmp (scrub_value, "resume")) {
- option = gf_strdup ("Active");
- } else {
- option = gf_strdup (scrub_value);
- }
-
- ret = dict_set_dynstr (volinfo->dict, key, option);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED, "Failed to set option %s",
- key);
- goto out;
- }
-
- ret = glusterd_scrubsvc_reconfigure ();
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SCRUBSVC_RECONF_FAIL,
- "Failed to reconfigure scrub "
- "services");
- goto out;
- }
+ int32_t ret = -1;
+ char *scrub_value = NULL;
+ xlator_t *this = NULL;
+ char *option = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = dict_get_str(dict, "scrub-value", &scrub_value);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to fetch scrub"
+ "value");
+ goto out;
+ }
+
+ if (!strcmp(scrub_value, "resume")) {
+ option = gf_strdup("Active");
+ } else {
+ option = gf_strdup(scrub_value);
+ }
+
+ ret = dict_set_dynstr(volinfo->dict, key, option);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Failed to set option %s", key);
+ goto out;
+ }
+
+ ret = glusterd_scrubsvc_reconfigure();
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SCRUBSVC_RECONF_FAIL,
+ "Failed to reconfigure scrub "
+ "services");
+ goto out;
+ }
out:
- return ret;
+ return ret;
}
static int
-glusterd_bitrot_expiry_time (glusterd_volinfo_t *volinfo, dict_t *dict,
- char *key, char **op_errstr)
+glusterd_bitrot_expiry_time(glusterd_volinfo_t *volinfo, dict_t *dict,
+ char *key, char **op_errstr)
{
- int32_t ret = -1;
- uint32_t expiry_time = 0;
- xlator_t *this = NULL;
- char dkey[1024] = {0,};
-
- this = THIS;
- GF_ASSERT (this);
-
- ret = dict_get_uint32 (dict, "expiry-time", &expiry_time);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to get bitrot expiry"
- " timer value.");
- goto out;
- }
-
- snprintf (dkey, sizeof (dkey), "%d", expiry_time);
-
- ret = dict_set_dynstr_with_alloc (volinfo->dict, key, dkey);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED, "Failed to set option %s",
- key);
- goto out;
- }
-
- ret = glusterd_bitdsvc_reconfigure ();
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BITDSVC_RECONF_FAIL,
- "Failed to reconfigure bitrot"
- "services");
- goto out;
- }
+ int32_t ret = -1;
+ uint32_t expiry_time = 0;
+ xlator_t *this = NULL;
+ char dkey[1024] = {
+ 0,
+ };
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = dict_get_uint32(dict, "expiry-time", &expiry_time);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get bitrot expiry"
+ " timer value.");
+ goto out;
+ }
+
+ snprintf(dkey, sizeof(dkey), "%d", expiry_time);
+
+ ret = dict_set_dynstr_with_alloc(volinfo->dict, key, dkey);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Failed to set option %s", key);
+ goto out;
+ }
+
+ ret = glusterd_bitdsvc_reconfigure();
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BITDSVC_RECONF_FAIL,
+ "Failed to reconfigure bitrot"
+ "services");
+ goto out;
+ }
out:
- return ret;
+ return ret;
}
static int
-glusterd_bitrot_enable (glusterd_volinfo_t *volinfo, char **op_errstr)
+glusterd_bitrot_enable(glusterd_volinfo_t *volinfo, char **op_errstr)
{
- int32_t ret = -1;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_VALIDATE_OR_GOTO (this->name, volinfo, out);
- GF_VALIDATE_OR_GOTO (this->name, op_errstr, out);
-
- if (glusterd_is_volume_started (volinfo) == 0) {
- *op_errstr = gf_strdup ("Volume is stopped, start volume "
- "to enable bitrot.");
- ret = -1;
- goto out;
- }
-
- ret = glusterd_is_bitrot_enabled (volinfo);
- if (ret) {
- *op_errstr = gf_strdup ("Bitrot is already enabled");
- ret = -1;
- goto out;
- }
-
- ret = dict_set_dynstr_with_alloc (volinfo->dict, VKEY_FEATURES_BITROT,
- "on");
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED, "dict set failed");
- goto out;
- }
-
- /*Once bitrot is enable scrubber should be in Active state*/
- ret = dict_set_dynstr_with_alloc (volinfo->dict, "features.scrub",
- "Active");
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED, "Failed to set option "
- "features.scrub value");
- goto out;
- }
-
- ret = 0;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_VALIDATE_OR_GOTO(this->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO(this->name, op_errstr, out);
+
+ if (glusterd_is_volume_started(volinfo) == 0) {
+ *op_errstr = gf_strdup(
+ "Volume is stopped, start volume "
+ "to enable bitrot.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_is_bitrot_enabled(volinfo);
+ if (ret) {
+ *op_errstr = gf_strdup("Bitrot is already enabled");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_dynstr_with_alloc(volinfo->dict, VKEY_FEATURES_BITROT, "on");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "dict set failed");
+ goto out;
+ }
+
+ /*Once bitrot is enable scrubber should be in Active state*/
+ ret = dict_set_dynstr_with_alloc(volinfo->dict, "features.scrub", "Active");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Failed to set option "
+ "features.scrub value");
+ goto out;
+ }
+
+ ret = 0;
out:
- if (ret && op_errstr && !*op_errstr)
- gf_asprintf (op_errstr, "Enabling bitrot on volume %s has been "
- "unsuccessful", volinfo->volname);
- return ret;
+ if (ret && op_errstr && !*op_errstr)
+ gf_asprintf(op_errstr,
+ "Enabling bitrot on volume %s has been "
+ "unsuccessful",
+ volinfo->volname);
+ return ret;
}
static int
-glusterd_bitrot_disable (glusterd_volinfo_t *volinfo, char **op_errstr)
+glusterd_bitrot_disable(glusterd_volinfo_t *volinfo, char **op_errstr)
{
- int32_t ret = -1;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
-
- GF_VALIDATE_OR_GOTO (this->name, volinfo, out);
- GF_VALIDATE_OR_GOTO (this->name, op_errstr, out);
-
- ret = dict_set_dynstr_with_alloc (volinfo->dict, VKEY_FEATURES_BITROT,
- "off");
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED, "dict set failed");
- goto out;
- }
-
- /*Once bitrot disabled scrubber should be Inactive state*/
- ret = dict_set_dynstr_with_alloc (volinfo->dict, "features.scrub",
- "Inactive");
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED, "Failed to set "
- "features.scrub value");
- goto out;
- }
-
- ret = 0;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ GF_VALIDATE_OR_GOTO(this->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO(this->name, op_errstr, out);
+
+ ret = dict_set_dynstr_with_alloc(volinfo->dict, VKEY_FEATURES_BITROT,
+ "off");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "dict set failed");
+ goto out;
+ }
+
+ /*Once bitrot disabled scrubber should be Inactive state*/
+ ret = dict_set_dynstr_with_alloc(volinfo->dict, "features.scrub",
+ "Inactive");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Failed to set "
+ "features.scrub value");
+ goto out;
+ }
+
+ ret = 0;
out:
- if (ret && op_errstr && !*op_errstr)
- gf_asprintf (op_errstr, "Disabling bitrot on volume %s has "
- "been unsuccessful", volinfo->volname);
- return ret;
+ if (ret && op_errstr && !*op_errstr)
+ gf_asprintf(op_errstr,
+ "Disabling bitrot on volume %s has "
+ "been unsuccessful",
+ volinfo->volname);
+ return ret;
}
gf_boolean_t
-glusterd_should_i_stop_bitd ()
+glusterd_should_i_stop_bitd()
{
- glusterd_conf_t *conf = THIS->private;
- glusterd_volinfo_t *volinfo = NULL;
- gf_boolean_t stopped = _gf_true;
- glusterd_brickinfo_t *brickinfo = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- cds_list_for_each_entry (volinfo, &conf->volumes, vol_list) {
- if (!glusterd_is_bitrot_enabled (volinfo))
- continue;
- else if (volinfo->status != GLUSTERD_STATUS_STARTED)
- continue;
- else {
- cds_list_for_each_entry (brickinfo, &volinfo->bricks,
- brick_list) {
- if (!glusterd_is_local_brick (this, volinfo,
- brickinfo))
- continue;
- stopped = _gf_false;
- return stopped;
- }
-
- /* Before stopping bitrot/scrubber daemon check
- * other volume also whether respective volume
- * host a brick from this node or not.*/
- continue;
- }
- }
-
- return stopped;
+ glusterd_conf_t *conf = THIS->private;
+ glusterd_volinfo_t *volinfo = NULL;
+ gf_boolean_t stopped = _gf_true;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ cds_list_for_each_entry(volinfo, &conf->volumes, vol_list)
+ {
+ if (!glusterd_is_bitrot_enabled(volinfo))
+ continue;
+ else if (volinfo->status != GLUSTERD_STATUS_STARTED)
+ continue;
+ else {
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ if (!glusterd_is_local_brick(this, volinfo, brickinfo))
+ continue;
+ stopped = _gf_false;
+ return stopped;
+ }
+
+ /* Before stopping bitrot/scrubber daemon check
+ * other volume also whether respective volume
+ * host a brick from this node or not.*/
+ continue;
+ }
+ }
+
+ return stopped;
}
static int
-glusterd_manage_bitrot (int opcode)
+glusterd_manage_bitrot(int opcode)
{
- int ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
+ int ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
- this = THIS;
- GF_ASSERT (this);
+ this = THIS;
+ GF_ASSERT(this);
- priv = this->private;
- GF_ASSERT (priv);
+ priv = this->private;
+ GF_ASSERT(priv);
- switch (opcode) {
+ switch (opcode) {
case GF_BITROT_OPTION_TYPE_ENABLE:
case GF_BITROT_OPTION_TYPE_DISABLE:
- ret = priv->bitd_svc.manager (&(priv->bitd_svc),
- NULL, PROC_START_NO_WAIT);
- if (ret)
- break;
- ret = priv->scrub_svc.manager (&(priv->scrub_svc), NULL,
- PROC_START_NO_WAIT);
+ ret = priv->bitd_svc.manager(&(priv->bitd_svc), NULL,
+ PROC_START_NO_WAIT);
+ if (ret)
break;
+ ret = priv->scrub_svc.manager(&(priv->scrub_svc), NULL,
+ PROC_START_NO_WAIT);
+ break;
default:
- ret = 0;
- break;
- }
-
- return ret;
+ ret = 0;
+ break;
+ }
+ return ret;
}
int
-glusterd_op_bitrot (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
+glusterd_op_bitrot(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
- glusterd_volinfo_t *volinfo = NULL;
- int32_t ret = -1;
- char *volname = NULL;
- int type = -1;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
-
- GF_ASSERT (dict);
- GF_ASSERT (op_errstr);
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- ret = dict_get_str (dict, "volname", &volname);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to get volume name");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- gf_asprintf (op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname);
- goto out;
- }
-
- ret = dict_get_int32 (dict, "type", &type);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to get type from "
- "dict");
- goto out;
- }
-
- switch (type) {
+ glusterd_volinfo_t *volinfo = NULL;
+ int32_t ret = -1;
+ char *volname = NULL;
+ int type = -1;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ GF_ASSERT(dict);
+ GF_ASSERT(op_errstr);
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = dict_get_str(dict, "volname", &volname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_asprintf(op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname);
+ goto out;
+ }
+
+ ret = dict_get_int32(dict, "type", &type);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get type from "
+ "dict");
+ goto out;
+ }
+
+ switch (type) {
case GF_BITROT_OPTION_TYPE_ENABLE:
- ret = glusterd_bitrot_enable (volinfo, op_errstr);
- if (ret < 0)
- goto out;
- break;
+ ret = glusterd_bitrot_enable(volinfo, op_errstr);
+ if (ret < 0)
+ goto out;
+ break;
case GF_BITROT_OPTION_TYPE_DISABLE:
- ret = glusterd_bitrot_disable (volinfo, op_errstr);
- if (ret < 0)
- goto out;
+ ret = glusterd_bitrot_disable(volinfo, op_errstr);
+ if (ret < 0)
+ goto out;
- break;
+ break;
case GF_BITROT_OPTION_TYPE_SCRUB_THROTTLE:
- ret = glusterd_bitrot_scrub_throttle (volinfo, dict,
- "features.scrub-throttle",
- op_errstr);
- if (ret)
- goto out;
- break;
+ ret = glusterd_bitrot_scrub_throttle(
+ volinfo, dict, "features.scrub-throttle", op_errstr);
+ if (ret)
+ goto out;
+ break;
case GF_BITROT_OPTION_TYPE_SCRUB_FREQ:
- ret = glusterd_bitrot_scrub_freq (volinfo, dict,
- "features.scrub-freq",
- op_errstr);
- if (ret)
- goto out;
- break;
+ ret = glusterd_bitrot_scrub_freq(volinfo, dict,
+ "features.scrub-freq", op_errstr);
+ if (ret)
+ goto out;
+ break;
case GF_BITROT_OPTION_TYPE_SCRUB:
- ret = glusterd_bitrot_scrub (volinfo, dict, "features.scrub",
- op_errstr);
- if (ret)
- goto out;
- break;
+ ret = glusterd_bitrot_scrub(volinfo, dict, "features.scrub",
+ op_errstr);
+ if (ret)
+ goto out;
+ break;
case GF_BITROT_OPTION_TYPE_EXPIRY_TIME:
- ret = glusterd_bitrot_expiry_time (volinfo, dict,
- "features.expiry-time",
- op_errstr);
- if (ret)
- goto out;
+ ret = glusterd_bitrot_expiry_time(
+ volinfo, dict, "features.expiry-time", op_errstr);
+ if (ret)
+ goto out;
case GF_BITROT_CMD_SCRUB_STATUS:
case GF_BITROT_CMD_SCRUB_ONDEMAND:
- break;
+ break;
default:
- gf_asprintf (op_errstr, "Bitrot command failed. Invalid "
- "opcode");
- ret = -1;
- goto out;
- }
-
- ret = glusterd_manage_bitrot (type);
- if (ret)
- goto out;
-
- ret = glusterd_create_volfiles_and_notify_services (volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLFILE_CREATE_FAIL, "Unable to re-create "
- "volfiles");
- ret = -1;
- goto out;
- }
-
- ret = glusterd_store_volinfo (volinfo,
- GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret) {
- gf_msg_debug (this->name, 0, "Failed to store volinfo for "
- "bitrot");
- goto out;
- }
+ gf_asprintf(op_errstr,
+ "Bitrot command failed. Invalid "
+ "opcode");
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_manage_bitrot(type);
+ if (ret)
+ goto out;
+
+ ret = glusterd_create_volfiles_and_notify_services(volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
+ "Unable to re-create "
+ "volfiles");
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret) {
+ gf_msg_debug(this->name, 0,
+ "Failed to store volinfo for "
+ "bitrot");
+ goto out;
+ }
out:
- return ret;
+ return ret;
}
int
-glusterd_op_stage_bitrot (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
+glusterd_op_stage_bitrot(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
- int ret = 0;
- char *volname = NULL;
- char *scrub_cmd = NULL;
- char *scrub_cmd_from_dict = NULL;
- char msg[2048] = {0,};
- int type = 0;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *volinfo = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- GF_ASSERT (dict);
- GF_ASSERT (op_errstr);
-
- ret = dict_get_str (dict, "volname", &volname);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to get volume name");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- gf_asprintf (op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname);
- goto out;
- }
-
- if (!glusterd_is_volume_started (volinfo)) {
- *op_errstr = gf_strdup ("Volume is stopped, start volume "
- "before executing bit rot command.");
+ int ret = 0;
+ char *volname = NULL;
+ char *scrub_cmd = NULL;
+ char *scrub_cmd_from_dict = NULL;
+ char msg[2048] = {
+ 0,
+ };
+ int type = 0;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ GF_ASSERT(dict);
+ GF_ASSERT(op_errstr);
+
+ ret = dict_get_str(dict, "volname", &volname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_asprintf(op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname);
+ goto out;
+ }
+
+ if (!glusterd_is_volume_started(volinfo)) {
+ *op_errstr = gf_strdup(
+ "Volume is stopped, start volume "
+ "before executing bit rot command.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_int32(dict, "type", &type);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get type for "
+ "operation");
+
+ *op_errstr = gf_strdup(
+ "Staging stage failed for bitrot "
+ "operation.");
+ goto out;
+ }
+
+ if ((GF_BITROT_OPTION_TYPE_ENABLE != type) &&
+ (glusterd_is_bitrot_enabled(volinfo) == 0)) {
+ ret = -1;
+ gf_asprintf(op_errstr, "Bitrot is not enabled on volume %s", volname);
+ goto out;
+ }
+
+ if ((GF_BITROT_OPTION_TYPE_SCRUB == type)) {
+ ret = dict_get_str(volinfo->dict, "features.scrub",
+ &scrub_cmd_from_dict);
+ if (!ret) {
+ ret = dict_get_str(dict, "scrub-value", &scrub_cmd);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to "
+ "get scrub-value");
+ *op_errstr = gf_strdup(
+ "Staging failed for "
+ "bitrot operation. "
+ "Please check log file"
+ " for more details.");
+ goto out;
+ }
+ /* If scrubber is resume then value of scrubber will be
+ * "Active" in the dictionary. */
+ if (!strcmp(scrub_cmd_from_dict, scrub_cmd) ||
+ (!strncmp("Active", scrub_cmd_from_dict, SLEN("Active")) &&
+ !strncmp("resume", scrub_cmd, SLEN("resume")))) {
+ snprintf(msg, sizeof(msg),
+ "Scrub is already"
+ " %sd for volume %s",
+ scrub_cmd, volinfo->volname);
+ *op_errstr = gf_strdup(msg);
ret = -1;
goto out;
+ }
}
+ ret = 0;
+ }
- ret = dict_get_int32 (dict, "type", &type);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to get type for "
- "operation");
-
- *op_errstr = gf_strdup ("Staging stage failed for bitrot "
- "operation.");
- goto out;
- }
-
-
- if ((GF_BITROT_OPTION_TYPE_ENABLE != type) &&
- (glusterd_is_bitrot_enabled (volinfo) == 0)) {
- ret = -1;
- gf_asprintf (op_errstr, "Bitrot is not enabled on volume %s",
- volname);
- goto out;
- }
-
- if ((GF_BITROT_OPTION_TYPE_SCRUB == type)) {
- ret = dict_get_str (volinfo->dict, "features.scrub",
- &scrub_cmd_from_dict);
- if (!ret) {
- ret = dict_get_str (dict, "scrub-value", &scrub_cmd);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to "
- "get scrub-value");
- *op_errstr = gf_strdup ("Staging failed for "
- "bitrot operation. "
- "Please check log file"
- " for more details.");
- goto out;
- }
- /* If scrubber is resume then value of scrubber will be
- * "Active" in the dictionary. */
- if (!strcmp (scrub_cmd_from_dict, scrub_cmd) ||
- (!strncmp ("Active", scrub_cmd_from_dict,
- SLEN ("Active")) && !strncmp ("resume",
- scrub_cmd, SLEN ("resume")))) {
- snprintf (msg, sizeof (msg), "Scrub is already"
- " %sd for volume %s", scrub_cmd,
- volinfo->volname);
- *op_errstr = gf_strdup (msg);
- ret = -1;
- goto out;
- }
- }
- ret = 0;
- }
-
- out:
- if (ret && op_errstr && *op_errstr)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_OP_STAGE_BITROT_FAIL, "%s", *op_errstr);
- gf_msg_debug (this->name, 0, "Returning %d", ret);
+out:
+ if (ret && op_errstr && *op_errstr)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_STAGE_BITROT_FAIL, "%s",
+ *op_errstr);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
- return ret;
+ return ret;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
index 73dcfaaa2b6..d4bc8b21cad 100644
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
@@ -28,31 +28,32 @@
/* misc */
gf_boolean_t
-glusterd_is_tiering_supported (char *op_errstr)
+glusterd_is_tiering_supported(char *op_errstr)
{
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- gf_boolean_t supported = _gf_false;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ gf_boolean_t supported = _gf_false;
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
- conf = this->private;
- GF_VALIDATE_OR_GOTO (this->name, conf, out);
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
- if (conf->op_version < GD_OP_VERSION_3_7_0)
- goto out;
+ if (conf->op_version < GD_OP_VERSION_3_7_0)
+ goto out;
- supported = _gf_true;
+ supported = _gf_true;
out:
- if (!supported && op_errstr != NULL && conf)
- sprintf (op_errstr, "Tier operation failed. The cluster is "
- "operating at version %d. Tiering"
- " is unavailable in this version.",
- conf->op_version);
-
- return supported;
+ if (!supported && op_errstr != NULL && conf)
+ sprintf(op_errstr,
+ "Tier operation failed. The cluster is "
+ "operating at version %d. Tiering"
+ " is unavailable in this version.",
+ conf->op_version);
+
+ return supported;
}
/* In this function, we decide, based on the 'count' of the brick,
@@ -60,2394 +61,2369 @@ out:
how many of the given bricks are added. other argument are self-
descriptive. */
int
-add_brick_at_right_order (glusterd_brickinfo_t *brickinfo,
- glusterd_volinfo_t *volinfo, int count,
- int32_t stripe_cnt, int32_t replica_cnt)
+add_brick_at_right_order(glusterd_brickinfo_t *brickinfo,
+ glusterd_volinfo_t *volinfo, int count,
+ int32_t stripe_cnt, int32_t replica_cnt)
{
- int idx = 0;
- int i = 0;
- int sub_cnt = 0;
- glusterd_brickinfo_t *brick = NULL;
-
- /* The complexity of the function is in deciding at which index
- to add new brick. Even though it can be defined with a complex
- single formula for all volume, it is separated out to make it
- more readable */
- if (stripe_cnt) {
- /* common formula when 'stripe_count' is set */
- /* idx = ((count / ((stripe_cnt * volinfo->replica_count) -
- volinfo->dist_leaf_count)) * volinfo->dist_leaf_count) +
- (count + volinfo->dist_leaf_count);
- */
-
- sub_cnt = volinfo->dist_leaf_count;
-
- idx = ((count / ((stripe_cnt * volinfo->replica_count) -
- sub_cnt)) * sub_cnt) +
- (count + sub_cnt);
-
- goto insert_brick;
- }
-
- /* replica count is set */
- /* common formula when 'replica_count' is set */
- /* idx = ((count / (replica_cnt - existing_replica_count)) *
- existing_replica_count) +
- (count + existing_replica_count);
+ int idx = 0;
+ int i = 0;
+ int sub_cnt = 0;
+ glusterd_brickinfo_t *brick = NULL;
+
+ /* The complexity of the function is in deciding at which index
+ to add new brick. Even though it can be defined with a complex
+ single formula for all volume, it is separated out to make it
+ more readable */
+ if (stripe_cnt) {
+ /* common formula when 'stripe_count' is set */
+ /* idx = ((count / ((stripe_cnt * volinfo->replica_count) -
+ volinfo->dist_leaf_count)) * volinfo->dist_leaf_count) +
+ (count + volinfo->dist_leaf_count);
*/
- sub_cnt = volinfo->replica_count;
- idx = (count / (replica_cnt - sub_cnt) * sub_cnt) +
- (count + sub_cnt);
+ sub_cnt = volinfo->dist_leaf_count;
-insert_brick:
- i = 0;
- cds_list_for_each_entry (brick, &volinfo->bricks, brick_list) {
- i++;
- if (i < idx)
- continue;
- gf_msg_debug (THIS->name, 0, "brick:%s index=%d, count=%d",
- brick->path, idx, count);
-
- cds_list_add (&brickinfo->brick_list, &brick->brick_list);
- break;
- }
+ idx = ((count / ((stripe_cnt * volinfo->replica_count) - sub_cnt)) *
+ sub_cnt) +
+ (count + sub_cnt);
- return 0;
-}
+ goto insert_brick;
+ }
+ /* replica count is set */
+ /* common formula when 'replica_count' is set */
+ /* idx = ((count / (replica_cnt - existing_replica_count)) *
+ existing_replica_count) +
+ (count + existing_replica_count);
+ */
+
+ sub_cnt = volinfo->replica_count;
+ idx = (count / (replica_cnt - sub_cnt) * sub_cnt) + (count + sub_cnt);
+
+insert_brick:
+ i = 0;
+ cds_list_for_each_entry(brick, &volinfo->bricks, brick_list)
+ {
+ i++;
+ if (i < idx)
+ continue;
+ gf_msg_debug(THIS->name, 0, "brick:%s index=%d, count=%d", brick->path,
+ idx, count);
+
+ cds_list_add(&brickinfo->brick_list, &brick->brick_list);
+ break;
+ }
+
+ return 0;
+}
static int
-gd_addbr_validate_stripe_count (glusterd_volinfo_t *volinfo, int stripe_count,
- int total_bricks, int *type, char *err_str,
- size_t err_len)
+gd_addbr_validate_stripe_count(glusterd_volinfo_t *volinfo, int stripe_count,
+ int total_bricks, int *type, char *err_str,
+ size_t err_len)
{
- int ret = -1;
+ int ret = -1;
- switch (volinfo->type) {
+ switch (volinfo->type) {
case GF_CLUSTER_TYPE_NONE:
- if ((volinfo->brick_count * stripe_count) == total_bricks) {
- /* Change the volume type */
- *type = GF_CLUSTER_TYPE_STRIPE;
- gf_msg (THIS->name, GF_LOG_INFO, 0,
- GD_MSG_VOL_TYPE_CHANGING_INFO,
- "Changing the type of volume %s from "
- "'distribute' to 'stripe'", volinfo->volname);
- ret = 0;
- goto out;
- } else {
- snprintf (err_str, err_len, "Incorrect number of "
- "bricks (%d) supplied for stripe count (%d).",
- (total_bricks - volinfo->brick_count),
- stripe_count);
- gf_msg (THIS->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "%s", err_str);
- goto out;
- }
- break;
+ if ((volinfo->brick_count * stripe_count) == total_bricks) {
+ /* Change the volume type */
+ *type = GF_CLUSTER_TYPE_STRIPE;
+ gf_msg(THIS->name, GF_LOG_INFO, 0,
+ GD_MSG_VOL_TYPE_CHANGING_INFO,
+ "Changing the type of volume %s from "
+ "'distribute' to 'stripe'",
+ volinfo->volname);
+ ret = 0;
+ goto out;
+ } else {
+ snprintf(err_str, err_len,
+ "Incorrect number of "
+ "bricks (%d) supplied for stripe count (%d).",
+ (total_bricks - volinfo->brick_count), stripe_count);
+ gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "%s", err_str);
+ goto out;
+ }
+ break;
case GF_CLUSTER_TYPE_REPLICATE:
- if (!(total_bricks % (volinfo->replica_count * stripe_count))) {
- /* Change the volume type */
- *type = GF_CLUSTER_TYPE_STRIPE_REPLICATE;
- gf_msg (THIS->name, GF_LOG_INFO, 0,
- GD_MSG_VOL_TYPE_CHANGING_INFO,
- "Changing the type of volume %s from "
- "'replicate' to 'replicate-stripe'",
- volinfo->volname);
- ret = 0;
- goto out;
- } else {
- snprintf (err_str, err_len, "Incorrect number of "
- "bricks (%d) supplied for changing volume's "
- "stripe count to %d, need at least %d bricks",
- (total_bricks - volinfo->brick_count),
- stripe_count,
- (volinfo->replica_count * stripe_count));
- gf_msg (THIS->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "%s", err_str);
- goto out;
- }
- break;
+ if (!(total_bricks % (volinfo->replica_count * stripe_count))) {
+ /* Change the volume type */
+ *type = GF_CLUSTER_TYPE_STRIPE_REPLICATE;
+ gf_msg(THIS->name, GF_LOG_INFO, 0,
+ GD_MSG_VOL_TYPE_CHANGING_INFO,
+ "Changing the type of volume %s from "
+ "'replicate' to 'replicate-stripe'",
+ volinfo->volname);
+ ret = 0;
+ goto out;
+ } else {
+ snprintf(err_str, err_len,
+ "Incorrect number of "
+ "bricks (%d) supplied for changing volume's "
+ "stripe count to %d, need at least %d bricks",
+ (total_bricks - volinfo->brick_count), stripe_count,
+ (volinfo->replica_count * stripe_count));
+ gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "%s", err_str);
+ goto out;
+ }
+ break;
case GF_CLUSTER_TYPE_STRIPE:
case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
- if (stripe_count < volinfo->stripe_count) {
- snprintf (err_str, err_len,
- "Incorrect stripe count (%d) supplied. "
- "Volume already has stripe count (%d)",
- stripe_count, volinfo->stripe_count);
- gf_msg (THIS->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "%s", err_str);
- goto out;
- }
- if (stripe_count == volinfo->stripe_count) {
- if (!(total_bricks % volinfo->dist_leaf_count)) {
- /* its same as the one which exists */
- ret = 1;
- goto out;
- }
- }
- if (stripe_count > volinfo->stripe_count) {
- /* We have to make sure before and after 'add-brick',
- the number or subvolumes for distribute will remain
- same, when stripe count is given */
- if ((volinfo->brick_count * (stripe_count *
- volinfo->replica_count)) ==
- (total_bricks * volinfo->dist_leaf_count)) {
- /* Change the dist_leaf_count */
- gf_msg (THIS->name, GF_LOG_INFO, 0,
- GD_MSG_STRIPE_COUNT_CHANGE_INFO,
- "Changing the stripe count of "
- "volume %s from %d to %d",
- volinfo->volname,
- volinfo->stripe_count, stripe_count);
- ret = 0;
- goto out;
- }
- }
- break;
+ if (stripe_count < volinfo->stripe_count) {
+ snprintf(err_str, err_len,
+ "Incorrect stripe count (%d) supplied. "
+ "Volume already has stripe count (%d)",
+ stripe_count, volinfo->stripe_count);
+ gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "%s", err_str);
+ goto out;
+ }
+ if (stripe_count == volinfo->stripe_count) {
+ if (!(total_bricks % volinfo->dist_leaf_count)) {
+ /* its same as the one which exists */
+ ret = 1;
+ goto out;
+ }
+ }
+ if (stripe_count > volinfo->stripe_count) {
+ /* We have to make sure before and after 'add-brick',
+ the number or subvolumes for distribute will remain
+ same, when stripe count is given */
+ if ((volinfo->brick_count *
+ (stripe_count * volinfo->replica_count)) ==
+ (total_bricks * volinfo->dist_leaf_count)) {
+ /* Change the dist_leaf_count */
+ gf_msg(THIS->name, GF_LOG_INFO, 0,
+ GD_MSG_STRIPE_COUNT_CHANGE_INFO,
+ "Changing the stripe count of "
+ "volume %s from %d to %d",
+ volinfo->volname, volinfo->stripe_count,
+ stripe_count);
+ ret = 0;
+ goto out;
+ }
+ }
+ break;
case GF_CLUSTER_TYPE_DISPERSE:
- snprintf (err_str, err_len, "Volume %s cannot be converted "
- "from dispersed to striped-"
- "dispersed", volinfo->volname);
- gf_msg(THIS->name, GF_LOG_ERROR, EPERM,
- GD_MSG_OP_NOT_PERMITTED, "%s", err_str);
- goto out;
- }
+ snprintf(err_str, err_len,
+ "Volume %s cannot be converted "
+ "from dispersed to striped-"
+ "dispersed",
+ volinfo->volname);
+ gf_msg(THIS->name, GF_LOG_ERROR, EPERM, GD_MSG_OP_NOT_PERMITTED,
+ "%s", err_str);
+ goto out;
+ }
out:
- return ret;
+ return ret;
}
static int
-gd_addbr_validate_replica_count (glusterd_volinfo_t *volinfo, int replica_count,
- int arbiter_count, int total_bricks, int *type,
- char *err_str, int err_len)
+gd_addbr_validate_replica_count(glusterd_volinfo_t *volinfo, int replica_count,
+ int arbiter_count, int total_bricks, int *type,
+ char *err_str, int err_len)
{
- int ret = -1;
+ int ret = -1;
- /* replica count is set */
- switch (volinfo->type) {
+ /* replica count is set */
+ switch (volinfo->type) {
case GF_CLUSTER_TYPE_NONE:
- if ((volinfo->brick_count * replica_count) == total_bricks) {
- /* Change the volume type */
- *type = GF_CLUSTER_TYPE_REPLICATE;
- gf_msg (THIS->name, GF_LOG_INFO, 0,
- GD_MSG_VOL_TYPE_CHANGING_INFO,
- "Changing the type of volume %s from "
- "'distribute' to 'replica'", volinfo->volname);
- ret = 0;
- goto out;
+ if ((volinfo->brick_count * replica_count) == total_bricks) {
+ /* Change the volume type */
+ *type = GF_CLUSTER_TYPE_REPLICATE;
+ gf_msg(THIS->name, GF_LOG_INFO, 0,
+ GD_MSG_VOL_TYPE_CHANGING_INFO,
+ "Changing the type of volume %s from "
+ "'distribute' to 'replica'",
+ volinfo->volname);
+ ret = 0;
+ goto out;
- } else {
- snprintf (err_str, err_len, "Incorrect number of "
- "bricks (%d) supplied for replica count (%d).",
- (total_bricks - volinfo->brick_count),
- replica_count);
- gf_msg (THIS->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "%s", err_str);
- goto out;
- }
- break;
+ } else {
+ snprintf(err_str, err_len,
+ "Incorrect number of "
+ "bricks (%d) supplied for replica count (%d).",
+ (total_bricks - volinfo->brick_count), replica_count);
+ gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "%s", err_str);
+ goto out;
+ }
+ break;
case GF_CLUSTER_TYPE_STRIPE:
- if (!(total_bricks % (volinfo->dist_leaf_count * replica_count))) {
- /* Change the volume type */
- *type = GF_CLUSTER_TYPE_STRIPE_REPLICATE;
- gf_msg (THIS->name, GF_LOG_INFO, 0,
- GD_MSG_VOL_TYPE_CHANGING_INFO,
- "Changing the type of volume %s from "
- "'stripe' to 'replicate-stripe'",
- volinfo->volname);
- ret = 0;
- goto out;
- } else {
- snprintf (err_str, err_len, "Incorrect number of "
- "bricks (%d) supplied for changing volume's "
- "replica count to %d, need at least %d "
- "bricks",
- (total_bricks - volinfo->brick_count),
- replica_count, (volinfo->dist_leaf_count *
- replica_count));
- gf_msg (THIS->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "%s", err_str);
- goto out;
- }
- break;
+ if (!(total_bricks % (volinfo->dist_leaf_count * replica_count))) {
+ /* Change the volume type */
+ *type = GF_CLUSTER_TYPE_STRIPE_REPLICATE;
+ gf_msg(THIS->name, GF_LOG_INFO, 0,
+ GD_MSG_VOL_TYPE_CHANGING_INFO,
+ "Changing the type of volume %s from "
+ "'stripe' to 'replicate-stripe'",
+ volinfo->volname);
+ ret = 0;
+ goto out;
+ } else {
+ snprintf(err_str, err_len,
+ "Incorrect number of "
+ "bricks (%d) supplied for changing volume's "
+ "replica count to %d, need at least %d "
+ "bricks",
+ (total_bricks - volinfo->brick_count), replica_count,
+ (volinfo->dist_leaf_count * replica_count));
+ gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "%s", err_str);
+ goto out;
+ }
+ break;
case GF_CLUSTER_TYPE_REPLICATE:
case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
- if (replica_count < volinfo->replica_count) {
- snprintf (err_str, err_len,
- "Incorrect replica count (%d) supplied. "
- "Volume already has (%d)",
- replica_count, volinfo->replica_count);
- gf_msg (THIS->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "%s", err_str);
- goto out;
- }
- if (replica_count == volinfo->replica_count) {
- if (arbiter_count && !volinfo->arbiter_count) {
- snprintf (err_str, err_len,
- "Cannot convert replica 3 volume "
- "to arbiter volume.");
- gf_msg (THIS->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "%s", err_str);
- goto out;
- }
- if (!(total_bricks % volinfo->dist_leaf_count)) {
- ret = 1;
- goto out;
- }
- }
- if (replica_count > volinfo->replica_count) {
- /* We have to make sure before and after 'add-brick',
- the number or subvolumes for distribute will remain
- same, when replica count is given */
- if ((total_bricks * volinfo->dist_leaf_count) ==
- (volinfo->brick_count * (replica_count *
- volinfo->stripe_count))) {
- /* Change the dist_leaf_count */
- gf_msg (THIS->name, GF_LOG_INFO, 0,
- GD_MSG_REPLICA_COUNT_CHANGE_INFO,
- "Changing the replica count of "
- "volume %s from %d to %d",
- volinfo->volname, volinfo->replica_count,
- replica_count);
- ret = 0;
- goto out;
- }
- }
- break;
+ if (replica_count < volinfo->replica_count) {
+ snprintf(err_str, err_len,
+ "Incorrect replica count (%d) supplied. "
+ "Volume already has (%d)",
+ replica_count, volinfo->replica_count);
+ gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "%s", err_str);
+ goto out;
+ }
+ if (replica_count == volinfo->replica_count) {
+ if (arbiter_count && !volinfo->arbiter_count) {
+ snprintf(err_str, err_len,
+ "Cannot convert replica 3 volume "
+ "to arbiter volume.");
+ gf_msg(THIS->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_INVALID_ENTRY, "%s", err_str);
+ goto out;
+ }
+ if (!(total_bricks % volinfo->dist_leaf_count)) {
+ ret = 1;
+ goto out;
+ }
+ }
+ if (replica_count > volinfo->replica_count) {
+            /* We have to make sure that before and after 'add-brick',
+               the number of subvolumes for distribute remains the
+               same when a replica count is given */
+ if ((total_bricks * volinfo->dist_leaf_count) ==
+ (volinfo->brick_count *
+ (replica_count * volinfo->stripe_count))) {
+ /* Change the dist_leaf_count */
+ gf_msg(THIS->name, GF_LOG_INFO, 0,
+ GD_MSG_REPLICA_COUNT_CHANGE_INFO,
+ "Changing the replica count of "
+ "volume %s from %d to %d",
+ volinfo->volname, volinfo->replica_count,
+ replica_count);
+ ret = 0;
+ goto out;
+ }
+ }
+ break;
case GF_CLUSTER_TYPE_DISPERSE:
- snprintf (err_str, err_len, "Volume %s cannot be converted "
- "from dispersed to replicated-"
- "dispersed", volinfo->volname);
- gf_msg(THIS->name, GF_LOG_ERROR, EPERM,
- GD_MSG_OP_NOT_PERMITTED, "%s", err_str);
- goto out;
- }
+ snprintf(err_str, err_len,
+ "Volume %s cannot be converted "
+ "from dispersed to replicated-"
+ "dispersed",
+ volinfo->volname);
+ gf_msg(THIS->name, GF_LOG_ERROR, EPERM, GD_MSG_OP_NOT_PERMITTED,
+ "%s", err_str);
+ goto out;
+ }
out:
- return ret;
+ return ret;
}
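A note for readers tracing the control flow: gd_addbr_validate_replica_count() returns -1 on rejection, 0 when the volume type or count actually changes, and 1 when the requested count equals the current one; the caller further down resets the count to 0 on a return of 1. A self-contained sketch of that tri-state convention (simplified; the real equal-count and increase paths also check that the brick total keeps the subvolume layout intact):

#include <stdio.h>

/* Sketch of the validator return convention used above:
 *   -1 -> invalid request (err_str would be filled in)
 *    0 -> valid, the volume's type/count changes
 *    1 -> redundant, requested count equals the current count */
static int
validate_replica_count(int current, int requested, int total_bricks,
                       int leaf_count)
{
    if (requested < current)
        return -1;                  /* add-brick cannot reduce replica */
    if (requested == current)
        return (total_bricks % leaf_count) ? -1 : 1;
    return 0;                       /* genuine replica-count increase */
}

int
main(void)
{
    printf("%d\n", validate_replica_count(2, 3, 9, 2));  /* 0 */
    printf("%d\n", validate_replica_count(3, 3, 6, 3));  /* 1 */
    printf("%d\n", validate_replica_count(3, 2, 6, 3));  /* -1 */
    return 0;
}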
static int
-gd_rmbr_validate_replica_count (glusterd_volinfo_t *volinfo,
- int32_t replica_count,
- int32_t brick_count, char *err_str,
- size_t err_len)
+gd_rmbr_validate_replica_count(glusterd_volinfo_t *volinfo,
+ int32_t replica_count, int32_t brick_count,
+ char *err_str, size_t err_len)
{
- int ret = -1;
- int replica_nodes = 0;
+ int ret = -1;
+ int replica_nodes = 0;
- switch (volinfo->type) {
+ switch (volinfo->type) {
case GF_CLUSTER_TYPE_TIER:
- ret = 1;
- goto out;
+ ret = 1;
+ goto out;
case GF_CLUSTER_TYPE_NONE:
case GF_CLUSTER_TYPE_STRIPE:
case GF_CLUSTER_TYPE_DISPERSE:
- snprintf (err_str, err_len,
- "replica count (%d) option given for non replicate "
- "volume %s", replica_count, volinfo->volname);
- gf_msg (THIS->name, GF_LOG_WARNING, 0,
- GD_MSG_VOL_NOT_REPLICA, "%s", err_str);
- goto out;
+ snprintf(err_str, err_len,
+ "replica count (%d) option given for non replicate "
+ "volume %s",
+ replica_count, volinfo->volname);
+ gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_REPLICA, "%s",
+ err_str);
+ goto out;
case GF_CLUSTER_TYPE_REPLICATE:
case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
- /* in remove brick, you can only reduce the replica count */
- if (replica_count > volinfo->replica_count) {
- snprintf (err_str, err_len,
- "given replica count (%d) option is more "
- "than volume %s's replica count (%d)",
- replica_count, volinfo->volname,
- volinfo->replica_count);
- gf_msg (THIS->name, GF_LOG_WARNING, EINVAL,
- GD_MSG_INVALID_ENTRY, "%s", err_str);
- goto out;
- }
- if (replica_count == volinfo->replica_count) {
- /* This means the 'replica N' option on CLI was
- redundant. Check if the total number of bricks given
- for removal is same as 'dist_leaf_count' */
- if (brick_count % volinfo->dist_leaf_count) {
- snprintf (err_str, err_len,
- "number of bricks provided (%d) is "
- "not valid. need at least %d "
- "(or %dxN)", brick_count,
- volinfo->dist_leaf_count,
- volinfo->dist_leaf_count);
- gf_msg (THIS->name, GF_LOG_WARNING, EINVAL,
- GD_MSG_INVALID_ENTRY, "%s",
- err_str);
- goto out;
- }
- ret = 1;
- goto out;
+ /* in remove brick, you can only reduce the replica count */
+ if (replica_count > volinfo->replica_count) {
+ snprintf(err_str, err_len,
+ "given replica count (%d) option is more "
+ "than volume %s's replica count (%d)",
+ replica_count, volinfo->volname,
+ volinfo->replica_count);
+ gf_msg(THIS->name, GF_LOG_WARNING, EINVAL, GD_MSG_INVALID_ENTRY,
+ "%s", err_str);
+ goto out;
+ }
+ if (replica_count == volinfo->replica_count) {
+ /* This means the 'replica N' option on CLI was
+ redundant. Check if the total number of bricks given
+ for removal is same as 'dist_leaf_count' */
+ if (brick_count % volinfo->dist_leaf_count) {
+ snprintf(err_str, err_len,
+ "number of bricks provided (%d) is "
+ "not valid. need at least %d "
+ "(or %dxN)",
+ brick_count, volinfo->dist_leaf_count,
+ volinfo->dist_leaf_count);
+ gf_msg(THIS->name, GF_LOG_WARNING, EINVAL,
+ GD_MSG_INVALID_ENTRY, "%s", err_str);
+ goto out;
}
+ ret = 1;
+ goto out;
+ }
- replica_nodes = ((volinfo->brick_count /
- volinfo->replica_count) *
- (volinfo->replica_count - replica_count));
+ replica_nodes = ((volinfo->brick_count / volinfo->replica_count) *
+ (volinfo->replica_count - replica_count));
- if (brick_count % replica_nodes) {
- snprintf (err_str, err_len,
- "need %d(xN) bricks for reducing replica "
- "count of the volume from %d to %d",
- replica_nodes, volinfo->replica_count,
- replica_count);
- goto out;
- }
- break;
- }
+ if (brick_count % replica_nodes) {
+ snprintf(err_str, err_len,
+ "need %d(xN) bricks for reducing replica "
+ "count of the volume from %d to %d",
+ replica_nodes, volinfo->replica_count, replica_count);
+ goto out;
+ }
+ break;
+ }
- ret = 0;
+ ret = 0;
out:
- return ret;
+ return ret;
}
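The replica_nodes computation above fixes how many bricks must be named per reduction step: one per surviving subvolume times the drop in replica count. A worked example with an assumed 2x3 volume:

#include <stdio.h>

int
main(void)
{
    /* Assumed example: 2x3 volume -> 6 bricks, replica 3, reducing to 2. */
    int brick_count = 6;
    int replica_count = 3;
    int new_replica = 2;

    /* Mirrors: replica_nodes = (brick_count / replica_count) *
     *                          (replica_count - new_replica); */
    int replica_nodes = (brick_count / replica_count) *
                        (replica_count - new_replica);

    printf("must remove %d(xN) bricks\n", replica_nodes); /* 2(xN) */
    return 0;
}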
/* Handler functions */
int
-__glusterd_handle_add_brick (rpcsvc_request_t *req)
+__glusterd_handle_add_brick(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf_cli_req cli_req = {{0,}};
- dict_t *dict = NULL;
- char *bricks = NULL;
- char *volname = NULL;
- int brick_count = 0;
- void *cli_rsp = NULL;
- char err_str[2048] = "";
- gf_cli_rsp rsp = {0,};
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
- int total_bricks = 0;
- int32_t replica_count = 0;
- int32_t arbiter_count = 0;
- int32_t stripe_count = 0;
- int type = 0;
- glusterd_conf_t *conf = NULL;
-
- this = THIS;
- GF_ASSERT(this);
-
- GF_ASSERT (req);
-
- conf = this->private;
- GF_ASSERT (conf);
-
- ret = xdr_to_generic (req->msg[0], &cli_req,
- (xdrproc_t)xdr_gf_cli_req);
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{
+ 0,
+ }};
+ dict_t *dict = NULL;
+ char *bricks = NULL;
+ char *volname = NULL;
+ int brick_count = 0;
+ void *cli_rsp = NULL;
+ char err_str[2048] = "";
+ gf_cli_rsp rsp = {
+ 0,
+ };
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+ int total_bricks = 0;
+ int32_t replica_count = 0;
+ int32_t arbiter_count = 0;
+ int32_t stripe_count = 0;
+ int type = 0;
+ glusterd_conf_t *conf = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(req);
+
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+        // failed to decode msg
+ req->rpc_err = GARBAGE_ARGS;
+ snprintf(err_str, sizeof(err_str), "Garbage args received");
+ goto out;
+ }
+
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_ADD_BRICK_REQ_RECVD,
+ "Received add brick req");
+
+ if (cli_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new();
+
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
if (ret < 0) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
- snprintf (err_str, sizeof (err_str), "Garbage args received");
- goto out;
- }
-
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_ADD_BRICK_REQ_RECVD, "Received add brick req");
-
- if (cli_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new ();
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to "
- "unserialize req-buffer to dictionary");
- snprintf (err_str, sizeof (err_str), "Unable to decode "
- "the command");
- goto out;
- }
- }
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
-
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Unable to get volume "
- "name");
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "%s", err_str);
- goto out;
- }
-
- if (!(ret = glusterd_check_volume_exists (volname))) {
- ret = -1;
- snprintf (err_str, sizeof (err_str), "Volume %s does not exist",
- volname);
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_VOL_NOT_FOUND, "%s", err_str);
- goto out;
- }
-
- ret = dict_get_int32n (dict, "count", SLEN ("count"), &brick_count);
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Unable to get volume "
- "brick count");
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "%s", err_str);
- goto out;
- }
-
- ret = dict_get_int32n (dict, "replica-count", SLEN ("replica-count"),
- &replica_count);
- if (!ret) {
- gf_msg (this->name, GF_LOG_INFO, errno,
- GD_MSG_DICT_GET_SUCCESS, "replica-count is %d",
- replica_count);
- }
-
- ret = dict_get_int32n (dict, "arbiter-count", SLEN ("arbiter-count"),
- &arbiter_count);
- if (!ret) {
- gf_msg (this->name, GF_LOG_INFO, errno,
- GD_MSG_DICT_GET_SUCCESS, "arbiter-count is %d",
- arbiter_count);
- }
-
- ret = dict_get_int32n (dict, "stripe-count", SLEN ("stripe-count"),
- &stripe_count);
- if (!ret) {
- gf_msg (this->name, GF_LOG_INFO, errno,
- GD_MSG_DICT_GET_SUCCESS, "stripe-count is %d",
- stripe_count);
- }
-
- if (!dict_getn (dict, "force", SLEN ("force"))) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Failed to get flag");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Unable to get volinfo "
- "for volume name %s", volname);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLINFO_GET_FAIL, "%s", err_str);
- goto out;
-
+ gf_msg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf(err_str, sizeof(err_str),
+ "Unable to decode "
+ "the command");
+ goto out;
+ }
+ }
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Unable to get volume "
+ "name");
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
+ err_str);
+ goto out;
+ }
+
+ if (!(ret = glusterd_check_volume_exists(volname))) {
+ ret = -1;
+ snprintf(err_str, sizeof(err_str), "Volume %s does not exist", volname);
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND, "%s",
+ err_str);
+ goto out;
+ }
+
+ ret = dict_get_int32n(dict, "count", SLEN("count"), &brick_count);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Unable to get volume "
+ "brick count");
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
+ err_str);
+ goto out;
+ }
+
+ ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
+ &replica_count);
+ if (!ret) {
+ gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS,
+ "replica-count is %d", replica_count);
+ }
+
+ ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"),
+ &arbiter_count);
+ if (!ret) {
+ gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS,
+ "arbiter-count is %d", arbiter_count);
+ }
+
+ ret = dict_get_int32n(dict, "stripe-count", SLEN("stripe-count"),
+ &stripe_count);
+ if (!ret) {
+ gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS,
+ "stripe-count is %d", stripe_count);
+ }
+
+ if (!dict_getn(dict, "force", SLEN("force"))) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Failed to get flag");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Unable to get volinfo "
+ "for volume name %s",
+ volname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, "%s",
+ err_str);
+ goto out;
+ }
+
+ total_bricks = volinfo->brick_count + brick_count;
+
+ if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) {
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ snprintf(err_str, sizeof(err_str), "Volume %s is already a tier.",
+ volname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_ALREADY_TIER, "%s",
+ err_str);
+ ret = -1;
+ goto out;
}
- total_bricks = volinfo->brick_count + brick_count;
-
- if (dict_getn (dict, "attach-tier", SLEN ("attach-tier"))) {
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- snprintf (err_str, sizeof (err_str),
- "Volume %s is already a tier.", volname);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_ALREADY_TIER, "%s", err_str);
- ret = -1;
- goto out;
- }
-
- if (glusterd_is_tiering_supported(err_str) == _gf_false) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VERSION_UNSUPPORTED,
- "Tiering not supported at this version");
- ret = -1;
- goto out;
- }
-
- ret = dict_get_int32n (dict, "hot-type", SLEN ("hot-type"), &type);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED,
- "failed to get type from dictionary");
- goto out;
- }
-
- goto brick_val;
+ if (glusterd_is_tiering_supported(err_str) == _gf_false) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED,
+ "Tiering not supported at this version");
+ ret = -1;
+ goto out;
}
- ret = glusterd_disallow_op_for_tier (volinfo, GD_OP_ADD_BRICK, -1);
+ ret = dict_get_int32n(dict, "hot-type", SLEN("hot-type"), &type);
if (ret) {
- snprintf (err_str, sizeof (err_str), "Add-brick operation is "
- "not supported on a tiered volume %s", volname);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_OP_UNSUPPORTED, "%s", err_str);
- goto out;
- }
-
- if (!stripe_count && !replica_count) {
- if (volinfo->type == GF_CLUSTER_TYPE_NONE)
- goto brick_val;
-
- if ((volinfo->brick_count < volinfo->dist_leaf_count) &&
- (total_bricks <= volinfo->dist_leaf_count))
- goto brick_val;
-
- if ((brick_count % volinfo->dist_leaf_count) != 0) {
- snprintf (err_str, sizeof (err_str), "Incorrect number "
- "of bricks supplied %d with count %d",
- brick_count, volinfo->dist_leaf_count);
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_VOL_NOT_REPLICA, "%s", err_str);
- ret = -1;
- goto out;
- }
- goto brick_val;
- /* done with validation.. below section is if stripe|replica
- count is given */
- }
-
- /* These bricks needs to be added one per a replica or stripe volume */
- if (stripe_count) {
- ret = gd_addbr_validate_stripe_count (volinfo, stripe_count,
- total_bricks, &type,
- err_str,
- sizeof (err_str));
- if (ret == -1) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_COUNT_VALIDATE_FAILED, "%s", err_str);
- goto out;
- }
-
- /* if stripe count is same as earlier, set it back to 0 */
- if (ret == 1)
- stripe_count = 0;
-
- ret = dict_set_int32n (dict, "stripe-count",
- SLEN ("stripe-count"), stripe_count);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED,
- "failed to set the stripe-count in dict");
- goto out;
- }
- goto brick_val;
- }
-
- ret = gd_addbr_validate_replica_count (volinfo, replica_count,
- arbiter_count, total_bricks,
- &type, err_str,
- sizeof (err_str));
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "failed to get type from dictionary");
+ goto out;
+ }
+
+ goto brick_val;
+ }
+
+ ret = glusterd_disallow_op_for_tier(volinfo, GD_OP_ADD_BRICK, -1);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Add-brick operation is "
+ "not supported on a tiered volume %s",
+ volname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_UNSUPPORTED, "%s",
+ err_str);
+ goto out;
+ }
+
+ if (!stripe_count && !replica_count) {
+ if (volinfo->type == GF_CLUSTER_TYPE_NONE)
+ goto brick_val;
+
+ if ((volinfo->brick_count < volinfo->dist_leaf_count) &&
+ (total_bricks <= volinfo->dist_leaf_count))
+ goto brick_val;
+
+ if ((brick_count % volinfo->dist_leaf_count) != 0) {
+ snprintf(err_str, sizeof(err_str),
+ "Incorrect number "
+ "of bricks supplied %d with count %d",
+ brick_count, volinfo->dist_leaf_count);
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_REPLICA,
+ "%s", err_str);
+ ret = -1;
+ goto out;
+ }
+ goto brick_val;
+ /* done with validation.. below section is if stripe|replica
+ count is given */
+ }
+
+    /* These bricks need to be added one per replica or stripe volume */
+ if (stripe_count) {
+ ret = gd_addbr_validate_stripe_count(volinfo, stripe_count,
+ total_bricks, &type, err_str,
+ sizeof(err_str));
if (ret == -1) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_COUNT_VALIDATE_FAILED, "%s", err_str);
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COUNT_VALIDATE_FAILED,
+ "%s", err_str);
+ goto out;
}
- /* if replica count is same as earlier, set it back to 0 */
+ /* if stripe count is same as earlier, set it back to 0 */
if (ret == 1)
- replica_count = 0;
+ stripe_count = 0;
- ret = dict_set_int32n (dict, "replica-count",
- SLEN ("replica-count"), replica_count);
+ ret = dict_set_int32n(dict, "stripe-count", SLEN("stripe-count"),
+ stripe_count);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED,
- "failed to set the replica-count in dict");
- goto out;
- }
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "failed to set the stripe-count in dict");
+ goto out;
+ }
+ goto brick_val;
+ }
+
+ ret = gd_addbr_validate_replica_count(volinfo, replica_count, arbiter_count,
+ total_bricks, &type, err_str,
+ sizeof(err_str));
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COUNT_VALIDATE_FAILED, "%s",
+ err_str);
+ goto out;
+ }
+
+ /* if replica count is same as earlier, set it back to 0 */
+ if (ret == 1)
+ replica_count = 0;
+
+ ret = dict_set_int32n(dict, "replica-count", SLEN("replica-count"),
+ replica_count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "failed to set the replica-count in dict");
+ goto out;
+ }
brick_val:
- ret = dict_get_strn (dict, "bricks", SLEN ("bricks"), &bricks);
+ ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Unable to get volume "
+ "bricks");
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
+ err_str);
+ goto out;
+ }
+
+ if (type != volinfo->type) {
+ ret = dict_set_int32n(dict, "type", SLEN("type"), type);
if (ret) {
- snprintf (err_str, sizeof (err_str), "Unable to get volume "
- "bricks");
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "%s", err_str);
- goto out;
- }
-
- if (type != volinfo->type) {
- ret = dict_set_int32n (dict, "type", SLEN ("type"), type);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED,
- "failed to set the new type in dict");
- goto out;
- }
- }
-
- if (conf->op_version <= GD_OP_VERSION_3_7_5) {
- gf_msg_debug (this->name, 0, "The cluster is operating at "
- "version less than or equal to %d. Falling back "
- "to syncop framework.",
- GD_OP_VERSION_3_7_5);
- ret = glusterd_op_begin_synctask (req, GD_OP_ADD_BRICK, dict);
- } else {
- ret = glusterd_mgmt_v3_initiate_all_phases (req,
- GD_OP_ADD_BRICK,
- dict);
- }
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "failed to set the new type in dict");
+ goto out;
+ }
+ }
+
+ if (conf->op_version <= GD_OP_VERSION_3_7_5) {
+ gf_msg_debug(this->name, 0,
+ "The cluster is operating at "
+ "version less than or equal to %d. Falling back "
+ "to syncop framework.",
+ GD_OP_VERSION_3_7_5);
+ ret = glusterd_op_begin_synctask(req, GD_OP_ADD_BRICK, dict);
+ } else {
+ ret = glusterd_mgmt_v3_initiate_all_phases(req, GD_OP_ADD_BRICK, dict);
+ }
out:
- if (ret) {
- rsp.op_ret = -1;
- rsp.op_errno = 0;
- if (err_str[0] == '\0')
- snprintf (err_str, sizeof (err_str), "Operation failed");
- rsp.op_errstr = err_str;
- cli_rsp = &rsp;
- glusterd_to_cli (req, cli_rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf_cli_rsp, dict);
- ret = 0; //sent error to cli, prevent second reply
- }
+ if (ret) {
+ rsp.op_ret = -1;
+ rsp.op_errno = 0;
+ if (err_str[0] == '\0')
+ snprintf(err_str, sizeof(err_str), "Operation failed");
+ rsp.op_errstr = err_str;
+ cli_rsp = &rsp;
+ glusterd_to_cli(req, cli_rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp,
+ dict);
+ ret = 0; // sent error to cli, prevent second reply
+ }
- free (cli_req.dict.dict_val); //its malloced by xdr
+    free(cli_req.dict.dict_val);  // it's malloced by xdr
- return ret;
+ return ret;
}
int
-glusterd_handle_add_brick (rpcsvc_request_t *req)
+glusterd_handle_add_brick(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __glusterd_handle_add_brick);
+ return glusterd_big_locked_handler(req, __glusterd_handle_add_brick);
}
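glusterd_handle_add_brick() (and its remove-brick sibling further down) only wraps the double-underscore actor in glusterd_big_locked_handler(), which serializes glusterd request handlers. A schematic of that wrapper pattern, using a plain pthread mutex purely for illustration:

#include <pthread.h>
#include <stdio.h>

/* Illustration only: serialize request handlers behind one big lock,
 * the same shape as glusterd_big_locked_handler(req, __handler). */
static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

typedef int (*handler_fn)(void *req);

static int
big_locked_handler(void *req, handler_fn actor)
{
    int ret;

    pthread_mutex_lock(&big_lock);
    ret = actor(req);               /* real work happens under the lock */
    pthread_mutex_unlock(&big_lock);

    return ret;
}

static int
__handle_add_brick(void *req)
{
    (void)req;
    printf("handling add-brick under the big lock\n");
    return 0;
}

int
main(void)
{
    return big_locked_handler(NULL, __handle_add_brick);
}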
static int
-subvol_matcher_init (int **subvols, int count)
+subvol_matcher_init(int **subvols, int count)
{
- int ret = -1;
+ int ret = -1;
- *subvols = GF_CALLOC (count, sizeof(int), gf_gld_mt_int);
- if (*subvols)
- ret = 0;
+ *subvols = GF_CALLOC(count, sizeof(int), gf_gld_mt_int);
+ if (*subvols)
+ ret = 0;
- return ret;
+ return ret;
}
static void
-subvol_matcher_update (int *subvols, glusterd_volinfo_t *volinfo,
- glusterd_brickinfo_t *brickinfo)
+subvol_matcher_update(int *subvols, glusterd_volinfo_t *volinfo,
+ glusterd_brickinfo_t *brickinfo)
{
- glusterd_brickinfo_t *tmp = NULL;
- int32_t sub_volume = 0;
- int pos = 0;
-
- cds_list_for_each_entry (tmp, &volinfo->bricks, brick_list) {
-
- if (strcmp (tmp->hostname, brickinfo->hostname) ||
- strcmp (tmp->path, brickinfo->path)) {
- pos++;
- continue;
- }
- gf_msg_debug (THIS->name, 0, LOGSTR_FOUND_BRICK,
- brickinfo->hostname, brickinfo->path,
- volinfo->volname);
- sub_volume = (pos / volinfo->dist_leaf_count);
- subvols[sub_volume]++;
- break;
- }
-
+ glusterd_brickinfo_t *tmp = NULL;
+ int32_t sub_volume = 0;
+ int pos = 0;
+
+ cds_list_for_each_entry(tmp, &volinfo->bricks, brick_list)
+ {
+ if (strcmp(tmp->hostname, brickinfo->hostname) ||
+ strcmp(tmp->path, brickinfo->path)) {
+ pos++;
+ continue;
+ }
+ gf_msg_debug(THIS->name, 0, LOGSTR_FOUND_BRICK, brickinfo->hostname,
+ brickinfo->path, volinfo->volname);
+ sub_volume = (pos / volinfo->dist_leaf_count);
+ subvols[sub_volume]++;
+ break;
+ }
}
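subvol_matcher_update() derives the subvolume index purely from the brick's position in the list: pos / dist_leaf_count. A compact demonstration of that mapping, with the brick list reduced to an index loop (counts assumed):

#include <stdio.h>

int
main(void)
{
    /* Assumed shape: 6 bricks, dist_leaf_count 2 -> 3 subvolumes.
     * Mirrors: sub_volume = pos / volinfo->dist_leaf_count; */
    int dist_leaf_count = 2;
    int subvols[3] = {0};
    int pos;

    for (pos = 0; pos < 6; pos++)
        subvols[pos / dist_leaf_count]++;

    for (pos = 0; pos < 3; pos++)
        printf("subvol %d holds %d bricks\n", pos, subvols[pos]);
    return 0;
}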
static int
-subvol_matcher_verify (int *subvols, glusterd_volinfo_t *volinfo, char *err_str,
- size_t err_len, char *vol_type, int replica_count)
+subvol_matcher_verify(int *subvols, glusterd_volinfo_t *volinfo, char *err_str,
+ size_t err_len, char *vol_type, int replica_count)
{
- int i = 0;
- int ret = 0;
- int count = volinfo->replica_count-replica_count;
-
- if (replica_count) {
- for (i = 0; i < volinfo->subvol_count; i++) {
- if (subvols[i] != count) {
- ret = -1;
- snprintf (err_str, err_len, "Remove exactly %d"
- " brick(s) from each subvolume.", count);
- break;
- }
- }
- return ret;
- }
+ int i = 0;
+ int ret = 0;
+ int count = volinfo->replica_count - replica_count;
- do {
+ if (replica_count) {
+ for (i = 0; i < volinfo->subvol_count; i++) {
+ if (subvols[i] != count) {
+ ret = -1;
+ snprintf(err_str, err_len,
+ "Remove exactly %d"
+ " brick(s) from each subvolume.",
+ count);
+ break;
+ }
+ }
+ return ret;
+ }
- if (subvols[i] % volinfo->dist_leaf_count == 0) {
- continue;
- } else {
- ret = -1;
- snprintf (err_str, err_len,
- "Bricks not from same subvol for %s", vol_type);
- break;
- }
- } while (++i < volinfo->subvol_count);
+ do {
+ if (subvols[i] % volinfo->dist_leaf_count == 0) {
+ continue;
+ } else {
+ ret = -1;
+ snprintf(err_str, err_len, "Bricks not from same subvol for %s",
+ vol_type);
+ break;
+ }
+ } while (++i < volinfo->subvol_count);
- return ret;
+ return ret;
}
static void
-subvol_matcher_destroy (int *subvols)
+subvol_matcher_destroy(int *subvols)
{
- GF_FREE (subvols);
+ GF_FREE(subvols);
}
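Taken together, the four matcher helpers form a small init/update/verify/destroy lifecycle around one heap-allocated counter array. A standalone sketch of that lifecycle, with calloc/free standing in for GF_CALLOC/GF_FREE and the dist_leaf_count modulo rule from subvol_matcher_verify():

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
    /* init: one counter per subvolume (3 assumed here) */
    int subvol_count = 3;
    int leaf_count = 2;
    int *subvols = calloc(subvol_count, sizeof(int));
    int i, ret = 0;

    if (!subvols)
        return -1;

    /* update: count the bricks selected for removal per subvolume */
    subvols[0] = 2;     /* whole first subvolume picked */
    subvols[1] = 1;     /* partial pick -> should fail verify */

    /* verify: every subvolume's pick must be a multiple of leaf_count */
    for (i = 0; i < subvol_count; i++) {
        if (subvols[i] % leaf_count) {
            ret = -1;
            fprintf(stderr, "bricks not from same subvol\n");
            break;
        }
    }

    /* destroy */
    free(subvols);
    return ret;
}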
int
glusterd_set_detach_bricks(dict_t *dict, glusterd_volinfo_t *volinfo)
{
- char key[64] = "";
- char value[2048] = ""; /* hostname + path */
- int brick_num = 0;
- int hot_brick_num = 0;
- glusterd_brickinfo_t *brickinfo;
- int ret = 0;
- int32_t len = 0;
-
- /* cold tier bricks at tail of list so use reverse iteration */
- cds_list_for_each_entry_reverse (brickinfo, &volinfo->bricks,
- brick_list) {
- brick_num++;
- if (brick_num > volinfo->tier_info.cold_brick_count) {
- hot_brick_num++;
- sprintf (key, "brick%d", hot_brick_num);
- len = snprintf (value, sizeof(value), "%s:%s",
- brickinfo->hostname,
- brickinfo->path);
- if ((len < 0) || (len >= sizeof(value))) {
- return -1;
- }
+ char key[64] = "";
+ char value[2048] = ""; /* hostname + path */
+ int brick_num = 0;
+ int hot_brick_num = 0;
+ glusterd_brickinfo_t *brickinfo;
+ int ret = 0;
+ int32_t len = 0;
+
+ /* cold tier bricks at tail of list so use reverse iteration */
+ cds_list_for_each_entry_reverse(brickinfo, &volinfo->bricks, brick_list)
+ {
+ brick_num++;
+ if (brick_num > volinfo->tier_info.cold_brick_count) {
+ hot_brick_num++;
+ sprintf(key, "brick%d", hot_brick_num);
+ len = snprintf(value, sizeof(value), "%s:%s", brickinfo->hostname,
+ brickinfo->path);
+ if ((len < 0) || (len >= sizeof(value))) {
+ return -1;
+ }
- ret = dict_set_str (dict, key, strdup(value));
- if (ret)
- break;
- }
+ ret = dict_set_str(dict, key, strdup(value));
+ if (ret)
+ break;
}
+ }
- ret = dict_set_int32n (dict, "count", SLEN ("count"), hot_brick_num);
- if (ret)
- return -1;
+ ret = dict_set_int32n(dict, "count", SLEN("count"), hot_brick_num);
+ if (ret)
+ return -1;
- return hot_brick_num;
+ return hot_brick_num;
}
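glusterd_set_detach_bricks() iterates the brick list in reverse because cold-tier bricks live at the tail; everything counted beyond cold_brick_count is hot and gets packed into "brickN" keys. A simplified rendering of that selection over an array (hypothetical hostnames and counts, no dict):

#include <stdio.h>

int
main(void)
{
    /* Assumed: 4 bricks total, the last 2 entries are the cold tier. */
    const char *bricks[] = {"h1:/b1", "h2:/b2", "c1:/b3", "c2:/b4"};
    int total = 4, cold_brick_count = 2;
    int brick_num = 0, hot_brick_num = 0, i;
    char key[64];

    /* reverse iteration, mirroring cds_list_for_each_entry_reverse() */
    for (i = total - 1; i >= 0; i--) {
        brick_num++;
        if (brick_num > cold_brick_count) {
            hot_brick_num++;
            snprintf(key, sizeof(key), "brick%d", hot_brick_num);
            printf("%s = %s\n", key, bricks[i]);
        }
    }
    printf("count = %d\n", hot_brick_num);
    return 0;
}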
static int
-glusterd_remove_brick_validate_arbiters (glusterd_volinfo_t *volinfo,
- int32_t count, int32_t replica_count,
- glusterd_brickinfo_t **brickinfo_list,
- char *err_str, size_t err_len)
+glusterd_remove_brick_validate_arbiters(glusterd_volinfo_t *volinfo,
+ int32_t count, int32_t replica_count,
+ glusterd_brickinfo_t **brickinfo_list,
+ char *err_str, size_t err_len)
{
- int i = 0;
- int ret = 0;
- glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_brickinfo_t *last = NULL;
- char *arbiter_array = NULL;
-
- if ((volinfo->type != GF_CLUSTER_TYPE_REPLICATE) &&
- (volinfo->type != GF_CLUSTER_TYPE_STRIPE_REPLICATE))
- goto out;
-
- if (!replica_count || !volinfo->arbiter_count)
+ int i = 0;
+ int ret = 0;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_brickinfo_t *last = NULL;
+ char *arbiter_array = NULL;
+
+ if ((volinfo->type != GF_CLUSTER_TYPE_REPLICATE) &&
+ (volinfo->type != GF_CLUSTER_TYPE_STRIPE_REPLICATE))
+ goto out;
+
+ if (!replica_count || !volinfo->arbiter_count)
+ goto out;
+
+ if (replica_count == 2) {
+ /* If it is an arbiter to replica 2 conversion, only permit
+ * removal of the arbiter brick.*/
+ for (i = 0; i < count; i++) {
+ brickinfo = brickinfo_list[i];
+ last = get_last_brick_of_brick_group(volinfo, brickinfo);
+ if (last != brickinfo) {
+ snprintf(err_str, err_len,
+ "Remove arbiter "
+ "brick(s) only when converting from "
+ "arbiter to replica 2 subvolume.");
+ ret = -1;
goto out;
-
- if (replica_count == 2) {
- /* If it is an arbiter to replica 2 conversion, only permit
- * removal of the arbiter brick.*/
- for (i = 0; i < count; i++) {
- brickinfo = brickinfo_list[i];
- last = get_last_brick_of_brick_group (volinfo,
- brickinfo);
- if (last != brickinfo) {
- snprintf (err_str, err_len, "Remove arbiter "
- "brick(s) only when converting from "
- "arbiter to replica 2 subvolume.");
- ret = -1;
- goto out;
- }
- }
- } else if (replica_count == 1) {
- /* If it is an arbiter to plain distribute conversion, in every
- * replica subvol, the arbiter has to be one of the bricks that
- * are removed. */
- arbiter_array = GF_CALLOC (volinfo->subvol_count,
- sizeof (*arbiter_array),
- gf_common_mt_char);
- if (!arbiter_array)
- return -1;
- for (i = 0; i < count; i++) {
- brickinfo = brickinfo_list[i];
- last = get_last_brick_of_brick_group (volinfo,
- brickinfo);
- if (last == brickinfo)
- arbiter_array[brickinfo->group] = 1;
- }
- for (i = 0; i < volinfo->subvol_count; i++)
- if (!arbiter_array[i]) {
- snprintf (err_str, err_len, "Removed bricks "
- "must contain arbiter when converting"
- " to plain distrubute.");
- ret = -1;
- break;
- }
- GF_FREE (arbiter_array);
- }
+ }
+ }
+ } else if (replica_count == 1) {
+ /* If it is an arbiter to plain distribute conversion, in every
+ * replica subvol, the arbiter has to be one of the bricks that
+ * are removed. */
+ arbiter_array = GF_CALLOC(volinfo->subvol_count, sizeof(*arbiter_array),
+ gf_common_mt_char);
+ if (!arbiter_array)
+ return -1;
+ for (i = 0; i < count; i++) {
+ brickinfo = brickinfo_list[i];
+ last = get_last_brick_of_brick_group(volinfo, brickinfo);
+ if (last == brickinfo)
+ arbiter_array[brickinfo->group] = 1;
+ }
+ for (i = 0; i < volinfo->subvol_count; i++)
+ if (!arbiter_array[i]) {
+                snprintf(err_str, err_len,
+                         "Removed bricks "
+                         "must contain arbiter when converting"
+                         " to plain distribute.");
+ ret = -1;
+ break;
+ }
+ GF_FREE(arbiter_array);
+ }
out:
- return ret;
+ return ret;
}
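The arbiter checks above lean on one structural invariant: within each replica group the arbiter is the group's last brick. A condensed sketch of the replica-2 conversion rule, modeling bricks as indexes so that index (replica_count - 1) of each group is the arbiter:

#include <stdio.h>

/* Sketch: only the last brick of each group (the arbiter) may be
 * removed when converting arbiter -> replica 2. */
static int
validate_arbiter_removal(const int *removed, int n, int replica_count)
{
    int i;

    for (i = 0; i < n; i++) {
        if (removed[i] % replica_count != replica_count - 1) {
            fprintf(stderr, "remove arbiter brick(s) only\n");
            return -1;
        }
    }
    return 0;
}

int
main(void)
{
    int ok[] = {2, 5};   /* arbiters of groups 0 and 1 (replica 3) */
    int bad[] = {1};     /* a data brick */

    printf("%d\n", validate_arbiter_removal(ok, 2, 3));  /* 0 */
    printf("%d\n", validate_arbiter_removal(bad, 1, 3)); /* -1 */
    return 0;
}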
int
-__glusterd_handle_remove_brick (rpcsvc_request_t *req)
+__glusterd_handle_remove_brick(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf_cli_req cli_req = {{0,}};
- dict_t *dict = NULL;
- int32_t count = 0;
- char *brick = NULL;
- char key[64] = "";
- int keylen;
- int i = 1;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_brickinfo_t **brickinfo_list = NULL;
- int *subvols = NULL;
- char err_str[2048] = "";
- gf_cli_rsp rsp = {0,};
- void *cli_rsp = NULL;
- char vol_type[256] = "";
- int32_t replica_count = 0;
- char *volname = 0;
- xlator_t *this = NULL;
- int cmd = -1;
-
- GF_ASSERT (req);
- this = THIS;
- GF_ASSERT (this);
-
- ret = xdr_to_generic (req->msg[0], &cli_req,
- (xdrproc_t)xdr_gf_cli_req);
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{
+ 0,
+ }};
+ dict_t *dict = NULL;
+ int32_t count = 0;
+ char *brick = NULL;
+ char key[64] = "";
+ int keylen;
+ int i = 1;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_brickinfo_t **brickinfo_list = NULL;
+ int *subvols = NULL;
+ char err_str[2048] = "";
+ gf_cli_rsp rsp = {
+ 0,
+ };
+ void *cli_rsp = NULL;
+ char vol_type[256] = "";
+ int32_t replica_count = 0;
+ char *volname = 0;
+ xlator_t *this = NULL;
+ int cmd = -1;
+
+ GF_ASSERT(req);
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+        // failed to decode msg
+ req->rpc_err = GARBAGE_ARGS;
+ snprintf(err_str, sizeof(err_str), "Received garbage args");
+ goto out;
+ }
+
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_REM_BRICK_REQ_RECVD,
+ "Received rem brick req");
+
+ if (cli_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new();
+
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
if (ret < 0) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
- snprintf (err_str, sizeof (err_str), "Received garbage args");
- goto out;
- }
-
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_REM_BRICK_REQ_RECVD,
- "Received rem brick req");
-
- if (cli_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new ();
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to "
- "unserialize req-buffer to dictionary");
- snprintf (err_str, sizeof (err_str), "Unable to decode "
- "the command");
- goto out;
- }
- }
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Unable to get volume "
- "name");
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "%s", err_str);
- goto out;
- }
-
- ret = dict_get_int32n (dict, "count", SLEN ("count"), &count);
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Unable to get brick "
- "count");
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "%s", err_str);
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- snprintf (err_str, sizeof (err_str),"Volume %s does not exist",
- volname);
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_VOL_NOT_FOUND, "%s", err_str);
- goto out;
- }
-
- if ((volinfo->type == GF_CLUSTER_TYPE_TIER) &&
- (glusterd_is_tiering_supported(err_str) == _gf_false)) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VERSION_UNSUPPORTED,
- "Tiering not supported at this version");
- ret = -1;
- goto out;
- }
-
- ret = dict_get_int32n (dict, "command", SLEN ("command"), &cmd);
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Unable to get cmd "
- "ccommand");
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "%s", err_str);
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf(err_str, sizeof(err_str),
+ "Unable to decode "
+ "the command");
+ goto out;
+ }
+ }
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Unable to get volume "
+ "name");
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
+ err_str);
+ goto out;
+ }
+
+ ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Unable to get brick "
+ "count");
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
+ err_str);
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str), "Volume %s does not exist", volname);
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND, "%s",
+ err_str);
+ goto out;
+ }
+
+ if ((volinfo->type == GF_CLUSTER_TYPE_TIER) &&
+ (glusterd_is_tiering_supported(err_str) == _gf_false)) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED,
+ "Tiering not supported at this version");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_int32n(dict, "command", SLEN("command"), &cmd);
+ if (ret) {
+        snprintf(err_str, sizeof(err_str),
+                 "Unable to get the "
+                 "command");
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
+ err_str);
+ goto out;
+ }
+
+ ret = glusterd_disallow_op_for_tier(volinfo, GD_OP_REMOVE_BRICK, cmd);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Removing brick from a Tier volume is not allowed");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_UNSUPPORTED, "%s",
+ err_str);
+ goto out;
+ }
+
+ ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
+ &replica_count);
+ if (!ret) {
+ gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_FAILED,
+ "request to change replica-count to %d", replica_count);
+ ret = gd_rmbr_validate_replica_count(volinfo, replica_count, count,
+ err_str, sizeof(err_str));
+ if (ret < 0) {
+ /* logging and error msg are done in above function
+ itself */
+ goto out;
}
-
- ret = glusterd_disallow_op_for_tier (volinfo, GD_OP_REMOVE_BRICK, cmd);
+ dict_deln(dict, "replica-count", SLEN("replica-count"));
if (ret) {
- snprintf (err_str, sizeof (err_str),
- "Removing brick from a Tier volume is not allowed");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_OP_UNSUPPORTED, "%s", err_str);
- goto out;
- }
-
- ret = dict_get_int32n (dict, "replica-count", SLEN ("replica-count"),
- &replica_count);
- if (!ret) {
- gf_msg (this->name, GF_LOG_INFO, errno,
- GD_MSG_DICT_GET_FAILED,
- "request to change replica-count to %d", replica_count);
- ret = gd_rmbr_validate_replica_count (volinfo, replica_count,
- count, err_str,
- sizeof (err_str));
- if (ret < 0) {
- /* logging and error msg are done in above function
- itself */
- goto out;
- }
- dict_deln (dict, "replica-count", SLEN ("replica-count"));
- if (ret) {
- replica_count = 0;
- } else {
- ret = dict_set_int32n (dict, "replica-count",
- SLEN ("replica-count"),
- replica_count);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, errno,
- GD_MSG_DICT_SET_FAILED,
- "failed to set the replica_count "
- "in dict");
- goto out;
- }
- }
- }
-
- /* 'vol_type' is used for giving the meaning full error msg for user */
- if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) {
- strcpy (vol_type, "replica");
- } else if (volinfo->type == GF_CLUSTER_TYPE_STRIPE) {
- strcpy (vol_type, "stripe");
- } else if (volinfo->type == GF_CLUSTER_TYPE_STRIPE_REPLICATE) {
- strcpy (vol_type, "stripe-replicate");
- } else if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
- strcpy (vol_type, "disperse");
+ replica_count = 0;
} else {
- strcpy (vol_type, "distribute");
- }
+ ret = dict_set_int32n(dict, "replica-count", SLEN("replica-count"),
+ replica_count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, errno,
+ GD_MSG_DICT_SET_FAILED,
+ "failed to set the replica_count "
+ "in dict");
+ goto out;
+ }
+ }
+ }
+
+    /* 'vol_type' is used for giving a meaningful error msg to the user */
+ if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) {
+ strcpy(vol_type, "replica");
+ } else if (volinfo->type == GF_CLUSTER_TYPE_STRIPE) {
+ strcpy(vol_type, "stripe");
+ } else if (volinfo->type == GF_CLUSTER_TYPE_STRIPE_REPLICATE) {
+ strcpy(vol_type, "stripe-replicate");
+ } else if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
+ strcpy(vol_type, "disperse");
+ } else {
+ strcpy(vol_type, "distribute");
+ }
+
+    /* Do not allow remove-brick if the volume is a stripe volume */
+ if ((volinfo->type == GF_CLUSTER_TYPE_STRIPE) &&
+ (volinfo->brick_count == volinfo->stripe_count)) {
+ snprintf(err_str, sizeof(err_str),
+ "Removing brick from a stripe volume is not allowed");
+ gf_msg(this->name, GF_LOG_ERROR, EPERM, GD_MSG_OP_NOT_PERMITTED, "%s",
+ err_str);
+ ret = -1;
+ goto out;
+ }
+
+ if (!replica_count && (volinfo->type == GF_CLUSTER_TYPE_STRIPE_REPLICATE) &&
+ (volinfo->brick_count == volinfo->dist_leaf_count)) {
+ snprintf(err_str, sizeof(err_str),
+ "Removing bricks from stripe-replicate"
+ " configuration is not allowed without reducing "
+ "replica or stripe count explicitly.");
+ gf_msg(this->name, GF_LOG_ERROR, EPERM, GD_MSG_OP_NOT_PERMITTED_AC_REQD,
+ "%s", err_str);
+ ret = -1;
+ goto out;
+ }
+
+ if (!replica_count && (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) &&
+ (volinfo->brick_count == volinfo->dist_leaf_count)) {
+ snprintf(err_str, sizeof(err_str),
+ "Removing bricks from replicate configuration "
+ "is not allowed without reducing replica count "
+ "explicitly.");
+ gf_msg(this->name, GF_LOG_ERROR, EPERM, GD_MSG_OP_NOT_PERMITTED_AC_REQD,
+ "%s", err_str);
+ ret = -1;
+ goto out;
+ }
+
+    /* Do not allow remove-brick if the number of bricks given is less
+       than the replica count or stripe count */
+ if (!replica_count && (volinfo->type != GF_CLUSTER_TYPE_NONE) &&
+ (volinfo->type != GF_CLUSTER_TYPE_TIER)) {
+ if (volinfo->dist_leaf_count && (count % volinfo->dist_leaf_count)) {
+ snprintf(err_str, sizeof(err_str),
+ "Remove brick "
+ "incorrect brick count of %d for %s %d",
+ count, vol_type, volinfo->dist_leaf_count);
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s",
+ err_str);
+ ret = -1;
+ goto out;
+ }
+ }
+
+    /* subvol match is not required for tiered volume */
+ if ((volinfo->type != GF_CLUSTER_TYPE_NONE) &&
+ (volinfo->type != GF_CLUSTER_TYPE_TIER) &&
+ (volinfo->subvol_count > 1)) {
+ ret = subvol_matcher_init(&subvols, volinfo->subvol_count);
+ if (ret)
+ goto out;
+ }
- /* Do not allow remove-brick if the volume is a stripe volume*/
- if ((volinfo->type == GF_CLUSTER_TYPE_STRIPE) &&
- (volinfo->brick_count == volinfo->stripe_count)) {
- snprintf (err_str, sizeof (err_str),
- "Removing brick from a stripe volume is not allowed");
- gf_msg (this->name, GF_LOG_ERROR, EPERM,
- GD_MSG_OP_NOT_PERMITTED, "%s", err_str);
- ret = -1;
- goto out;
- }
-
- if (!replica_count &&
- (volinfo->type == GF_CLUSTER_TYPE_STRIPE_REPLICATE) &&
- (volinfo->brick_count == volinfo->dist_leaf_count)) {
- snprintf (err_str, sizeof(err_str),
- "Removing bricks from stripe-replicate"
- " configuration is not allowed without reducing "
- "replica or stripe count explicitly.");
- gf_msg (this->name, GF_LOG_ERROR, EPERM,
- GD_MSG_OP_NOT_PERMITTED_AC_REQD, "%s", err_str);
- ret = -1;
- goto out;
- }
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER)
+ count = glusterd_set_detach_bricks(dict, volinfo);
- if (!replica_count &&
- (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) &&
- (volinfo->brick_count == volinfo->dist_leaf_count)) {
- snprintf (err_str, sizeof (err_str),
- "Removing bricks from replicate configuration "
- "is not allowed without reducing replica count "
- "explicitly.");
- gf_msg (this->name, GF_LOG_ERROR, EPERM,
- GD_MSG_OP_NOT_PERMITTED_AC_REQD, "%s", err_str);
- ret = -1;
- goto out;
- }
-
- /* Do not allow remove-brick if the bricks given is less than
- the replica count or stripe count */
- if (!replica_count && (volinfo->type != GF_CLUSTER_TYPE_NONE) &&
- (volinfo->type != GF_CLUSTER_TYPE_TIER)) {
- if (volinfo->dist_leaf_count &&
- (count % volinfo->dist_leaf_count)) {
- snprintf (err_str, sizeof (err_str), "Remove brick "
- "incorrect brick count of %d for %s %d",
- count, vol_type, volinfo->dist_leaf_count);
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "%s", err_str);
- ret = -1;
- goto out;
- }
- }
+ brickinfo_list = GF_CALLOC(count, sizeof(*brickinfo_list),
+ gf_common_mt_pointer);
+ if (!brickinfo_list) {
+ ret = -1;
+ goto out;
+ }
- /* subvol match is not required for tiered volume*/
- if ((volinfo->type != GF_CLUSTER_TYPE_NONE) &&
- (volinfo->type != GF_CLUSTER_TYPE_TIER) &&
- (volinfo->subvol_count > 1)) {
- ret = subvol_matcher_init (&subvols, volinfo->subvol_count);
- if (ret)
- goto out;
+ while (i <= count) {
+ keylen = snprintf(key, sizeof(key), "brick%d", i);
+ ret = dict_get_strn(dict, key, keylen, &brick);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str), "Unable to get %s", key);
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "%s", err_str);
+ goto out;
}
+ gf_msg_debug(this->name, 0,
+ "Remove brick count %d brick:"
+ " %s",
+ i, brick);
- if (volinfo->type == GF_CLUSTER_TYPE_TIER)
- count = glusterd_set_detach_bricks(dict, volinfo);
-
- brickinfo_list = GF_CALLOC (count, sizeof (*brickinfo_list),
- gf_common_mt_pointer);
- if (!brickinfo_list) {
- ret = -1;
- goto out;
- }
+ ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo,
+ _gf_false);
- while ( i <= count) {
- keylen = snprintf (key, sizeof (key), "brick%d", i);
- ret = dict_get_strn (dict, key, keylen, &brick);
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Unable to get %s",
- key);
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "%s", err_str);
- goto out;
- }
- gf_msg_debug (this->name, 0, "Remove brick count %d brick:"
- " %s", i, brick);
-
- ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo,
- &brickinfo,
- _gf_false);
-
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Incorrect brick "
- "%s for volume %s", brick, volname);
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_BRICK_NOT_FOUND, "%s", err_str);
- goto out;
- }
- brickinfo_list[i-1] = brickinfo;
-
- i++;
- if ((volinfo->type == GF_CLUSTER_TYPE_NONE) ||
- (volinfo->brick_count <= volinfo->dist_leaf_count))
- continue;
-
- /* Find which subvolume the brick belongs to.
- * subvol match is not required for tiered volume
- *
- */
- if (volinfo->type != GF_CLUSTER_TYPE_TIER)
- subvol_matcher_update (subvols, volinfo, brickinfo);
- }
-
- /* Check if the bricks belong to the same subvolumes.*/
- /* subvol match is not required for tiered volume*/
- if ((volinfo->type != GF_CLUSTER_TYPE_NONE) &&
- (volinfo->type != GF_CLUSTER_TYPE_TIER) &&
- (volinfo->subvol_count > 1)) {
- ret = subvol_matcher_verify (subvols, volinfo,
- err_str, sizeof(err_str),
- vol_type, replica_count);
- if (ret)
- goto out;
- }
-
- ret = glusterd_remove_brick_validate_arbiters (volinfo, count,
- replica_count,
- brickinfo_list,
- err_str,
- sizeof (err_str));
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Incorrect brick "
+ "%s for volume %s",
+ brick, volname);
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_BRICK_NOT_FOUND,
+ "%s", err_str);
+ goto out;
+ }
+ brickinfo_list[i - 1] = brickinfo;
+
+ i++;
+ if ((volinfo->type == GF_CLUSTER_TYPE_NONE) ||
+ (volinfo->brick_count <= volinfo->dist_leaf_count))
+ continue;
+
+ /* Find which subvolume the brick belongs to.
+ * subvol match is not required for tiered volume
+ *
+ */
+ if (volinfo->type != GF_CLUSTER_TYPE_TIER)
+ subvol_matcher_update(subvols, volinfo, brickinfo);
+ }
+
+    /* Check if the bricks belong to the same subvolumes. */
+    /* subvol match is not required for tiered volume */
+ if ((volinfo->type != GF_CLUSTER_TYPE_NONE) &&
+ (volinfo->type != GF_CLUSTER_TYPE_TIER) &&
+ (volinfo->subvol_count > 1)) {
+ ret = subvol_matcher_verify(subvols, volinfo, err_str, sizeof(err_str),
+ vol_type, replica_count);
if (ret)
- goto out;
+ goto out;
+ }
- ret = glusterd_op_begin_synctask (req, GD_OP_REMOVE_BRICK, dict);
+ ret = glusterd_remove_brick_validate_arbiters(volinfo, count, replica_count,
+ brickinfo_list, err_str,
+ sizeof(err_str));
+ if (ret)
+ goto out;
-out:
- if (ret) {
- rsp.op_ret = -1;
- rsp.op_errno = 0;
- if (err_str[0] == '\0')
- snprintf (err_str, sizeof (err_str),
- "Operation failed");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_GLUSTERD_OP_FAILED, "%s", err_str);
- rsp.op_errstr = err_str;
- cli_rsp = &rsp;
- glusterd_to_cli (req, cli_rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf_cli_rsp, dict);
+ ret = glusterd_op_begin_synctask(req, GD_OP_REMOVE_BRICK, dict);
- ret = 0; //sent error to cli, prevent second reply
+out:
+ if (ret) {
+ rsp.op_ret = -1;
+ rsp.op_errno = 0;
+ if (err_str[0] == '\0')
+ snprintf(err_str, sizeof(err_str), "Operation failed");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_OP_FAILED, "%s",
+ err_str);
+ rsp.op_errstr = err_str;
+ cli_rsp = &rsp;
+ glusterd_to_cli(req, cli_rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp,
+ dict);
- }
+ ret = 0; // sent error to cli, prevent second reply
+ }
- if (brickinfo_list)
- GF_FREE (brickinfo_list);
- subvol_matcher_destroy (subvols);
- free (cli_req.dict.dict_val); //its malloced by xdr
+ if (brickinfo_list)
+ GF_FREE(brickinfo_list);
+ subvol_matcher_destroy(subvols);
+    free(cli_req.dict.dict_val);  // it's malloced by xdr
- return ret;
+ return ret;
}
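The remove-brick handler recovers its brick list from the request dict one "brickN" key at a time, formatting the key with snprintf and looking it up by exact length. The same loop, stripped to plain C with the dict simulated by an array:

#include <stdio.h>

int
main(void)
{
    /* Simulated dict payload: count = 2, brick1/brick2 values. */
    const char *bricks[] = {"host1:/export/b1", "host2:/export/b2"};
    int count = 2, i;
    char key[64];
    int keylen;

    for (i = 1; i <= count; i++) {
        /* mirrors: keylen = snprintf(key, sizeof(key), "brick%d", i);
         *          ret = dict_get_strn(dict, key, keylen, &brick); */
        keylen = snprintf(key, sizeof(key), "brick%d", i);
        printf("%.*s -> %s\n", keylen, key, bricks[i - 1]);
    }
    return 0;
}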
int
-glusterd_handle_remove_brick (rpcsvc_request_t *req)
+glusterd_handle_remove_brick(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_handle_remove_brick);
+ return glusterd_big_locked_handler(req, __glusterd_handle_remove_brick);
}
static int
-_glusterd_restart_gsync_session (dict_t *this, char *key,
- data_t *value, void *data)
+_glusterd_restart_gsync_session(dict_t *this, char *key, data_t *value,
+ void *data)
{
- char *slave = NULL;
- char *slave_buf = NULL;
- char *path_list = NULL;
- char *slave_vol = NULL;
- char *slave_host = NULL;
- char *slave_url = NULL;
- char *conf_path = NULL;
- char **errmsg = NULL;
- int ret = -1;
- glusterd_gsync_status_temp_t *param = NULL;
- gf_boolean_t is_running = _gf_false;
-
- param = (glusterd_gsync_status_temp_t *)data;
-
- GF_ASSERT (param);
- GF_ASSERT (param->volinfo);
-
- slave = strchr(value->data, ':');
- if (slave) {
- slave++;
- slave_buf = gf_strdup (slave);
- if (!slave_buf) {
- gf_msg ("glusterd", GF_LOG_ERROR, ENOMEM,
- GD_MSG_NO_MEMORY,
- "Failed to gf_strdup");
- ret = -1;
- goto out;
- }
- }
- else
- return 0;
-
- ret = dict_set_dynstrn (param->rsp_dict, "slave", SLEN ("slave"),
- slave_buf);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED,
- "Unable to store slave");
- if (slave_buf)
- GF_FREE(slave_buf);
- goto out;
- }
-
- ret = glusterd_get_slave_details_confpath (param->volinfo,
- param->rsp_dict, &slave_url,
- &slave_host, &slave_vol,
- &conf_path, errmsg);
- if (ret) {
- if (*errmsg)
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_SLAVE_CONFPATH_DETAILS_FETCH_FAIL,
- "%s", *errmsg);
- else
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_SLAVE_CONFPATH_DETAILS_FETCH_FAIL,
- "Unable to fetch slave or confpath details.");
- goto out;
- }
-
- /* In cases that gsyncd is not running, we will not invoke it
- * because of add-brick. */
- ret = glusterd_check_gsync_running_local (param->volinfo->volname,
- slave, conf_path,
- &is_running);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_GSYNC_VALIDATION_FAIL, "gsync running validation failed.");
- goto out;
- }
- if (_gf_false == is_running) {
- gf_msg_debug ("glusterd", 0, "gsync session for %s and %s is"
- " not running on this node. Hence not restarting.",
- param->volinfo->volname, slave);
- ret = 0;
- goto out;
- }
+ char *slave = NULL;
+ char *slave_buf = NULL;
+ char *path_list = NULL;
+ char *slave_vol = NULL;
+ char *slave_host = NULL;
+ char *slave_url = NULL;
+ char *conf_path = NULL;
+ char **errmsg = NULL;
+ int ret = -1;
+ glusterd_gsync_status_temp_t *param = NULL;
+ gf_boolean_t is_running = _gf_false;
+
+ param = (glusterd_gsync_status_temp_t *)data;
+
+ GF_ASSERT(param);
+ GF_ASSERT(param->volinfo);
+
+ slave = strchr(value->data, ':');
+ if (slave) {
+ slave++;
+ slave_buf = gf_strdup(slave);
+ if (!slave_buf) {
+ gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "Failed to gf_strdup");
+ ret = -1;
+ goto out;
+ }
+ } else
+ return 0;
- ret = glusterd_get_local_brickpaths (param->volinfo, &path_list);
- if (!path_list) {
- gf_msg_debug ("glusterd", 0, "This node not being part of"
- " volume should not be running gsyncd. Hence"
- " no gsyncd process to restart.");
- ret = 0;
- goto out;
- }
+ ret = dict_set_dynstrn(param->rsp_dict, "slave", SLEN("slave"), slave_buf);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Unable to store slave");
+ if (slave_buf)
+ GF_FREE(slave_buf);
+ goto out;
+ }
+
+ ret = glusterd_get_slave_details_confpath(param->volinfo, param->rsp_dict,
+ &slave_url, &slave_host,
+ &slave_vol, &conf_path, errmsg);
+ if (ret) {
+ if (*errmsg)
+ gf_msg("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_SLAVE_CONFPATH_DETAILS_FETCH_FAIL, "%s", *errmsg);
+ else
+ gf_msg("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_SLAVE_CONFPATH_DETAILS_FETCH_FAIL,
+ "Unable to fetch slave or confpath details.");
+ goto out;
+ }
+
+ /* In cases that gsyncd is not running, we will not invoke it
+ * because of add-brick. */
+ ret = glusterd_check_gsync_running_local(param->volinfo->volname, slave,
+ conf_path, &is_running);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_GSYNC_VALIDATION_FAIL,
+ "gsync running validation failed.");
+ goto out;
+ }
+ if (_gf_false == is_running) {
+ gf_msg_debug("glusterd", 0,
+ "gsync session for %s and %s is"
+ " not running on this node. Hence not restarting.",
+ param->volinfo->volname, slave);
+ ret = 0;
+ goto out;
+ }
+
+ ret = glusterd_get_local_brickpaths(param->volinfo, &path_list);
+ if (!path_list) {
+ gf_msg_debug("glusterd", 0,
+ "This node not being part of"
+ " volume should not be running gsyncd. Hence"
+ " no gsyncd process to restart.");
+ ret = 0;
+ goto out;
+ }
- ret = glusterd_check_restart_gsync_session (param->volinfo, slave,
- param->rsp_dict, path_list,
- conf_path, 0);
- if (ret)
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_GSYNC_RESTART_FAIL,
- "Unable to restart gsync session.");
+ ret = glusterd_check_restart_gsync_session(
+ param->volinfo, slave, param->rsp_dict, path_list, conf_path, 0);
+ if (ret)
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_GSYNC_RESTART_FAIL,
+ "Unable to restart gsync session.");
out:
- gf_msg_debug ("glusterd", 0, "Returning %d.", ret);
- return ret;
+ gf_msg_debug("glusterd", 0, "Returning %d.", ret);
+ return ret;
}
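_glusterd_restart_gsync_session() has the signature of a dict iteration callback: it receives a (dict, key, value, closure) tuple, returns int, and begins by keeping only what follows the first ':' in the value. A minimal mock of that contract (simulated entries and value format; not the gsyncd wiring itself):

#include <stdio.h>
#include <string.h>

/* Mock of the (key, value, data) callback contract used above. The
 * parse keeps only what follows the first ':' in the value. */
static int
restart_session_cb(const char *key, const char *value, void *data)
{
    const char *slave = strchr(value, ':');

    (void)data;
    if (!slave)
        return 0;          /* nothing to do, same as the code above */
    slave++;
    printf("%s -> restart session towards %s\n", key, slave);
    return 0;
}

int
main(void)
{
    /* Simulated dict entries: key/value pairs fed to the callback. */
    restart_session_cb("slave1", "2:ssh://backup::gvol", NULL);
    restart_session_cb("bogus", "no-colon-here", NULL);
    return 0;
}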
/* op-sm */
int
-glusterd_op_perform_add_bricks (glusterd_volinfo_t *volinfo, int32_t count,
- char *bricks, dict_t *dict)
+glusterd_op_perform_add_bricks(glusterd_volinfo_t *volinfo, int32_t count,
+ char *bricks, dict_t *dict)
{
- char *brick = NULL;
- int32_t i = 1;
- char *brick_list = NULL;
- char *free_ptr1 = NULL;
- char *free_ptr2 = NULL;
- char *saveptr = NULL;
- int32_t ret = -1;
- int32_t stripe_count = 0;
- int32_t replica_count = 0;
- int32_t arbiter_count = 0;
- int32_t type = 0;
- glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_gsync_status_temp_t param = {0, };
- gf_boolean_t restart_needed = 0;
- int caps = 0;
- int brickid = 0;
- char key[64] = "";
- char *brick_mount_dir = NULL;
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- gf_boolean_t is_valid_add_brick = _gf_false;
- struct statvfs brickstat = {0,};
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (volinfo);
-
- conf = this->private;
- GF_ASSERT (conf);
-
- if (bricks) {
- brick_list = gf_strdup (bricks);
- free_ptr1 = brick_list;
- }
-
- if (count)
- brick = strtok_r (brick_list+1, " \n", &saveptr);
-
- if (dict) {
- ret = dict_get_int32n (dict, "stripe-count",
- SLEN ("stripe-count"), &stripe_count);
- if (!ret)
- gf_msg (THIS->name, GF_LOG_INFO, errno,
- GD_MSG_DICT_GET_SUCCESS,
- "stripe-count is set %d", stripe_count);
-
- ret = dict_get_int32n (dict, "replica-count",
- SLEN ("replica-count"), &replica_count);
- if (!ret)
- gf_msg (THIS->name, GF_LOG_INFO, errno,
- GD_MSG_DICT_GET_SUCCESS,
- "replica-count is set %d", replica_count);
- ret = dict_get_int32n (dict, "arbiter-count",
- SLEN ("arbiter-count"), &arbiter_count);
- if (!ret)
- gf_msg (THIS->name, GF_LOG_INFO, errno,
- GD_MSG_DICT_GET_SUCCESS,
- "arbiter-count is set %d", arbiter_count);
- ret = dict_get_int32n (dict, "type", SLEN ("type"), &type);
- if (!ret)
- gf_msg (THIS->name, GF_LOG_INFO, errno,
- GD_MSG_DICT_GET_SUCCESS,
- "type is set %d, need to change it", type);
- }
-
- brickid = glusterd_get_next_available_brickid (volinfo);
- if (brickid < 0)
- goto out;
- while ( i <= count) {
- ret = glusterd_brickinfo_new_from_brick (brick, &brickinfo,
- _gf_true, NULL);
- if (ret)
- goto out;
-
- GLUSTERD_ASSIGN_BRICKID_TO_BRICKINFO (brickinfo, volinfo,
- brickid++);
-
- /* A bricks mount dir is required only by snapshots which were
- * introduced in gluster-3.6.0
- */
- if (conf->op_version >= GD_OP_VERSION_3_6_0) {
- brick_mount_dir = NULL;
-
- snprintf (key, sizeof(key), "brick%d.mount_dir", i);
- ret = dict_get_str (dict, key, &brick_mount_dir);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED,
- "%s not present", key);
- goto out;
- }
- strncpy (brickinfo->mount_dir, brick_mount_dir,
- sizeof(brickinfo->mount_dir));
- }
+ char *brick = NULL;
+ int32_t i = 1;
+ char *brick_list = NULL;
+ char *free_ptr1 = NULL;
+ char *free_ptr2 = NULL;
+ char *saveptr = NULL;
+ int32_t ret = -1;
+ int32_t stripe_count = 0;
+ int32_t replica_count = 0;
+ int32_t arbiter_count = 0;
+ int32_t type = 0;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_gsync_status_temp_t param = {
+ 0,
+ };
+ gf_boolean_t restart_needed = 0;
+ int caps = 0;
+ int brickid = 0;
+ char key[64] = "";
+ char *brick_mount_dir = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ gf_boolean_t is_valid_add_brick = _gf_false;
+ struct statvfs brickstat = {
+ 0,
+ };
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(volinfo);
+
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ if (bricks) {
+ brick_list = gf_strdup(bricks);
+ free_ptr1 = brick_list;
+ }
+
+ if (count)
+ brick = strtok_r(brick_list + 1, " \n", &saveptr);
+
+ if (dict) {
+ ret = dict_get_int32n(dict, "stripe-count", SLEN("stripe-count"),
+ &stripe_count);
+ if (!ret)
+ gf_msg(THIS->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS,
+ "stripe-count is set %d", stripe_count);
- ret = glusterd_resolve_brick (brickinfo);
- if (ret)
- goto out;
-
- if (!gf_uuid_compare (brickinfo->uuid, MY_UUID)) {
- ret = sys_statvfs (brickinfo->path, &brickstat);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_STATVFS_FAILED,
- "Failed to fetch disk utilization "
- "from the brick (%s:%s). Please check the health of "
- "the brick. Error code was %s",
- brickinfo->hostname, brickinfo->path,
- strerror (errno));
-
- goto out;
- }
- brickinfo->statfs_fsid = brickstat.f_fsid;
- }
- /* hot tier bricks are added to head of brick list */
- if (dict_getn (dict, "attach-tier", SLEN ("attach-tier"))) {
- cds_list_add (&brickinfo->brick_list, &volinfo->bricks);
- } else if (stripe_count || replica_count) {
- add_brick_at_right_order (brickinfo, volinfo, (i - 1),
- stripe_count, replica_count);
- } else {
- cds_list_add_tail (&brickinfo->brick_list,
- &volinfo->bricks);
- }
- brick = strtok_r (NULL, " \n", &saveptr);
- i++;
- volinfo->brick_count++;
+ ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
+ &replica_count);
+ if (!ret)
+ gf_msg(THIS->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS,
+ "replica-count is set %d", replica_count);
+ ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"),
+ &arbiter_count);
+ if (!ret)
+ gf_msg(THIS->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS,
+ "arbiter-count is set %d", arbiter_count);
+ ret = dict_get_int32n(dict, "type", SLEN("type"), &type);
+ if (!ret)
+ gf_msg(THIS->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS,
+ "type is set %d, need to change it", type);
+ }
+
+ brickid = glusterd_get_next_available_brickid(volinfo);
+ if (brickid < 0)
+ goto out;
+ while (i <= count) {
+ ret = glusterd_brickinfo_new_from_brick(brick, &brickinfo, _gf_true,
+ NULL);
+ if (ret)
+ goto out;
- }
+ GLUSTERD_ASSIGN_BRICKID_TO_BRICKINFO(brickinfo, volinfo, brickid++);
- /* Gets changed only if the options are given in add-brick cli */
- if (type)
- volinfo->type = type;
- /* performance.client-io-threads is turned on by default,
- * however this has adverse effects on replicate volumes due to
- * replication design issues, till that get addressed
- * performance.client-io-threads option is turned off for all
- * replicate volumes if not already explicitly enabled.
+        /* A brick's mount dir is required only by snapshots, which were
+ * introduced in gluster-3.6.0
*/
- if (type && glusterd_is_volume_replicate (volinfo) &&
- conf->op_version >= GD_OP_VERSION_3_12_2) {
- ret = dict_set_nstrn (volinfo->dict,
- "performance.client-io-threads",
- SLEN ("performance.client-io-threads"),
- "off", SLEN ("off"));
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "Failed to set "
- "performance.client-io-threads to off");
- goto out;
- }
- }
+ if (conf->op_version >= GD_OP_VERSION_3_6_0) {
+ brick_mount_dir = NULL;
- if (replica_count) {
- volinfo->replica_count = replica_count;
- }
- if (arbiter_count) {
- volinfo->arbiter_count = arbiter_count;
- }
- if (stripe_count) {
- volinfo->stripe_count = stripe_count;
+ snprintf(key, sizeof(key), "brick%d.mount_dir", i);
+ ret = dict_get_str(dict, key, &brick_mount_dir);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "%s not present", key);
+ goto out;
+ }
+ strncpy(brickinfo->mount_dir, brick_mount_dir,
+ sizeof(brickinfo->mount_dir));
}
- volinfo->dist_leaf_count = glusterd_get_dist_leaf_count (volinfo);
-
- /* backward compatibility */
- volinfo->sub_count = ((volinfo->dist_leaf_count == 1) ? 0:
- volinfo->dist_leaf_count);
-
- volinfo->subvol_count = (volinfo->brick_count /
- volinfo->dist_leaf_count);
- ret = 0;
- if (GLUSTERD_STATUS_STARTED != volinfo->status)
- goto generate_volfiles;
-
- ret = generate_brick_volfiles (volinfo);
+ ret = glusterd_resolve_brick(brickinfo);
if (ret)
- goto out;
-
- brick_list = gf_strdup (bricks);
- free_ptr2 = brick_list;
- i = 1;
-
- if (count)
- brick = strtok_r (brick_list+1, " \n", &saveptr);
+ goto out;
+
+ if (!gf_uuid_compare(brickinfo->uuid, MY_UUID)) {
+ ret = sys_statvfs(brickinfo->path, &brickstat);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_STATVFS_FAILED,
+ "Failed to fetch disk utilization "
+ "from the brick (%s:%s). Please check the health of "
+ "the brick. Error code was %s",
+ brickinfo->hostname, brickinfo->path, strerror(errno));
+
+ goto out;
+ }
+ brickinfo->statfs_fsid = brickstat.f_fsid;
+ }
+ /* hot tier bricks are added to head of brick list */
+ if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) {
+ cds_list_add(&brickinfo->brick_list, &volinfo->bricks);
+ } else if (stripe_count || replica_count) {
+ add_brick_at_right_order(brickinfo, volinfo, (i - 1), stripe_count,
+ replica_count);
+ } else {
+ cds_list_add_tail(&brickinfo->brick_list, &volinfo->bricks);
+ }
+ brick = strtok_r(NULL, " \n", &saveptr);
+ i++;
+ volinfo->brick_count++;
+ }
+
+ /* Gets changed only if the options are given in add-brick cli */
+ if (type)
+ volinfo->type = type;
+    /* performance.client-io-threads is turned on by default;
+     * however, this has adverse effects on replicate volumes due to
+     * replication design issues. Until those are addressed, the
+     * performance.client-io-threads option is turned off for all
+     * replicate volumes if not already explicitly enabled.
+ */
+ if (type && glusterd_is_volume_replicate(volinfo) &&
+ conf->op_version >= GD_OP_VERSION_3_12_2) {
+ ret = dict_set_nstrn(volinfo->dict, "performance.client-io-threads",
+ SLEN("performance.client-io-threads"), "off",
+ SLEN("off"));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set "
+ "performance.client-io-threads to off");
+ goto out;
+ }
+ }
+
+ if (replica_count) {
+ volinfo->replica_count = replica_count;
+ }
+ if (arbiter_count) {
+ volinfo->arbiter_count = arbiter_count;
+ }
+ if (stripe_count) {
+ volinfo->stripe_count = stripe_count;
+ }
+ volinfo->dist_leaf_count = glusterd_get_dist_leaf_count(volinfo);
+
+ /* backward compatibility */
+ volinfo->sub_count = ((volinfo->dist_leaf_count == 1)
+ ? 0
+ : volinfo->dist_leaf_count);
+
+ volinfo->subvol_count = (volinfo->brick_count / volinfo->dist_leaf_count);
+
+ ret = 0;
+ if (GLUSTERD_STATUS_STARTED != volinfo->status)
+ goto generate_volfiles;
+
+ ret = generate_brick_volfiles(volinfo);
+ if (ret)
+ goto out;
+
+ brick_list = gf_strdup(bricks);
+ free_ptr2 = brick_list;
+ i = 1;
+
+ if (count)
+ brick = strtok_r(brick_list + 1, " \n", &saveptr);
#ifdef HAVE_BD_XLATOR
- if (brickinfo->vg[0])
- caps = CAPS_BD | CAPS_THIN |
- CAPS_OFFLOAD_COPY | CAPS_OFFLOAD_SNAPSHOT;
+ if (brickinfo->vg[0])
+ caps = CAPS_BD | CAPS_THIN | CAPS_OFFLOAD_COPY | CAPS_OFFLOAD_SNAPSHOT;
#endif
- /* This check needs to be added to distinguish between
- * attach-tier commands and add-brick commands.
- * When a tier is attached, adding is done via add-brick
- * and setting of pending xattrs shouldn't be done for
- * attach-tiers as they are virtually new volumes.
- */
- if (glusterd_is_volume_replicate (volinfo)) {
- if (replica_count &&
- !dict_getn (dict, "attach-tier", SLEN ("attach-tier")) &&
- conf->op_version >= GD_OP_VERSION_3_7_10) {
- is_valid_add_brick = _gf_true;
- ret = generate_dummy_client_volfiles (volinfo);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLFILE_CREATE_FAIL,
- "Failed to create volfile.");
- goto out;
- }
- }
- }
-
- while (i <= count) {
- ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo,
- &brickinfo,
- _gf_true);
- if (ret)
- goto out;
+ /* This check needs to be added to distinguish between
+ * attach-tier commands and add-brick commands.
+ * When a tier is attached, adding is done via add-brick
+ * and setting of pending xattrs shouldn't be done for
+ * attach-tiers as they are virtually new volumes.
+ */
+ if (glusterd_is_volume_replicate(volinfo)) {
+ if (replica_count &&
+ !dict_getn(dict, "attach-tier", SLEN("attach-tier")) &&
+ conf->op_version >= GD_OP_VERSION_3_7_10) {
+ is_valid_add_brick = _gf_true;
+ ret = generate_dummy_client_volfiles(volinfo);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
+ "Failed to create volfile.");
+ goto out;
+ }
+ }
+ }
+
+ while (i <= count) {
+ ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo,
+ _gf_true);
+ if (ret)
+ goto out;
#ifdef HAVE_BD_XLATOR
- char msg[1024] = "";
- /* Check for VG/thin pool if its BD volume */
- if (brickinfo->vg[0]) {
- ret = glusterd_is_valid_vg (brickinfo, 0, msg);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_CRITICAL, 0,
- GD_MSG_INVALID_VG, "%s", msg);
- goto out;
- }
- /* if anyone of the brick does not have thin support,
- disable it for entire volume */
- caps &= brickinfo->caps;
- } else
- caps = 0;
+ char msg[1024] = "";
+ /* Check for VG/thin pool if its BD volume */
+ if (brickinfo->vg[0]) {
+ ret = glusterd_is_valid_vg(brickinfo, 0, msg);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_CRITICAL, 0, GD_MSG_INVALID_VG, "%s",
+ msg);
+ goto out;
+ }
+            /* if any one of the bricks does not have thin support,
+               disable it for the entire volume */
+ caps &= brickinfo->caps;
+ } else
+ caps = 0;
#endif
- if (gf_uuid_is_null (brickinfo->uuid)) {
- ret = glusterd_resolve_brick (brickinfo);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_RESOLVE_BRICK_FAIL,
- FMTSTR_RESOLVE_BRICK,
- brickinfo->hostname, brickinfo->path);
- goto out;
- }
- }
-
- /* if the volume is a replicate volume, do: */
- if (is_valid_add_brick) {
- if (!gf_uuid_compare (brickinfo->uuid, MY_UUID)) {
- ret = glusterd_handle_replicate_brick_ops (
- volinfo, brickinfo,
- GD_OP_ADD_BRICK);
- if (ret < 0)
- goto out;
- }
- }
- ret = glusterd_brick_start (volinfo, brickinfo,
- _gf_true, _gf_false);
- if (ret)
- goto out;
- i++;
- brick = strtok_r (NULL, " \n", &saveptr);
-
- /* Check if the brick is added in this node, and set
- * the restart_needed flag. */
- if ((!gf_uuid_compare (brickinfo->uuid, MY_UUID)) &&
- !restart_needed) {
- restart_needed = 1;
- gf_msg_debug ("glusterd", 0,
- "Restart gsyncd session, if it's already "
- "running.");
- }
+ if (gf_uuid_is_null(brickinfo->uuid)) {
+ ret = glusterd_resolve_brick(brickinfo);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_RESOLVE_BRICK_FAIL,
+ FMTSTR_RESOLVE_BRICK, brickinfo->hostname,
+ brickinfo->path);
+ goto out;
+ }
}
- /* If the restart_needed flag is set, restart gsyncd sessions for that
- * particular master with all the slaves. */
- if (restart_needed) {
- param.rsp_dict = dict;
- param.volinfo = volinfo;
- dict_foreach (volinfo->gsync_slaves,
- _glusterd_restart_gsync_session, &param);
+ /* if the volume is a replicate volume, do: */
+ if (is_valid_add_brick) {
+ if (!gf_uuid_compare(brickinfo->uuid, MY_UUID)) {
+ ret = glusterd_handle_replicate_brick_ops(volinfo, brickinfo,
+ GD_OP_ADD_BRICK);
+ if (ret < 0)
+ goto out;
+ }
}
- volinfo->caps = caps;
+ ret = glusterd_brick_start(volinfo, brickinfo, _gf_true, _gf_false);
+ if (ret)
+ goto out;
+ i++;
+ brick = strtok_r(NULL, " \n", &saveptr);
+
+ /* Check if the brick is added in this node, and set
+ * the restart_needed flag. */
+ if ((!gf_uuid_compare(brickinfo->uuid, MY_UUID)) && !restart_needed) {
+ restart_needed = 1;
+ gf_msg_debug("glusterd", 0,
+ "Restart gsyncd session, if it's already "
+ "running.");
+ }
+ }
+
+ /* If the restart_needed flag is set, restart gsyncd sessions for that
+ * particular master with all the slaves. */
+ if (restart_needed) {
+ param.rsp_dict = dict;
+ param.volinfo = volinfo;
+ dict_foreach(volinfo->gsync_slaves, _glusterd_restart_gsync_session,
+ &param);
+ }
+ volinfo->caps = caps;
generate_volfiles:
- if (conf->op_version <= GD_OP_VERSION_3_7_5) {
- ret = glusterd_create_volfiles_and_notify_services (volinfo);
- } else {
- /*
- * The cluster is operating at version greater than
- * gluster-3.7.5. So no need to sent volfile fetch
- * request in commit phase, the same will be done
- * in post validate phase with v3 framework.
- */
- }
+ if (conf->op_version <= GD_OP_VERSION_3_7_5) {
+ ret = glusterd_create_volfiles_and_notify_services(volinfo);
+ } else {
+ /*
+ * The cluster is operating at version greater than
+         * gluster-3.7.5. So there is no need to send a volfile fetch
+         * request in the commit phase; the same will be done
+         * in the post-validate phase with the v3 framework.
+ */
+ }
out:
- GF_FREE (free_ptr1);
- GF_FREE (free_ptr2);
+ GF_FREE(free_ptr1);
+ GF_FREE(free_ptr2);
- gf_msg_debug ("glusterd", 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug("glusterd", 0, "Returning %d", ret);
+ return ret;
}
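
Two details of the function above are worth calling out. First, the "bricks" value is one flat string with a leading separator, which is why scanning starts at brick_list + 1; strtok_r mutates the copy as it walks it, so free_ptr1/free_ptr2 hold the heads of the strdup'd buffers for the eventual GF_FREE. A minimal standalone sketch of that parsing pattern, assuming only the " host:/path host:/path" layout seen here (plain C, hypothetical brick names):

    /* Sketch: tokenize a space/newline-separated brick list the way
     * glusterd_op_perform_add_bricks does. The input is assumed to begin
     * with a separator, hence the "+ 1"; the head of the strdup'd buffer
     * is kept so it can be freed after strtok_r has mangled the copy. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int
    main(void)
    {
        const char *bricks = " node1:/data/brick1 node2:/data/brick2";
        char *brick_list = strdup(bricks);
        char *free_ptr = brick_list; /* strtok_r advances brick_list */
        char *saveptr = NULL;
        char *brick = strtok_r(brick_list + 1, " \n", &saveptr);

        while (brick) {
            printf("would add brick: %s\n", brick);
            brick = strtok_r(NULL, " \n", &saveptr);
        }
        free(free_ptr);
        return 0;
    }

Second, the leaf-count arithmetic near the end: assuming glusterd_get_dist_leaf_count returns the number of bricks per distribute leaf (replica_count, scaled by stripe_count for striped layouts), a replica 3 volume grown to brick_count = 6 gets subvol_count = 6 / 3 = 2, while sub_count mirrors dist_leaf_count (or stays 0 for plain distribute) purely for backward compatibility.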
-
int
-glusterd_op_perform_remove_brick (glusterd_volinfo_t *volinfo, char *brick,
- int force, int *need_migrate)
+glusterd_op_perform_remove_brick(glusterd_volinfo_t *volinfo, char *brick,
+ int force, int *need_migrate)
{
- glusterd_brickinfo_t *brickinfo = NULL;
- int32_t ret = -1;
- glusterd_conf_t *priv = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ int32_t ret = -1;
+ glusterd_conf_t *priv = NULL;
- GF_ASSERT (volinfo);
- GF_ASSERT (brick);
+ GF_ASSERT(volinfo);
+ GF_ASSERT(brick);
- priv = THIS->private;
- GF_ASSERT (priv);
+ priv = THIS->private;
+ GF_ASSERT(priv);
- ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo,
- &brickinfo,
- _gf_false);
- if (ret)
- goto out;
+ ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo,
+ _gf_false);
+ if (ret)
+ goto out;
- ret = glusterd_resolve_brick (brickinfo);
- if (ret)
- goto out;
+ ret = glusterd_resolve_brick(brickinfo);
+ if (ret)
+ goto out;
- glusterd_volinfo_reset_defrag_stats (volinfo);
+ glusterd_volinfo_reset_defrag_stats(volinfo);
- if (!gf_uuid_compare (brickinfo->uuid, MY_UUID)) {
- /* Only if the brick is in this glusterd, do the rebalance */
- if (need_migrate)
- *need_migrate = 1;
- }
+ if (!gf_uuid_compare(brickinfo->uuid, MY_UUID)) {
+ /* Only if the brick is in this glusterd, do the rebalance */
+ if (need_migrate)
+ *need_migrate = 1;
+ }
- if (force) {
- ret = glusterd_brick_stop (volinfo, brickinfo,
- _gf_true);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_STOP_FAIL, "Unable to stop "
- "glusterfs, ret: %d", ret);
- }
- goto out;
+ if (force) {
+ ret = glusterd_brick_stop(volinfo, brickinfo, _gf_true);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_STOP_FAIL,
+ "Unable to stop "
+ "glusterfs, ret: %d",
+ ret);
}
+ goto out;
+ }
- brickinfo->decommissioned = 1;
- ret = 0;
+ brickinfo->decommissioned = 1;
+ ret = 0;
out:
- gf_msg_debug ("glusterd", 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug("glusterd", 0, "Returning %d", ret);
+ return ret;
}
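
The control flow above encodes the remove-brick contract: need_migrate is reported only when the brick is hosted by this glusterd, and "force" stops the brick immediately while the default path merely marks it decommissioned so rebalance can drain it before the final commit. A simplified sketch of that decision, with plain stand-ins for the glusterd types:

    /* Sketch of the remove-brick decision above: "force" stops the brick
     * immediately and skips migration; otherwise the brick is only marked
     * decommissioned and data is expected to be migrated off it first.
     * The struct and flags are simplified stand-ins, not glusterd's. */
    #include <stdbool.h>
    #include <stdio.h>

    struct brick {
        bool local;         /* brick hosted by this node? */
        int decommissioned;
    };

    static int
    perform_remove_brick(struct brick *b, int force, int *need_migrate)
    {
        if (b->local && need_migrate)
            *need_migrate = 1; /* rebalance only runs where the brick lives */

        if (force) {
            printf("stopping brick immediately\n");
            return 0;
        }
        b->decommissioned = 1; /* migration happens before the final commit */
        return 0;
    }

    int
    main(void)
    {
        struct brick b = {.local = true, .decommissioned = 0};
        int need_migrate = 0;
        perform_remove_brick(&b, 0, &need_migrate);
        printf("decommissioned=%d need_migrate=%d\n", b.decommissioned,
               need_migrate);
        return 0;
    }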
int
-glusterd_op_stage_add_brick (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
+glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
- int ret = 0;
- char *volname = NULL;
- int count = 0;
- int replica_count = 0;
- int arbiter_count = 0;
- int i = 0;
- int32_t local_brick_count = 0;
- char *bricks = NULL;
- char *brick_list = NULL;
- char *saveptr = NULL;
- char *free_ptr = NULL;
- char *brick = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
- char msg[4096] = "";
- char key[64] = "";
- gf_boolean_t brick_alloc = _gf_false;
- char *all_bricks = NULL;
- char *str_ret = NULL;
- gf_boolean_t is_force = _gf_false;
- glusterd_conf_t *conf = NULL;
- int32_t len = 0;
-
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
- GF_ASSERT (conf);
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED,
- "Unable to get volume name");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
+ int ret = 0;
+ char *volname = NULL;
+ int count = 0;
+ int replica_count = 0;
+ int arbiter_count = 0;
+ int i = 0;
+ int32_t local_brick_count = 0;
+ char *bricks = NULL;
+ char *brick_list = NULL;
+ char *saveptr = NULL;
+ char *free_ptr = NULL;
+ char *brick = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+ char msg[4096] = "";
+ char key[64] = "";
+ gf_boolean_t brick_alloc = _gf_false;
+ char *all_bricks = NULL;
+ char *str_ret = NULL;
+ gf_boolean_t is_force = _gf_false;
+ glusterd_conf_t *conf = NULL;
+ int32_t len = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ "Unable to find volume: %s", volname);
+ goto out;
+ }
+
+ ret = glusterd_validate_volume_id(dict, volinfo);
+ if (ret)
+ goto out;
+
+ ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
+ &replica_count);
+ if (ret) {
+ gf_msg_debug(THIS->name, 0, "Unable to get replica count");
+ }
+
+ ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"),
+ &arbiter_count);
+ if (ret) {
+ gf_msg_debug(THIS->name, 0, "No arbiter count present in the dict");
+ }
+
+ if (replica_count > 0) {
+ ret = op_version_check(this, GD_OP_VER_PERSISTENT_AFR_XATTRS, msg,
+ sizeof(msg));
if (ret) {
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND,
- "Unable to find volume: %s", volname);
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERSION_MISMATCH,
+ "%s", msg);
+ *op_errstr = gf_strdup(msg);
+ goto out;
}
+ }
- ret = glusterd_validate_volume_id (dict, volinfo);
- if (ret)
- goto out;
-
- ret = dict_get_int32n (dict, "replica-count", SLEN ("replica-count"),
- &replica_count);
- if (ret) {
- gf_msg_debug (THIS->name, 0,
- "Unable to get replica count");
- }
-
- ret = dict_get_int32n (dict, "arbiter-count",
- SLEN ("arbiter-count"), &arbiter_count);
- if (ret) {
- gf_msg_debug (THIS->name, 0,
- "No arbiter count present in the dict");
- }
-
- if (replica_count > 0) {
- ret = op_version_check (this, GD_OP_VER_PERSISTENT_AFR_XATTRS,
- msg, sizeof(msg));
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_OP_VERSION_MISMATCH, "%s", msg);
- *op_errstr = gf_strdup (msg);
- goto out;
- }
- }
-
- if (glusterd_is_volume_replicate (volinfo)) {
- /* Do not allow add-brick for stopped volumes when replica-count
- * is being increased.
- */
- if (conf->op_version >= GD_OP_VERSION_3_7_10 &&
- !dict_getn (dict, "attach-tier", SLEN ("attach-tier")) &&
- replica_count &&
- GLUSTERD_STATUS_STOPPED == volinfo->status) {
- ret = -1;
- snprintf (msg, sizeof (msg), " Volume must not be in"
- " stopped state when replica-count needs to "
- " be increased.");
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_ADD_FAIL, "%s", msg);
- *op_errstr = gf_strdup (msg);
- goto out;
- }
- /* op-version check for replica 2 to arbiter conversion. If we
- * don't have this check, an older peer added as arbiter brick
- * will not have the arbiter xlator in its volfile. */
- if ((conf->op_version < GD_OP_VERSION_3_8_0) &&
- (arbiter_count == 1) && (replica_count == 3)) {
- ret = -1;
- snprintf (msg, sizeof (msg), "Cluster op-version must "
- "be >= 30800 to add arbiter brick to a "
- "replica 2 volume.");
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_ADD_FAIL, "%s", msg);
- *op_errstr = gf_strdup (msg);
- goto out;
- }
- /* Do not allow increasing replica count for arbiter volumes. */
- if (replica_count && volinfo->arbiter_count) {
- ret = -1;
- snprintf (msg, sizeof (msg), "Increasing replica count "
- "for arbiter volumes is not supported.");
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_ADD_FAIL, "%s", msg);
- *op_errstr = gf_strdup (msg);
- goto out;
- }
- }
-
- is_force = dict_get_str_boolean (dict, "force", _gf_false);
-
- if (volinfo->replica_count < replica_count && !is_force) {
- cds_list_for_each_entry (brickinfo, &volinfo->bricks,
- brick_list) {
- if (gf_uuid_compare (brickinfo->uuid, MY_UUID))
- continue;
- if (brickinfo->status == GF_BRICK_STOPPED) {
- ret = -1;
- len = snprintf (msg, sizeof (msg), "Brick %s "
- "is down, changing replica "
- "count needs all the bricks "
- "to be up to avoid data loss",
- brickinfo->path);
- if (len < 0) {
- strcpy(msg, "<error>");
- }
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_ADD_FAIL, "%s", msg);
- *op_errstr = gf_strdup (msg);
- goto out;
- }
- }
- }
-
- if (conf->op_version > GD_OP_VERSION_3_7_5 &&
- is_origin_glusterd (dict)) {
- ret = glusterd_validate_quorum (this, GD_OP_ADD_BRICK, dict,
- op_errstr);
- if (ret) {
- gf_msg (this->name, GF_LOG_CRITICAL, 0,
- GD_MSG_SERVER_QUORUM_NOT_MET,
- "Server quorum not met. Rejecting operation.");
- goto out;
- }
- } else {
- /* Case 1: conf->op_version <= GD_OP_VERSION_3_7_5
- * in this case the add-brick is running
- * syncop framework that will do a quorum
- * check by default
- * Case 2: We don't need to do quorum check on every
- * node, only originator glusterd need to
- * check for quorum
- * So nothing need to be done in else
- */
- }
-
- if (glusterd_is_defrag_on(volinfo)) {
- snprintf (msg, sizeof(msg), "Volume name %s rebalance is in "
- "progress. Please retry after completion", volname);
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_OIP_RETRY_LATER, "%s", msg);
- *op_errstr = gf_strdup (msg);
+ if (glusterd_is_volume_replicate(volinfo)) {
+ /* Do not allow add-brick for stopped volumes when replica-count
+ * is being increased.
+ */
+ if (conf->op_version >= GD_OP_VERSION_3_7_10 &&
+ !dict_getn(dict, "attach-tier", SLEN("attach-tier")) &&
+ replica_count && GLUSTERD_STATUS_STOPPED == volinfo->status) {
+ ret = -1;
+            snprintf(msg, sizeof(msg),
+                     "Volume must not be in"
+                     " stopped state when replica-count needs to"
+                     " be increased.");
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
+ msg);
+ *op_errstr = gf_strdup(msg);
+ goto out;
+ }
+ /* op-version check for replica 2 to arbiter conversion. If we
+ * don't have this check, an older peer added as arbiter brick
+ * will not have the arbiter xlator in its volfile. */
+ if ((conf->op_version < GD_OP_VERSION_3_8_0) && (arbiter_count == 1) &&
+ (replica_count == 3)) {
+ ret = -1;
+ snprintf(msg, sizeof(msg),
+ "Cluster op-version must "
+ "be >= 30800 to add arbiter brick to a "
+ "replica 2 volume.");
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
+ msg);
+ *op_errstr = gf_strdup(msg);
+ goto out;
+ }
+ /* Do not allow increasing replica count for arbiter volumes. */
+ if (replica_count && volinfo->arbiter_count) {
+ ret = -1;
+ snprintf(msg, sizeof(msg),
+ "Increasing replica count "
+ "for arbiter volumes is not supported.");
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
+ msg);
+ *op_errstr = gf_strdup(msg);
+ goto out;
+ }
+ }
+
+ is_force = dict_get_str_boolean(dict, "force", _gf_false);
+
+ if (volinfo->replica_count < replica_count && !is_force) {
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ if (gf_uuid_compare(brickinfo->uuid, MY_UUID))
+ continue;
+ if (brickinfo->status == GF_BRICK_STOPPED) {
ret = -1;
- goto out;
- }
-
- if (dict_getn (dict, "attach-tier", SLEN ("attach-tier"))) {
-
- /*
- * This check is needed because of add/remove brick
- * is not supported on a tiered volume. So once a tier
- * is attached we cannot commit or stop the remove-brick
- * task. Please change this comment once we start supporting
- * add/remove brick on a tiered volume.
- */
- if (!gd_is_remove_brick_committed (volinfo)) {
-
- snprintf (msg, sizeof (msg), "An earlier remove-brick "
- "task exists for volume %s. Either commit it"
- " or stop it before attaching a tier.",
- volinfo->volname);
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_OLD_REMOVE_BRICK_EXISTS, "%s", msg);
- *op_errstr = gf_strdup (msg);
- ret = -1;
- goto out;
+ len = snprintf(msg, sizeof(msg),
+ "Brick %s "
+ "is down, changing replica "
+ "count needs all the bricks "
+ "to be up to avoid data loss",
+ brickinfo->path);
+ if (len < 0) {
+ strcpy(msg, "<error>");
}
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
+ msg);
+ *op_errstr = gf_strdup(msg);
+ goto out;
+ }
}
+ }
- ret = dict_get_int32n (dict, "count", SLEN ("count"), &count);
+ if (conf->op_version > GD_OP_VERSION_3_7_5 && is_origin_glusterd(dict)) {
+ ret = glusterd_validate_quorum(this, GD_OP_ADD_BRICK, dict, op_errstr);
if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to get count");
- goto out;
- }
+ gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_SERVER_QUORUM_NOT_MET,
+ "Server quorum not met. Rejecting operation.");
+ goto out;
+ }
+ } else {
+ /* Case 1: conf->op_version <= GD_OP_VERSION_3_7_5
+ * in this case the add-brick is running
+ * syncop framework that will do a quorum
+ * check by default
+ * Case 2: We don't need to do quorum check on every
+ * node, only originator glusterd need to
+ * check for quorum
+ * So nothing need to be done in else
+ */
+ }
+
+ if (glusterd_is_defrag_on(volinfo)) {
+ snprintf(msg, sizeof(msg),
+ "Volume name %s rebalance is in "
+ "progress. Please retry after completion",
+ volname);
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_OIP_RETRY_LATER, "%s", msg);
+ *op_errstr = gf_strdup(msg);
+ ret = -1;
+ goto out;
+ }
- ret = dict_get_strn (dict, "bricks", SLEN ("bricks"), &bricks);
+ if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) {
+ /*
+         * This check is needed because add/remove brick
+         * is not supported on a tiered volume. So once a tier
+         * is attached, we cannot commit or stop the remove-brick
+ * task. Please change this comment once we start supporting
+ * add/remove brick on a tiered volume.
+ */
+ if (!gd_is_remove_brick_committed(volinfo)) {
+ snprintf(msg, sizeof(msg),
+ "An earlier remove-brick "
+ "task exists for volume %s. Either commit it"
+ " or stop it before attaching a tier.",
+ volinfo->volname);
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_OLD_REMOVE_BRICK_EXISTS,
+ "%s", msg);
+ *op_errstr = gf_strdup(msg);
+ ret = -1;
+ goto out;
+ }
+ }
+
+ ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get count");
+ goto out;
+ }
+
+ ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get bricks");
+ goto out;
+ }
+
+ if (bricks) {
+ brick_list = gf_strdup(bricks);
+ all_bricks = gf_strdup(bricks);
+ free_ptr = brick_list;
+ }
+
+ if (count)
+ brick = strtok_r(brick_list + 1, " \n", &saveptr);
+
+ while (i < count) {
+ if (!glusterd_store_is_valid_brickpath(volname, brick) ||
+ !glusterd_is_valid_volfpath(volname, brick)) {
+ snprintf(msg, sizeof(msg),
+ "brick path %s is "
+ "too long",
+ brick);
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRKPATH_TOO_LONG, "%s",
+ msg);
+ *op_errstr = gf_strdup(msg);
+
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_brickinfo_new_from_brick(brick, &brickinfo, _gf_true,
+ NULL);
if (ret) {
- gf_msg (THIS->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to get bricks");
- goto out;
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_NOT_FOUND,
+ "Add-brick: Unable"
+ " to get brickinfo");
+ goto out;
}
+ brick_alloc = _gf_true;
- if (bricks) {
- brick_list = gf_strdup (bricks);
- all_bricks = gf_strdup (bricks);
- free_ptr = brick_list;
+ ret = glusterd_new_brick_validate(brick, brickinfo, msg, sizeof(msg),
+ NULL);
+ if (ret) {
+ *op_errstr = gf_strdup(msg);
+ ret = -1;
+ goto out;
}
- if (count)
- brick = strtok_r (brick_list+1, " \n", &saveptr);
-
-
- while ( i < count) {
- if (!glusterd_store_is_valid_brickpath (volname, brick) ||
- !glusterd_is_valid_volfpath (volname, brick)) {
- snprintf (msg, sizeof (msg), "brick path %s is "
- "too long", brick);
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_BRKPATH_TOO_LONG, "%s", msg);
- *op_errstr = gf_strdup (msg);
-
- ret = -1;
- goto out;
-
- }
-
- ret = glusterd_brickinfo_new_from_brick (brick, &brickinfo,
- _gf_true, NULL);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_NOT_FOUND,
- "Add-brick: Unable"
- " to get brickinfo");
- goto out;
- }
- brick_alloc = _gf_true;
-
- ret = glusterd_new_brick_validate (brick, brickinfo, msg,
- sizeof (msg), NULL);
+ if (!gf_uuid_compare(brickinfo->uuid, MY_UUID)) {
+#ifdef HAVE_BD_XLATOR
+ if (brickinfo->vg[0]) {
+ ret = glusterd_is_valid_vg(brickinfo, 1, msg);
if (ret) {
- *op_errstr = gf_strdup (msg);
- ret = -1;
- goto out;
+ gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_VG,
+ "%s", msg);
+ *op_errstr = gf_strdup(msg);
+ goto out;
}
-
- if (!gf_uuid_compare (brickinfo->uuid, MY_UUID)) {
-#ifdef HAVE_BD_XLATOR
- if (brickinfo->vg[0]) {
- ret = glusterd_is_valid_vg (brickinfo, 1, msg);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_VG, "%s",
- msg);
- *op_errstr = gf_strdup (msg);
- goto out;
- }
- }
+ }
#endif
- ret = glusterd_validate_and_create_brickpath (brickinfo,
- volinfo->volume_id,
- op_errstr, is_force,
- _gf_false);
- if (ret)
- goto out;
-
- /* A bricks mount dir is required only by snapshots which were
- * introduced in gluster-3.6.0
- */
- if (conf->op_version >= GD_OP_VERSION_3_6_0) {
- ret = glusterd_get_brick_mount_dir
- (brickinfo->path, brickinfo->hostname,
- brickinfo->mount_dir);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_MOUNTDIR_GET_FAIL,
- "Failed to get brick mount_dir");
- goto out;
- }
-
- snprintf (key, sizeof(key), "brick%d.mount_dir",
- i + 1);
- ret = dict_set_dynstr_with_alloc
- (rsp_dict, key, brickinfo->mount_dir);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set %s", key);
- goto out;
- }
- }
-
- local_brick_count = i + 1;
- }
-
- glusterd_brickinfo_delete (brickinfo);
- brick_alloc = _gf_false;
- brickinfo = NULL;
- brick = strtok_r (NULL, " \n", &saveptr);
- i++;
- }
-
- ret = dict_set_int32n (rsp_dict, "brick_count", SLEN ("brick_count"),
- local_brick_count);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set local_brick_count");
+ ret = glusterd_validate_and_create_brickpath(
+ brickinfo, volinfo->volume_id, op_errstr, is_force, _gf_false);
+ if (ret)
goto out;
- }
-out:
- GF_FREE (free_ptr);
- if (brick_alloc && brickinfo)
- glusterd_brickinfo_delete (brickinfo);
- GF_FREE (str_ret);
- GF_FREE (all_bricks);
-
- gf_msg_debug (THIS->name, 0, "Returning %d", ret);
-
- return ret;
-}
-
-int
-glusterd_remove_brick_validate_bricks (gf1_op_commands cmd, int32_t brick_count,
- dict_t *dict,
- glusterd_volinfo_t *volinfo,
- char **errstr,
- gf_cli_defrag_type cmd_defrag)
-{
- char *brick = NULL;
- char msg[2048] = "";
- char key[64] = "";
- int keylen;
- glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- int i = 0;
- int ret = -1;
- char pidfile[PATH_MAX+1] = {0,};
- glusterd_conf_t *priv = THIS->private;
- int pid = -1;
-
- /* Check whether all the nodes of the bricks to be removed are
- * up, if not fail the operation */
- for (i = 1; i <= brick_count; i++) {
- keylen = snprintf (key, sizeof (key), "brick%d", i);
- ret = dict_get_strn (dict, key, keylen, &brick);
+ /* A bricks mount dir is required only by snapshots which were
+ * introduced in gluster-3.6.0
+ */
+ if (conf->op_version >= GD_OP_VERSION_3_6_0) {
+ ret = glusterd_get_brick_mount_dir(
+ brickinfo->path, brickinfo->hostname, brickinfo->mount_dir);
if (ret) {
- snprintf (msg, sizeof (msg),
- "Unable to get %s", key);
- *errstr = gf_strdup (msg);
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_BRICK_MOUNTDIR_GET_FAIL,
+ "Failed to get brick mount_dir");
+ goto out;
}
- ret =
- glusterd_volume_brickinfo_get_by_brick(brick, volinfo,
- &brickinfo,
- _gf_false);
+ snprintf(key, sizeof(key), "brick%d.mount_dir", i + 1);
+ ret = dict_set_dynstr_with_alloc(rsp_dict, key,
+ brickinfo->mount_dir);
if (ret) {
- snprintf (msg, sizeof (msg), "Incorrect brick "
- "%s for volume %s", brick, volinfo->volname);
- *errstr = gf_strdup (msg);
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED, "Failed to set %s", key);
+ goto out;
}
- /* Do not allow commit if the bricks are not decommissioned
- * if its a remove brick commit or detach-tier commit
- */
- if (!brickinfo->decommissioned) {
- if (cmd == GF_OP_CMD_COMMIT) {
- snprintf (msg, sizeof (msg), "Brick %s "
- "is not decommissioned. "
- "Use start or force option", brick);
- *errstr = gf_strdup (msg);
- ret = -1;
- goto out;
- }
+ }
- if (cmd == GF_OP_CMD_DETACH_COMMIT ||
- cmd_defrag == GF_DEFRAG_CMD_DETACH_COMMIT) {
- snprintf (msg, sizeof (msg), "Bricks in Hot "
- "tier are not decommissioned yet. Use "
- "gluster volume tier <VOLNAME> "
- "detach start to start the decommission process");
- *errstr = gf_strdup (msg);
- ret = -1;
- goto out;
- }
- } else {
- if ((cmd == GF_OP_CMD_DETACH_COMMIT ||
- (cmd_defrag == GF_DEFRAG_CMD_DETACH_COMMIT)) &&
- (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_STARTED)) {
- snprintf (msg, sizeof (msg), "Bricks in Hot "
- "tier are not decommissioned yet. Wait for "
- "the detach to complete using gluster volume "
- "tier <VOLNAME> status.");
- *errstr = gf_strdup (msg);
- ret = -1;
- goto out;
- }
- }
+ local_brick_count = i + 1;
+ }
- if (glusterd_is_local_brick (THIS, volinfo, brickinfo)) {
- switch (cmd) {
- case GF_OP_CMD_START:
- case GF_OP_CMD_DETACH_START:
- goto check;
- case GF_OP_CMD_NONE:
- default:
- break;
- }
+ glusterd_brickinfo_delete(brickinfo);
+ brick_alloc = _gf_false;
+ brickinfo = NULL;
+ brick = strtok_r(NULL, " \n", &saveptr);
+ i++;
+ }
- switch (cmd_defrag) {
- case GF_DEFRAG_CMD_DETACH_START:
- break;
- case GF_DEFRAG_CMD_NONE:
- default:
- continue;
- }
-check:
- if (brickinfo->status != GF_BRICK_STARTED) {
- snprintf (msg, sizeof (msg), "Found stopped "
- "brick %s. Use force option to "
- "remove the offline brick" , brick);
- *errstr = gf_strdup (msg);
- ret = -1;
- goto out;
- }
- GLUSTERD_GET_BRICK_PIDFILE (pidfile, volinfo,
- brickinfo, priv);
- if (!gf_is_service_running (pidfile, &pid)) {
- snprintf (msg, sizeof (msg), "Found dead "
- "brick %s", brick);
- *errstr = gf_strdup (msg);
- ret = -1;
- goto out;
- } else {
- ret = 0;
- }
- continue;
- }
-
- rcu_read_lock ();
- peerinfo = glusterd_peerinfo_find_by_uuid
- (brickinfo->uuid);
- if (!peerinfo) {
- snprintf (msg, sizeof(msg), "Host node of the "
- "brick %s is not in cluster", brick);
- *errstr = gf_strdup (msg);
- ret = -1;
- rcu_read_unlock ();
- goto out;
- }
- if (!peerinfo->connected) {
- snprintf (msg, sizeof(msg), "Host node of the "
- "brick %s is down", brick);
- *errstr = gf_strdup (msg);
- ret = -1;
- rcu_read_unlock ();
- goto out;
- }
- rcu_read_unlock ();
- }
+ ret = dict_set_int32n(rsp_dict, "brick_count", SLEN("brick_count"),
+ local_brick_count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Failed to set local_brick_count");
+ goto out;
+ }
out:
- return ret;
+ GF_FREE(free_ptr);
+ if (brick_alloc && brickinfo)
+ glusterd_brickinfo_delete(brickinfo);
+ GF_FREE(str_ret);
+ GF_FREE(all_bricks);
+
+ gf_msg_debug(THIS->name, 0, "Returning %d", ret);
+
+ return ret;
}
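
Several of the staging checks above gate features on the cluster op-version, e.g. rejecting a replica 2 to arbiter conversion below GD_OP_VERSION_3_8_0 because older peers cannot emit the arbiter xlator in their volfiles. A small sketch of that gating, using numeric values that mirror the GD_OP_VERSION_* scheme (the "must be >= 30800" message above confirms 3.8.0 -> 30800; the helper itself is a simplification, not glusterd's API):

    /* Sketch: features are allowed only when every peer in the cluster
     * runs at or above the feature's op-version. */
    #include <stdio.h>

    #define OP_VERSION_3_8_0 30800

    static int
    can_add_arbiter(int cluster_op_version, int replica_count,
                    int arbiter_count)
    {
        /* replica 2 -> arbiter conversion needs the arbiter xlator in the
         * volfile, which pre-3.8.0 peers cannot generate */
        if (arbiter_count == 1 && replica_count == 3 &&
            cluster_op_version < OP_VERSION_3_8_0)
            return 0;
        return 1;
    }

    int
    main(void)
    {
        printf("%d\n", can_add_arbiter(30600, 3, 1)); /* 0: rejected */
        printf("%d\n", can_add_arbiter(30800, 3, 1)); /* 1: allowed */
        return 0;
    }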
int
-glusterd_op_stage_remove_brick (dict_t *dict, char **op_errstr)
+glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count,
+ dict_t *dict, glusterd_volinfo_t *volinfo,
+ char **errstr,
+ gf_cli_defrag_type cmd_defrag)
{
- int ret = -1;
- char *volname = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- char *errstr = NULL;
- int32_t brick_count = 0;
- char msg[2048] = "";
- int32_t flag = 0;
- gf1_op_commands cmd = GF_OP_CMD_NONE;
- char *task_id_str = NULL;
- xlator_t *this = NULL;
- gsync_status_param_t param = {0,};
-
- this = THIS;
- GF_ASSERT (this);
-
- ret = op_version_check (this, GD_OP_VER_PERSISTENT_AFR_XATTRS,
- msg, sizeof(msg));
+ char *brick = NULL;
+ char msg[2048] = "";
+ char key[64] = "";
+ int keylen;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ int i = 0;
+ int ret = -1;
+ char pidfile[PATH_MAX + 1] = {
+ 0,
+ };
+ glusterd_conf_t *priv = THIS->private;
+ int pid = -1;
+
+    /* Check whether all the nodes hosting the bricks to be removed are
+     * up; if not, fail the operation */
+ for (i = 1; i <= brick_count; i++) {
+ keylen = snprintf(key, sizeof(key), "brick%d", i);
+ ret = dict_get_strn(dict, key, keylen, &brick);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_OP_VERSION_MISMATCH, "%s", msg);
- *op_errstr = gf_strdup (msg);
- goto out;
+ snprintf(msg, sizeof(msg), "Unable to get %s", key);
+ *errstr = gf_strdup(msg);
+ goto out;
}
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED, "Unable to get volume name");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
-
+ ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo,
+ _gf_false);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, "Volume %s does not exist", volname);
+ snprintf(msg, sizeof(msg),
+ "Incorrect brick "
+ "%s for volume %s",
+ brick, volinfo->volname);
+ *errstr = gf_strdup(msg);
+ goto out;
+ }
+        /* Do not allow commit if the bricks are not decommissioned
+         * and it is a remove-brick commit or detach-tier commit
+         */
+ if (!brickinfo->decommissioned) {
+ if (cmd == GF_OP_CMD_COMMIT) {
+ snprintf(msg, sizeof(msg),
+ "Brick %s "
+ "is not decommissioned. "
+ "Use start or force option",
+ brick);
+ *errstr = gf_strdup(msg);
+ ret = -1;
goto out;
- }
+ }
- ret = glusterd_validate_volume_id (dict, volinfo);
- if (ret)
+ if (cmd == GF_OP_CMD_DETACH_COMMIT ||
+ cmd_defrag == GF_DEFRAG_CMD_DETACH_COMMIT) {
+ snprintf(msg, sizeof(msg),
+ "Bricks in Hot "
+ "tier are not decommissioned yet. Use "
+ "gluster volume tier <VOLNAME> "
+ "detach start to start the decommission process");
+ *errstr = gf_strdup(msg);
+ ret = -1;
goto out;
-
- ret = dict_get_int32n (dict, "command", SLEN ("command"), &flag);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED,
- "Unable to get brick command");
+ }
+ } else {
+ if ((cmd == GF_OP_CMD_DETACH_COMMIT ||
+ (cmd_defrag == GF_DEFRAG_CMD_DETACH_COMMIT)) &&
+ (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_STARTED)) {
+ snprintf(msg, sizeof(msg),
+ "Bricks in Hot "
+ "tier are not decommissioned yet. Wait for "
+ "the detach to complete using gluster volume "
+ "tier <VOLNAME> status.");
+ *errstr = gf_strdup(msg);
+ ret = -1;
goto out;
- }
- cmd = flag;
-
- ret = dict_get_int32n (dict, "count", SLEN ("count"), &brick_count);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to get brick count");
+ }
+ }
+
+ if (glusterd_is_local_brick(THIS, volinfo, brickinfo)) {
+ switch (cmd) {
+ case GF_OP_CMD_START:
+ case GF_OP_CMD_DETACH_START:
+ goto check;
+ case GF_OP_CMD_NONE:
+ default:
+ break;
+ }
+
+ switch (cmd_defrag) {
+ case GF_DEFRAG_CMD_DETACH_START:
+ break;
+ case GF_DEFRAG_CMD_NONE:
+ default:
+ continue;
+ }
+ check:
+ if (brickinfo->status != GF_BRICK_STARTED) {
+ snprintf(msg, sizeof(msg),
+ "Found stopped "
+ "brick %s. Use force option to "
+ "remove the offline brick",
+ brick);
+ *errstr = gf_strdup(msg);
+ ret = -1;
goto out;
- }
-
- ret = 0;
- if (volinfo->brick_count == brick_count) {
- errstr = gf_strdup ("Deleting all the bricks of the "
- "volume is not allowed");
+ }
+ GLUSTERD_GET_BRICK_PIDFILE(pidfile, volinfo, brickinfo, priv);
+ if (!gf_is_service_running(pidfile, &pid)) {
+ snprintf(msg, sizeof(msg),
+ "Found dead "
+ "brick %s",
+ brick);
+ *errstr = gf_strdup(msg);
ret = -1;
goto out;
- }
+ } else {
+ ret = 0;
+ }
+ continue;
+ }
+
+ rcu_read_lock();
+ peerinfo = glusterd_peerinfo_find_by_uuid(brickinfo->uuid);
+ if (!peerinfo) {
+ snprintf(msg, sizeof(msg),
+ "Host node of the "
+ "brick %s is not in cluster",
+ brick);
+ *errstr = gf_strdup(msg);
+ ret = -1;
+ rcu_read_unlock();
+ goto out;
+ }
+ if (!peerinfo->connected) {
+ snprintf(msg, sizeof(msg),
+ "Host node of the "
+ "brick %s is down",
+ brick);
+ *errstr = gf_strdup(msg);
+ ret = -1;
+ rcu_read_unlock();
+ goto out;
+ }
+ rcu_read_unlock();
+ }
+out:
+ return ret;
+}
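
The liveness test above goes through GLUSTERD_GET_BRICK_PIDFILE and gf_is_service_running: a started brick whose pidfile no longer points at a live process is reported as a dead brick. A rough standalone approximation of such a probe, assuming the usual pidfile convention and a hypothetical path (this is not the libglusterfs implementation):

    /* Sketch of a pidfile liveness probe: read the pid from the pidfile
     * and test it with kill(pid, 0), which performs error checking only
     * and sends no signal. */
    #include <signal.h>
    #include <stdio.h>

    static int
    is_service_running(const char *pidfile, int *pid_out)
    {
        FILE *fp = fopen(pidfile, "r");
        int pid = -1;

        if (!fp)
            return 0;
        if (fscanf(fp, "%d", &pid) != 1)
            pid = -1;
        fclose(fp);

        if (pid_out)
            *pid_out = pid;
        if (pid <= 0)
            return 0;
        return kill(pid, 0) == 0; /* 0 on success: process exists */
    }

    int
    main(void)
    {
        int pid = -1;
        /* hypothetical pidfile path */
        if (!is_service_running("/var/run/gluster/brick.pid", &pid))
            printf("found dead brick (pid %d)\n", pid);
        return 0;
    }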
+
+int
+glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr)
+{
+ int ret = -1;
+ char *volname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ char *errstr = NULL;
+ int32_t brick_count = 0;
+ char msg[2048] = "";
+ int32_t flag = 0;
+ gf1_op_commands cmd = GF_OP_CMD_NONE;
+ char *task_id_str = NULL;
+ xlator_t *this = NULL;
+ gsync_status_param_t param = {
+ 0,
+ };
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = op_version_check(this, GD_OP_VER_PERSISTENT_AFR_XATTRS, msg,
+ sizeof(msg));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERSION_MISMATCH, "%s",
+ msg);
+ *op_errstr = gf_strdup(msg);
+ goto out;
+ }
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ "Volume %s does not exist", volname);
+ goto out;
+ }
+
+ ret = glusterd_validate_volume_id(dict, volinfo);
+ if (ret)
+ goto out;
+
+ ret = dict_get_int32n(dict, "command", SLEN("command"), &flag);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get brick command");
+ goto out;
+ }
+ cmd = flag;
+
+ ret = dict_get_int32n(dict, "count", SLEN("count"), &brick_count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get brick count");
+ goto out;
+ }
+
+ ret = 0;
+ if (volinfo->brick_count == brick_count) {
+ errstr = gf_strdup(
+ "Deleting all the bricks of the "
+ "volume is not allowed");
ret = -1;
- switch (cmd) {
+ goto out;
+ }
+
+ ret = -1;
+ switch (cmd) {
case GF_OP_CMD_NONE:
- errstr = gf_strdup ("no remove-brick command issued");
- goto out;
+ errstr = gf_strdup("no remove-brick command issued");
+ goto out;
case GF_OP_CMD_STATUS:
- ret = 0;
- goto out;
+ ret = 0;
+ goto out;
case GF_OP_CMD_DETACH_START:
- if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
- snprintf (msg, sizeof(msg), "volume %s is not a tier "
- "volume", volinfo->volname);
- errstr = gf_strdup (msg);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_TIER, "%s", errstr);
- goto out;
- }
-
- case GF_OP_CMD_START:
- {
- if ((volinfo->type == GF_CLUSTER_TYPE_REPLICATE) &&
- dict_getn (dict, "replica-count",
- SLEN ("replica-count"))) {
- snprintf (msg, sizeof(msg), "Migration of data is not "
- "needed when reducing replica count. Use the"
- " 'force' option");
- errstr = gf_strdup (msg);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_USE_THE_FORCE, "%s", errstr);
- goto out;
- }
-
- if (GLUSTERD_STATUS_STARTED != volinfo->status) {
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- snprintf (msg, sizeof (msg), "Volume %s needs "
- "to be started before detach-tier "
- "(you can use 'force' or 'commit' "
- "to override this behavior)",
- volinfo->volname);
- } else {
- snprintf (msg, sizeof (msg), "Volume %s needs "
- "to be started before remove-brick "
- "(you can use 'force' or 'commit' "
- "to override this behavior)",
- volinfo->volname);
- }
- errstr = gf_strdup (msg);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_STARTED, "%s", errstr);
- goto out;
- }
- if (!gd_is_remove_brick_committed (volinfo)) {
- snprintf (msg, sizeof (msg), "An earlier remove-brick "
- "task exists for volume %s. Either commit it"
- " or stop it before starting a new task.",
- volinfo->volname);
- errstr = gf_strdup (msg);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_OLD_REMOVE_BRICK_EXISTS, "Earlier remove-brick"
- " task exists for volume %s.",
- volinfo->volname);
- goto out;
- }
- if (glusterd_is_defrag_on(volinfo)) {
- errstr = gf_strdup("Rebalance is in progress. Please "
- "retry after completion");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_OIP_RETRY_LATER, "%s", errstr);
- goto out;
- }
-
- /* Check if the connected clients are all of version
- * glusterfs-3.6 and higher. This is needed to prevent some data
- * loss issues that could occur when older clients are connected
- * when rebalance is run.
- */
- ret = glusterd_check_client_op_version_support
- (volname, GD_OP_VERSION_3_6_0, NULL);
- if (ret) {
- ret = gf_asprintf (op_errstr, "Volume %s has one or "
- "more connected clients of a version"
- " lower than GlusterFS-v3.6.0. "
- "Starting remove-brick in this state "
- "could lead to data loss.\nPlease "
- "disconnect those clients before "
- "attempting this command again.",
- volname);
- goto out;
- }
-
- ret = glusterd_remove_brick_validate_bricks (cmd, brick_count,
- dict, volinfo,
- &errstr,
- GF_DEFRAG_CMD_NONE);
- if (ret)
- goto out;
-
- if (is_origin_glusterd (dict)) {
- ret = glusterd_generate_and_set_task_id
- (dict, GF_REMOVE_BRICK_TID_KEY,
- SLEN (GF_REMOVE_BRICK_TID_KEY));
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_TASKID_GEN_FAIL,
- "Failed to generate task-id");
- goto out;
- }
+ if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
+ snprintf(msg, sizeof(msg),
+ "volume %s is not a tier "
+ "volume",
+ volinfo->volname);
+ errstr = gf_strdup(msg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_TIER, "%s",
+ errstr);
+ goto out;
+ }
+
+ case GF_OP_CMD_START: {
+ if ((volinfo->type == GF_CLUSTER_TYPE_REPLICATE) &&
+ dict_getn(dict, "replica-count", SLEN("replica-count"))) {
+ snprintf(msg, sizeof(msg),
+ "Migration of data is not "
+ "needed when reducing replica count. Use the"
+ " 'force' option");
+ errstr = gf_strdup(msg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_USE_THE_FORCE, "%s",
+ errstr);
+ goto out;
+ }
+
+ if (GLUSTERD_STATUS_STARTED != volinfo->status) {
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ snprintf(msg, sizeof(msg),
+ "Volume %s needs "
+ "to be started before detach-tier "
+ "(you can use 'force' or 'commit' "
+ "to override this behavior)",
+ volinfo->volname);
} else {
- ret = dict_get_strn (dict, GF_REMOVE_BRICK_TID_KEY,
- SLEN (GF_REMOVE_BRICK_TID_KEY),
- &task_id_str);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, errno,
- GD_MSG_DICT_GET_FAILED,
- "Missing remove-brick-id");
- ret = 0;
- }
+ snprintf(msg, sizeof(msg),
+ "Volume %s needs "
+ "to be started before remove-brick "
+ "(you can use 'force' or 'commit' "
+ "to override this behavior)",
+ volinfo->volname);
+ }
+ errstr = gf_strdup(msg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_STARTED,
+ "%s", errstr);
+ goto out;
+ }
+ if (!gd_is_remove_brick_committed(volinfo)) {
+ snprintf(msg, sizeof(msg),
+ "An earlier remove-brick "
+ "task exists for volume %s. Either commit it"
+ " or stop it before starting a new task.",
+ volinfo->volname);
+ errstr = gf_strdup(msg);
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_OLD_REMOVE_BRICK_EXISTS,
+ "Earlier remove-brick"
+ " task exists for volume %s.",
+ volinfo->volname);
+ goto out;
+ }
+ if (glusterd_is_defrag_on(volinfo)) {
+ errstr = gf_strdup(
+ "Rebalance is in progress. Please "
+ "retry after completion");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OIP_RETRY_LATER,
+ "%s", errstr);
+ goto out;
+ }
+
+            /* Check if the connected clients are all of version
+             * glusterfs-3.6 or higher. This is needed to prevent some data
+             * loss issues that could occur when older clients are connected
+             * while rebalance is run.
+ */
+ ret = glusterd_check_client_op_version_support(
+ volname, GD_OP_VERSION_3_6_0, NULL);
+ if (ret) {
+ ret = gf_asprintf(op_errstr,
+ "Volume %s has one or "
+ "more connected clients of a version"
+ " lower than GlusterFS-v3.6.0. "
+ "Starting remove-brick in this state "
+ "could lead to data loss.\nPlease "
+ "disconnect those clients before "
+ "attempting this command again.",
+ volname);
+ goto out;
+ }
+
+ ret = glusterd_remove_brick_validate_bricks(
+ cmd, brick_count, dict, volinfo, &errstr, GF_DEFRAG_CMD_NONE);
+ if (ret)
+ goto out;
+
+ if (is_origin_glusterd(dict)) {
+ ret = glusterd_generate_and_set_task_id(
+ dict, GF_REMOVE_BRICK_TID_KEY,
+ SLEN(GF_REMOVE_BRICK_TID_KEY));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TASKID_GEN_FAIL,
+ "Failed to generate task-id");
+ goto out;
+ }
+ } else {
+ ret = dict_get_strn(dict, GF_REMOVE_BRICK_TID_KEY,
+ SLEN(GF_REMOVE_BRICK_TID_KEY),
+ &task_id_str);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, errno,
+ GD_MSG_DICT_GET_FAILED, "Missing remove-brick-id");
+ ret = 0;
}
- break;
+ }
+ break;
}
case GF_OP_CMD_STOP:
case GF_OP_CMD_STOP_DETACH_TIER:
- ret = 0;
- break;
+ ret = 0;
+ break;
case GF_OP_CMD_DETACH_COMMIT:
- if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
- snprintf (msg, sizeof(msg), "volume %s is not a tier "
- "volume", volinfo->volname);
- errstr = gf_strdup (msg);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_TIER, "%s", errstr);
- goto out;
- }
- if (volinfo->decommission_in_progress) {
- errstr = gf_strdup ("use 'force' option as migration "
- "is in progress");
- goto out;
- }
- if (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_FAILED) {
- errstr = gf_strdup ("use 'force' option as migration "
- "has failed");
- goto out;
- }
-
- ret = glusterd_remove_brick_validate_bricks (cmd, brick_count,
- dict, volinfo,
- &errstr,
- GF_DEFRAG_CMD_NONE);
- if (ret)
- goto out;
-
- /* If geo-rep is configured, for this volume, it should be
- * stopped.
- */
- param.volinfo = volinfo;
- ret = glusterd_check_geo_rep_running (&param, op_errstr);
- if (ret || param.is_active) {
- ret = -1;
- goto out;
- }
- break;
+ if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
+ snprintf(msg, sizeof(msg),
+ "volume %s is not a tier "
+ "volume",
+ volinfo->volname);
+ errstr = gf_strdup(msg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_TIER, "%s",
+ errstr);
+ goto out;
+ }
+ if (volinfo->decommission_in_progress) {
+ errstr = gf_strdup(
+ "use 'force' option as migration "
+ "is in progress");
+ goto out;
+ }
+ if (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_FAILED) {
+ errstr = gf_strdup(
+ "use 'force' option as migration "
+ "has failed");
+ goto out;
+ }
+
+ ret = glusterd_remove_brick_validate_bricks(
+ cmd, brick_count, dict, volinfo, &errstr, GF_DEFRAG_CMD_NONE);
+ if (ret)
+ goto out;
+
+ /* If geo-rep is configured, for this volume, it should be
+ * stopped.
+ */
+ param.volinfo = volinfo;
+ ret = glusterd_check_geo_rep_running(&param, op_errstr);
+ if (ret || param.is_active) {
+ ret = -1;
+ goto out;
+ }
+ break;
case GF_OP_CMD_COMMIT:
- if (volinfo->decommission_in_progress) {
- errstr = gf_strdup ("use 'force' option as migration "
- "is in progress");
- goto out;
- }
+ if (volinfo->decommission_in_progress) {
+ errstr = gf_strdup(
+ "use 'force' option as migration "
+ "is in progress");
+ goto out;
+ }
- if (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_FAILED) {
- errstr = gf_strdup ("use 'force' option as migration "
- "has failed");
- goto out;
- }
+ if (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_FAILED) {
+ errstr = gf_strdup(
+ "use 'force' option as migration "
+ "has failed");
+ goto out;
+ }
- ret = glusterd_remove_brick_validate_bricks (cmd, brick_count,
- dict, volinfo,
- &errstr,
- GF_DEFRAG_CMD_NONE);
- if (ret)
- goto out;
+ ret = glusterd_remove_brick_validate_bricks(
+ cmd, brick_count, dict, volinfo, &errstr, GF_DEFRAG_CMD_NONE);
+ if (ret)
+ goto out;
- /* If geo-rep is configured, for this volume, it should be
- * stopped.
- */
- param.volinfo = volinfo;
- ret = glusterd_check_geo_rep_running (&param, op_errstr);
- if (ret || param.is_active) {
- ret = -1;
- goto out;
- }
+ /* If geo-rep is configured, for this volume, it should be
+ * stopped.
+ */
+ param.volinfo = volinfo;
+ ret = glusterd_check_geo_rep_running(&param, op_errstr);
+ if (ret || param.is_active) {
+ ret = -1;
+ goto out;
+ }
- break;
+ break;
case GF_OP_CMD_DETACH_COMMIT_FORCE:
- if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
- snprintf (msg, sizeof(msg), "volume %s is not a tier "
- "volume", volinfo->volname);
- errstr = gf_strdup (msg);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_TIER, "%s", errstr);
- goto out;
- }
+ if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
+ snprintf(msg, sizeof(msg),
+ "volume %s is not a tier "
+ "volume",
+ volinfo->volname);
+ errstr = gf_strdup(msg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_TIER, "%s",
+ errstr);
+ goto out;
+ }
case GF_OP_CMD_COMMIT_FORCE:
- break;
- }
- ret = 0;
+ break;
+ }
+ ret = 0;
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- if (ret && errstr) {
- if (op_errstr)
- *op_errstr = errstr;
- }
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ if (ret && errstr) {
+ if (op_errstr)
+ *op_errstr = errstr;
+ }
- return ret;
+ return ret;
}
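
Note the two deliberate fall-throughs in the switch above: GF_OP_CMD_DETACH_START performs its tier-only check and then falls into GF_OP_CMD_START to reuse the common start-time validation, and GF_OP_CMD_DETACH_COMMIT_FORCE likewise falls into GF_OP_CMD_COMMIT_FORCE. A compact sketch of the pattern with stand-in command names:

    /* Sketch of the fall-through pattern: the detach variant adds an
     * extra tier check, then shares the start command's validation. */
    #include <stdio.h>

    enum cmd { CMD_START, CMD_DETACH_START };

    static int
    stage(enum cmd c, int is_tier)
    {
        switch (c) {
            case CMD_DETACH_START:
                if (!is_tier) {
                    printf("not a tier volume\n");
                    return -1;
                }
                /* fall through: detach-start shares start's checks */
            case CMD_START:
                printf("running common start-time validation\n");
                break;
        }
        return 0;
    }

    int
    main(void)
    {
        stage(CMD_DETACH_START, 1);
        stage(CMD_START, 0);
        return 0;
    }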
int
-glusterd_remove_brick_migrate_cbk (glusterd_volinfo_t *volinfo,
- gf_defrag_status_t status)
+glusterd_remove_brick_migrate_cbk(glusterd_volinfo_t *volinfo,
+ gf_defrag_status_t status)
{
- int ret = 0;
+ int ret = 0;
-#if 0 /* TODO: enable this behavior once cluster-wide awareness comes for
- defrag cbk function */
+#if 0 /* TODO: enable this behavior once cluster-wide awareness comes for \
+ defrag cbk function */
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_brickinfo_t *tmp = NULL;
@@ -2514,962 +2490,931 @@ glusterd_remove_brick_migrate_cbk (glusterd_volinfo_t *volinfo,
#endif
- volinfo->decommission_in_progress = 0;
- return ret;
+ volinfo->decommission_in_progress = 0;
+ return ret;
}
static int
-glusterd_op_perform_attach_tier (dict_t *dict,
- glusterd_volinfo_t *volinfo,
- int count,
- char *bricks)
+glusterd_op_perform_attach_tier(dict_t *dict, glusterd_volinfo_t *volinfo,
+ int count, char *bricks)
{
- int ret = 0;
- int replica_count = 0;
- int type = 0;
-
- /*
- * Store the new (cold) tier's structure until the graph is generated.
- * If there is a failure before the graph is generated the
- * structure will revert to its original state.
- */
- volinfo->tier_info.cold_dist_leaf_count = volinfo->dist_leaf_count;
- volinfo->tier_info.cold_type = volinfo->type;
- volinfo->tier_info.cold_brick_count = volinfo->brick_count;
- volinfo->tier_info.cold_replica_count = volinfo->replica_count;
- volinfo->tier_info.cold_disperse_count = volinfo->disperse_count;
- volinfo->tier_info.cold_redundancy_count = volinfo->redundancy_count;
-
- ret = dict_get_int32n (dict, "replica-count",
- SLEN ("replica-count"), &replica_count);
- if (!ret)
- volinfo->tier_info.hot_replica_count = replica_count;
- else
- volinfo->tier_info.hot_replica_count = 1;
- volinfo->tier_info.hot_brick_count = count;
- ret = dict_get_int32n (dict, "hot-type", SLEN ("hot-type"), &type);
- volinfo->tier_info.hot_type = type;
- ret = dict_set_int32n (dict, "type", SLEN ("type"),
- GF_CLUSTER_TYPE_TIER);
-
- if (!ret)
- ret = dict_set_nstrn (volinfo->dict, "features.ctr-enabled",
- SLEN ("features.ctr-enabled"),
- "on", SLEN ("on"));
-
- if (!ret)
- ret = dict_set_nstrn (volinfo->dict, "cluster.tier-mode",
- SLEN ("cluster.tier-mode"),
- "cache", SLEN ("cache"));
-
- return ret;
+ int ret = 0;
+ int replica_count = 0;
+ int type = 0;
+
+ /*
+ * Store the new (cold) tier's structure until the graph is generated.
+     * If there is a failure before the graph is generated, the
+     * structure will revert to its original state.
+ */
+ volinfo->tier_info.cold_dist_leaf_count = volinfo->dist_leaf_count;
+ volinfo->tier_info.cold_type = volinfo->type;
+ volinfo->tier_info.cold_brick_count = volinfo->brick_count;
+ volinfo->tier_info.cold_replica_count = volinfo->replica_count;
+ volinfo->tier_info.cold_disperse_count = volinfo->disperse_count;
+ volinfo->tier_info.cold_redundancy_count = volinfo->redundancy_count;
+
+ ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
+ &replica_count);
+ if (!ret)
+ volinfo->tier_info.hot_replica_count = replica_count;
+ else
+ volinfo->tier_info.hot_replica_count = 1;
+ volinfo->tier_info.hot_brick_count = count;
+ ret = dict_get_int32n(dict, "hot-type", SLEN("hot-type"), &type);
+ volinfo->tier_info.hot_type = type;
+ ret = dict_set_int32n(dict, "type", SLEN("type"), GF_CLUSTER_TYPE_TIER);
+
+ if (!ret)
+ ret = dict_set_nstrn(volinfo->dict, "features.ctr-enabled",
+ SLEN("features.ctr-enabled"), "on", SLEN("on"));
+
+ if (!ret)
+ ret = dict_set_nstrn(volinfo->dict, "cluster.tier-mode",
+ SLEN("cluster.tier-mode"), "cache", SLEN("cache"));
+
+ return ret;
}
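
The attach path above deliberately snapshots the volume's current topology into tier_info (the future cold tier) before mutating volinfo, so that a failure before the new graph is generated can fall back to the saved state. A simplified sketch of that save-before-mutate pattern; the structs and the type constant are stand-ins, not glusterd's:

    /* Sketch: copy the existing layout into a "cold" record before the
     * volume is rewritten as a tiered volume, so a failed attach can
     * restore the old state. */
    #include <stdio.h>

    struct tier_info {
        int cold_type;
        int cold_brick_count;
        int cold_replica_count;
    };

    struct volinfo {
        int type;
        int brick_count;
        int replica_count;
        struct tier_info tier;
    };

    static void
    attach_tier(struct volinfo *v, int hot_brick_count)
    {
        /* snapshot the current layout as the cold tier */
        v->tier.cold_type = v->type;
        v->tier.cold_brick_count = v->brick_count;
        v->tier.cold_replica_count = v->replica_count;

        v->type = 99; /* stand-in for GF_CLUSTER_TYPE_TIER */
        v->brick_count += hot_brick_count;
    }

    int
    main(void)
    {
        struct volinfo v = {.type = 2, .brick_count = 6, .replica_count = 3};
        attach_tier(&v, 2);
        printf("cold bricks=%d total bricks=%d\n", v.tier.cold_brick_count,
               v.brick_count);
        return 0;
    }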
int
-glusterd_op_add_brick (dict_t *dict, char **op_errstr)
+glusterd_op_add_brick(dict_t *dict, char **op_errstr)
{
- int ret = 0;
- char *volname = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
- char *bricks = NULL;
- int32_t count = 0;
-
- this = THIS;
- GF_ASSERT (this);
-
- priv = this->private;
- GF_ASSERT (priv);
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
-
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to get volume name");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
-
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, EINVAL,
- GD_MSG_VOL_NOT_FOUND, "Unable to allocate memory");
- goto out;
- }
-
- ret = dict_get_int32n (dict, "count", SLEN ("count"), &count);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to get count");
- goto out;
- }
-
-
- ret = dict_get_strn (dict, "bricks", SLEN ("bricks"), &bricks);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to get bricks");
- goto out;
- }
-
- if (dict_getn (dict, "attach-tier", SLEN ("attach-tier"))) {
- gf_msg_debug (THIS->name, 0, "Adding tier");
- glusterd_op_perform_attach_tier (dict, volinfo, count, bricks);
- }
-
- ret = glusterd_op_perform_add_bricks (volinfo, count, bricks, dict);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_BRICK_ADD_FAIL, "Unable to add bricks");
- goto out;
- }
- if (priv->op_version <= GD_OP_VERSION_3_7_5) {
- ret = glusterd_store_volinfo (volinfo,
- GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret)
- goto out;
- } else {
- /*
- * The cluster is operating at version greater than
- * gluster-3.7.5. So no need to store volfiles
- * in commit phase, the same will be done
- * in post validate phase with v3 framework.
- */
- }
+ int ret = 0;
+ char *volname = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+ char *bricks = NULL;
+ int32_t count = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND,
+ "Unable to allocate memory");
+ goto out;
+ }
+
+ ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get count");
+ goto out;
+ }
+
+ ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get bricks");
+ goto out;
+ }
+
+ if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) {
+ gf_msg_debug(THIS->name, 0, "Adding tier");
+ glusterd_op_perform_attach_tier(dict, volinfo, count, bricks);
+ }
+
+ ret = glusterd_op_perform_add_bricks(volinfo, count, bricks, dict);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL,
+ "Unable to add bricks");
+ goto out;
+ }
+ if (priv->op_version <= GD_OP_VERSION_3_7_5) {
+ ret = glusterd_store_volinfo(volinfo,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret)
+ goto out;
+ } else {
+ /*
+ * The cluster is operating at version greater than
+ * gluster-3.7.5. So no need to store volfiles
+ * in commit phase, the same will be done
+ * in post validate phase with v3 framework.
+ */
+ }
- if (GLUSTERD_STATUS_STARTED == volinfo->status)
- ret = glusterd_svcs_manager (volinfo);
+ if (GLUSTERD_STATUS_STARTED == volinfo->status)
+ ret = glusterd_svcs_manager(volinfo);
out:
- return ret;
+ return ret;
}
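
Throughout this patch the *_n dict helpers take an explicit key length via SLEN alongside the literal key, so no strlen() runs per lookup. A small stand-alone sketch of the idea, assuming SLEN expands to sizeof(literal) - 1 as a compile-time constant:

    #include <stdio.h>
    #include <string.h>

    /* Assumed definition: length of a string literal, computed at
     * compile time. */
    #define SLEN(str) (sizeof(str) - 1)

    /* A toy lookup that takes the key length explicitly, in the spirit
     * of the *_n dict helpers used above. */
    static const char *toy_get(const char *key, size_t keylen)
    {
        if (keylen == SLEN("volname") &&
            memcmp(key, "volname", keylen) == 0)
            return "testvol";
        return NULL;
    }

    int main(void)
    {
        const char *v = toy_get("volname", SLEN("volname"));
        printf("%s\n", v ? v : "(miss)");
        return 0;
    }
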
int
-glusterd_op_add_tier_brick (dict_t *dict, char **op_errstr)
+glusterd_op_add_tier_brick(dict_t *dict, char **op_errstr)
{
- int ret = 0;
- char *volname = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
- char *bricks = NULL;
- int32_t count = 0;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
-
- priv = this->private;
- GF_VALIDATE_OR_GOTO (this->name, priv, out);
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
-
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to get volume name");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
-
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, EINVAL,
- GD_MSG_VOL_NOT_FOUND, "Volume not found");
- goto out;
- }
-
- ret = dict_get_int32n (dict, "count", SLEN ("count"), &count);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to get count");
- goto out;
- }
-
-
- ret = dict_get_strn (dict, "bricks", SLEN ("bricks"), &bricks);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to get bricks");
- goto out;
- }
-
- if (dict_getn (dict, "attach-tier", SLEN ("attach-tier"))) {
- gf_msg_debug (THIS->name, 0, "Adding tier");
- glusterd_op_perform_attach_tier (dict, volinfo, count, bricks);
- }
-
- ret = glusterd_op_perform_add_bricks (volinfo, count, bricks, dict);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_BRICK_ADD_FAIL, "Unable to add bricks");
- goto out;
- }
- if (priv->op_version <= GD_OP_VERSION_3_10_0) {
- ret = glusterd_store_volinfo (volinfo,
- GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret)
- goto out;
- } else {
- /*
- * The cluster is operating at version greater than
- * gluster-3.10.0. So no need to store volfiles
- * in commit phase, the same will be done
- * in post validate phase with v3 framework.
- */
- }
+ int ret = 0;
+ char *volname = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+ char *bricks = NULL;
+ int32_t count = 0;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, priv, out);
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND,
+ "Volume not found");
+ goto out;
+ }
+
+ ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get count");
+ goto out;
+ }
+
+ ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get bricks");
+ goto out;
+ }
+
+ if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) {
+ gf_msg_debug(THIS->name, 0, "Adding tier");
+ glusterd_op_perform_attach_tier(dict, volinfo, count, bricks);
+ }
+
+ ret = glusterd_op_perform_add_bricks(volinfo, count, bricks, dict);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL,
+ "Unable to add bricks");
+ goto out;
+ }
+ if (priv->op_version <= GD_OP_VERSION_3_10_0) {
+ ret = glusterd_store_volinfo(volinfo,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret)
+ goto out;
+ } else {
+ /*
+ * The cluster is operating at version greater than
+ * gluster-3.10.0. So no need to store volfiles
+ * in commit phase, the same will be done
+ * in post validate phase with v3 framework.
+ */
+ }
- if (GLUSTERD_STATUS_STARTED == volinfo->status)
- ret = glusterd_svcs_manager (volinfo);
+ if (GLUSTERD_STATUS_STARTED == volinfo->status)
+ ret = glusterd_svcs_manager(volinfo);
out:
- return ret;
+ return ret;
}
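
Both add-brick paths above gate volinfo persistence on the cluster op-version: clusters at or below the threshold store volfiles in the commit phase, while newer ones defer the same work to the post-validate phase of the v3 framework. A toy illustration of such a gate; the numeric encoding of versions here is illustrative only:

    #include <stdio.h>

    #define OP_VERSION_3_10_0 31000   /* illustrative encoding */

    /* Pick the phase in which state is persisted, based on the
     * cluster-wide operating version. */
    static const char *persist_phase(int cluster_op_version)
    {
        return (cluster_op_version <= OP_VERSION_3_10_0)
                   ? "commit"
                   : "post-validate";
    }

    int main(void)
    {
        printf("%s %s\n", persist_phase(30705), persist_phase(31200));
        return 0;
    }
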
void
-glusterd_op_perform_detach_tier (glusterd_volinfo_t *volinfo)
+glusterd_op_perform_detach_tier(glusterd_volinfo_t *volinfo)
{
- volinfo->type = volinfo->tier_info.cold_type;
- volinfo->replica_count = volinfo->tier_info.cold_replica_count;
- volinfo->disperse_count = volinfo->tier_info.cold_disperse_count;
- volinfo->redundancy_count = volinfo->tier_info.cold_redundancy_count;
- volinfo->dist_leaf_count = volinfo->tier_info.cold_dist_leaf_count;
+ volinfo->type = volinfo->tier_info.cold_type;
+ volinfo->replica_count = volinfo->tier_info.cold_replica_count;
+ volinfo->disperse_count = volinfo->tier_info.cold_disperse_count;
+ volinfo->redundancy_count = volinfo->tier_info.cold_redundancy_count;
+ volinfo->dist_leaf_count = volinfo->tier_info.cold_dist_leaf_count;
}
int
-glusterd_op_remove_brick (dict_t *dict, char **op_errstr)
+glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
{
- int ret = -1;
- char *volname = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- char *brick = NULL;
- int32_t count = 0;
- int32_t i = 1;
- char key[64] = "";
- int keylen;
- int32_t flag = 0;
- int need_rebalance = 0;
- int force = 0;
- gf1_op_commands cmd = 0;
- int32_t replica_count = 0;
- glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_brickinfo_t *tmp = NULL;
- char *task_id_str = NULL;
- xlator_t *this = NULL;
- dict_t *bricks_dict = NULL;
- char *brick_tmpstr = NULL;
- int start_remove = 0;
- uint32_t commit_hash = 0;
- int defrag_cmd = 0;
- int detach_commit = 0;
- void *tier_info = NULL;
- char *cold_shd_key = NULL;
- char *hot_shd_key = NULL;
- int delete_key = 1;
- glusterd_conf_t *conf = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
- GF_VALIDATE_OR_GOTO (this->name, conf, out);
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
-
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_ADD_FAIL, "Unable to get volume name");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_VOL_NOT_FOUND, "Unable to allocate memory");
- goto out;
- }
-
- ret = dict_get_int32n (dict, "command", SLEN ("command"), &flag);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to get command");
- goto out;
- }
- cmd = flag;
-
- if ((GF_OP_CMD_START == cmd) ||
- (GF_OP_CMD_DETACH_START == cmd))
- start_remove = 1;
-
- /* Set task-id, if available, in ctx dict for operations other than
- * start
- */
-
- if (is_origin_glusterd (dict) && (!start_remove)) {
- if (!gf_uuid_is_null (volinfo->rebal.rebalance_id)) {
- ret = glusterd_copy_uuid_to_dict
- (volinfo->rebal.rebalance_id, dict,
- GF_REMOVE_BRICK_TID_KEY,
- SLEN (GF_REMOVE_BRICK_TID_KEY));
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REMOVE_BRICK_ID_SET_FAIL,
- "Failed to set remove-brick-id");
- goto out;
- }
- }
- }
-
- /* Clear task-id, rebal.op and stored bricks on commmitting/stopping
- * remove-brick */
- if ((!start_remove) && (cmd != GF_OP_CMD_STATUS)) {
- gf_uuid_clear (volinfo->rebal.rebalance_id);
- volinfo->rebal.op = GD_OP_NONE;
- dict_unref (volinfo->rebal.dict);
- volinfo->rebal.dict = NULL;
- }
-
- ret = -1;
- switch (cmd) {
+ int ret = -1;
+ char *volname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ char *brick = NULL;
+ int32_t count = 0;
+ int32_t i = 1;
+ char key[64] = "";
+ int keylen;
+ int32_t flag = 0;
+ int need_rebalance = 0;
+ int force = 0;
+ gf1_op_commands cmd = 0;
+ int32_t replica_count = 0;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_brickinfo_t *tmp = NULL;
+ char *task_id_str = NULL;
+ xlator_t *this = NULL;
+ dict_t *bricks_dict = NULL;
+ char *brick_tmpstr = NULL;
+ int start_remove = 0;
+ uint32_t commit_hash = 0;
+ int defrag_cmd = 0;
+ int detach_commit = 0;
+ void *tier_info = NULL;
+ char *cold_shd_key = NULL;
+ char *hot_shd_key = NULL;
+ int delete_key = 1;
+ glusterd_conf_t *conf = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+
+ if (ret) {
+        gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND,
+ "Unable to allocate memory");
+ goto out;
+ }
+
+ ret = dict_get_int32n(dict, "command", SLEN("command"), &flag);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get command");
+ goto out;
+ }
+ cmd = flag;
+
+ if ((GF_OP_CMD_START == cmd) || (GF_OP_CMD_DETACH_START == cmd))
+ start_remove = 1;
+
+ /* Set task-id, if available, in ctx dict for operations other than
+ * start
+ */
+
+ if (is_origin_glusterd(dict) && (!start_remove)) {
+ if (!gf_uuid_is_null(volinfo->rebal.rebalance_id)) {
+ ret = glusterd_copy_uuid_to_dict(volinfo->rebal.rebalance_id, dict,
+ GF_REMOVE_BRICK_TID_KEY,
+ SLEN(GF_REMOVE_BRICK_TID_KEY));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_REMOVE_BRICK_ID_SET_FAIL,
+ "Failed to set remove-brick-id");
+ goto out;
+ }
+ }
+ }
+
+    /* Clear task-id, rebal.op and stored bricks on committing/stopping
+ * remove-brick */
+ if ((!start_remove) && (cmd != GF_OP_CMD_STATUS)) {
+ gf_uuid_clear(volinfo->rebal.rebalance_id);
+ volinfo->rebal.op = GD_OP_NONE;
+ dict_unref(volinfo->rebal.dict);
+ volinfo->rebal.dict = NULL;
+ }
+
+ ret = -1;
+ switch (cmd) {
case GF_OP_CMD_NONE:
- goto out;
+ goto out;
case GF_OP_CMD_STATUS:
- ret = 0;
- goto out;
+ ret = 0;
+ goto out;
case GF_OP_CMD_STOP:
- case GF_OP_CMD_STOP_DETACH_TIER:
- {
- /* Fall back to the old volume file */
- cds_list_for_each_entry_safe (brickinfo, tmp, &volinfo->bricks,
- brick_list) {
- if (!brickinfo->decommissioned)
- continue;
- brickinfo->decommissioned = 0;
- }
- ret = glusterd_create_volfiles_and_notify_services (volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_VOLFILE_CREATE_FAIL,
- "failed to create volfiles");
- goto out;
- }
-
- ret = glusterd_store_volinfo (volinfo,
- GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_VOLINFO_SET_FAIL,
- "failed to store volinfo");
- goto out;
- }
-
- ret = 0;
- goto out;
+ case GF_OP_CMD_STOP_DETACH_TIER: {
+ /* Fall back to the old volume file */
+ cds_list_for_each_entry_safe(brickinfo, tmp, &volinfo->bricks,
+ brick_list)
+ {
+ if (!brickinfo->decommissioned)
+ continue;
+ brickinfo->decommissioned = 0;
+ }
+ ret = glusterd_create_volfiles_and_notify_services(volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_VOLFILE_CREATE_FAIL, "failed to create volfiles");
+ goto out;
+ }
+
+ ret = glusterd_store_volinfo(volinfo,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLINFO_SET_FAIL,
+ "failed to store volinfo");
+ goto out;
+ }
+
+ ret = 0;
+ goto out;
}
case GF_OP_CMD_DETACH_START:
case GF_OP_CMD_START:
- /* Reset defrag status to 'NOT STARTED' whenever a
- * remove-brick/rebalance command is issued to remove
- * stale information from previous run.
- * Update defrag_cmd as well or it will only be done
- * for nodes on which the brick to be removed exists.
- */
- volinfo->rebal.defrag_cmd = cmd;
- volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_NOT_STARTED;
- ret = dict_get_strn (dict, GF_REMOVE_BRICK_TID_KEY,
- SLEN (GF_REMOVE_BRICK_TID_KEY),
- &task_id_str);
- if (ret) {
- gf_msg_debug (this->name, errno,
- "Missing remove-brick-id");
- ret = 0;
- } else {
- gf_uuid_parse (task_id_str, volinfo->rebal.rebalance_id) ;
- volinfo->rebal.op = GD_OP_REMOVE_BRICK;
- }
- force = 0;
- break;
+ /* Reset defrag status to 'NOT STARTED' whenever a
+ * remove-brick/rebalance command is issued to remove
+ * stale information from previous run.
+ * Update defrag_cmd as well or it will only be done
+ * for nodes on which the brick to be removed exists.
+ */
+ volinfo->rebal.defrag_cmd = cmd;
+ volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_NOT_STARTED;
+ ret = dict_get_strn(dict, GF_REMOVE_BRICK_TID_KEY,
+ SLEN(GF_REMOVE_BRICK_TID_KEY), &task_id_str);
+ if (ret) {
+ gf_msg_debug(this->name, errno, "Missing remove-brick-id");
+ ret = 0;
+ } else {
+ gf_uuid_parse(task_id_str, volinfo->rebal.rebalance_id);
+ volinfo->rebal.op = GD_OP_REMOVE_BRICK;
+ }
+ force = 0;
+ break;
case GF_OP_CMD_COMMIT:
- force = 1;
- break;
+ force = 1;
+ break;
case GF_OP_CMD_DETACH_COMMIT:
case GF_OP_CMD_DETACH_COMMIT_FORCE:
- glusterd_op_perform_detach_tier (volinfo);
- detach_commit = 1;
-
- /* Disabling ctr when detaching a tier, since
- * currently tier is the only consumer of ctr.
- * Revisit this code when this constraint no
- * longer exist.
+ glusterd_op_perform_detach_tier(volinfo);
+ detach_commit = 1;
+
+ /* Disabling ctr when detaching a tier, since
+ * currently tier is the only consumer of ctr.
+ * Revisit this code when this constraint no
+             * longer exists.
+ */
+ dict_deln(volinfo->dict, "features.ctr-enabled",
+ SLEN("features.ctr-enabled"));
+ dict_deln(volinfo->dict, "cluster.tier-mode",
+ SLEN("cluster.tier-mode"));
+
+ hot_shd_key = gd_get_shd_key(volinfo->tier_info.hot_type);
+ cold_shd_key = gd_get_shd_key(volinfo->tier_info.cold_type);
+ if (hot_shd_key) {
+ /*
+ * Since post detach, shd graph will not contain hot
+ * tier. So we need to clear option set for hot tier.
+ * For a tiered volume there can be different key
+ * for both hot and cold. If hot tier is shd compatible
+ * then we need to remove the configured value when
+             * detaching a tier, only if the keys are different or
+ * cold key is NULL. So we will set delete_key first,
+ * and if cold key is not null and they are equal then
+ * we will clear the flag. Otherwise we will delete the
+ * key.
*/
- dict_deln (volinfo->dict, "features.ctr-enabled",
- SLEN ("features.ctr-enabled"));
- dict_deln (volinfo->dict, "cluster.tier-mode",
- SLEN ("cluster.tier-mode"));
-
- hot_shd_key = gd_get_shd_key (volinfo->tier_info.hot_type);
- cold_shd_key = gd_get_shd_key (volinfo->tier_info.cold_type);
- if (hot_shd_key) {
- /*
- * Since post detach, shd graph will not contain hot
- * tier. So we need to clear option set for hot tier.
- * For a tiered volume there can be different key
- * for both hot and cold. If hot tier is shd compatible
- * then we need to remove the configured value when
- * detaching a tier, only if the key's are different or
- * cold key is NULL. So we will set delete_key first,
- * and if cold key is not null and they are equal then
- * we will clear the flag. Otherwise we will delete the
- * key.
- */
- if (cold_shd_key)
- delete_key = strcmp (hot_shd_key, cold_shd_key);
- if (delete_key)
- dict_del (volinfo->dict, hot_shd_key);
- }
- /* fall through */
+ if (cold_shd_key)
+ delete_key = strcmp(hot_shd_key, cold_shd_key);
+ if (delete_key)
+ dict_del(volinfo->dict, hot_shd_key);
+ }
+ /* fall through */
case GF_OP_CMD_COMMIT_FORCE:
- if (volinfo->decommission_in_progress) {
- if (volinfo->rebal.defrag) {
- LOCK (&volinfo->rebal.defrag->lock);
- /* Fake 'rebalance-complete' so the graph change
- happens right away */
- volinfo->rebal.defrag_status =
- GF_DEFRAG_STATUS_COMPLETE;
-
- UNLOCK (&volinfo->rebal.defrag->lock);
- }
- /* Graph change happens in rebalance _cbk function,
- no need to do anything here */
- /* TODO: '_cbk' function is not doing anything for now */
- }
-
- ret = 0;
- force = 1;
- break;
+ if (volinfo->decommission_in_progress) {
+ if (volinfo->rebal.defrag) {
+ LOCK(&volinfo->rebal.defrag->lock);
+ /* Fake 'rebalance-complete' so the graph change
+ happens right away */
+ volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_COMPLETE;
+
+ UNLOCK(&volinfo->rebal.defrag->lock);
+ }
+ /* Graph change happens in rebalance _cbk function,
+ no need to do anything here */
+ /* TODO: '_cbk' function is not doing anything for now */
+ }
+
+ ret = 0;
+ force = 1;
+ break;
+ }
+
+ ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get count");
+ goto out;
+ }
+
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER)
+ count = glusterd_set_detach_bricks(dict, volinfo);
+
+ /* Save the list of bricks for later usage only on starting a
+ * remove-brick. Right now this is required for displaying the task
+ * parameters with task status in volume status.
+ */
+
+ if (start_remove) {
+ bricks_dict = dict_new();
+ if (!bricks_dict) {
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_int32n(bricks_dict, "count", SLEN("count"), count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Failed to save remove-brick count");
+ goto out;
}
+ }
- ret = dict_get_int32n (dict, "count", SLEN ("count"), &count);
+ while (i <= count) {
+ keylen = snprintf(key, sizeof(key), "brick%d", i);
+ ret = dict_get_strn(dict, key, keylen, &brick);
if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to get count");
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get %s", key);
+ goto out;
}
- if (volinfo->type == GF_CLUSTER_TYPE_TIER)
- count = glusterd_set_detach_bricks(dict, volinfo);
-
- /* Save the list of bricks for later usage only on starting a
- * remove-brick. Right now this is required for displaying the task
- * parameters with task status in volume status.
- */
-
if (start_remove) {
- bricks_dict = dict_new ();
- if (!bricks_dict) {
- ret = -1;
- goto out;
- }
- ret = dict_set_int32n (bricks_dict, "count",
- SLEN ("count"), count);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED,
- "Failed to save remove-brick count");
- goto out;
- }
- }
-
- while ( i <= count) {
- keylen = snprintf (key, sizeof(key), "brick%d", i);
- ret = dict_get_strn (dict, key, keylen, &brick);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Unable to get %s",
- key);
- goto out;
- }
-
- if (start_remove) {
- brick_tmpstr = gf_strdup (brick);
- if (!brick_tmpstr) {
- ret = -1;
- gf_msg (this->name, GF_LOG_ERROR, ENOMEM,
- GD_MSG_NO_MEMORY,
- "Failed to duplicate brick name");
- goto out;
- }
- ret = dict_set_dynstrn (bricks_dict, key, keylen,
- brick_tmpstr);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED,
- "Failed to add brick to dict");
- goto out;
- }
- brick_tmpstr = NULL;
- }
-
- ret = glusterd_op_perform_remove_brick (volinfo, brick, force,
- &need_rebalance);
- if (ret)
- goto out;
- i++;
- }
-
- if (detach_commit) {
- /* Clear related information from volinfo */
- tier_info = ((void *)(&volinfo->tier_info));
- memset (tier_info, 0, sizeof (volinfo->tier_info));
- }
-
- if (start_remove)
- volinfo->rebal.dict = dict_ref (bricks_dict);
-
- ret = dict_get_int32n (dict, "replica-count",
- SLEN ("replica-count"), &replica_count);
- if (!ret) {
- gf_msg (this->name, GF_LOG_INFO, errno,
- GD_MSG_DICT_GET_FAILED,
- "changing replica count %d to %d on volume %s",
- volinfo->replica_count, replica_count,
- volinfo->volname);
- volinfo->replica_count = replica_count;
- /* A reduction in replica count implies an arbiter volume
- * earlier is now no longer one. */
- if (volinfo->arbiter_count)
- volinfo->arbiter_count = 0;
- volinfo->sub_count = replica_count;
- volinfo->dist_leaf_count = glusterd_get_dist_leaf_count (volinfo);
-
- /*
- * volinfo->type and sub_count have already been set for
- * volumes undergoing a detach operation, they should not
- * be modified here.
- */
- if ((replica_count == 1) && (cmd != GF_OP_CMD_DETACH_COMMIT) &&
- (cmd != GF_OP_CMD_DETACH_COMMIT_FORCE)) {
- if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) {
- volinfo->type = GF_CLUSTER_TYPE_NONE;
- /* backward compatibility */
- volinfo->sub_count = 0;
- } else {
- volinfo->type = GF_CLUSTER_TYPE_STRIPE;
- /* backward compatibility */
- volinfo->sub_count = volinfo->dist_leaf_count;
- }
- }
+ brick_tmpstr = gf_strdup(brick);
+ if (!brick_tmpstr) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "Failed to duplicate brick name");
+ goto out;
+ }
+ ret = dict_set_dynstrn(bricks_dict, key, keylen, brick_tmpstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Failed to add brick to dict");
+ goto out;
+ }
+ brick_tmpstr = NULL;
}
- volinfo->subvol_count = (volinfo->brick_count /
- volinfo->dist_leaf_count);
- if (!glusterd_is_volume_replicate (volinfo) &&
- conf->op_version >= GD_OP_VERSION_3_12_2) {
- ret = dict_set_nstrn (volinfo->dict,
- "performance.client-io-threads",
- SLEN ("performance.client-io-threads"),
- "on", SLEN ("on"));
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "Failed to set "
- "performance.client-io-threads to on");
- goto out;
- }
- }
+ ret = glusterd_op_perform_remove_brick(volinfo, brick, force,
+ &need_rebalance);
+ if (ret)
+ goto out;
+ i++;
+ }
+
+ if (detach_commit) {
+ /* Clear related information from volinfo */
+ tier_info = ((void *)(&volinfo->tier_info));
+ memset(tier_info, 0, sizeof(volinfo->tier_info));
+ }
+
+ if (start_remove)
+ volinfo->rebal.dict = dict_ref(bricks_dict);
+
+ ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
+ &replica_count);
+ if (!ret) {
+        gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_SUCCESS,
+ "changing replica count %d to %d on volume %s",
+ volinfo->replica_count, replica_count, volinfo->volname);
+ volinfo->replica_count = replica_count;
+ /* A reduction in replica count implies an arbiter volume
+ * earlier is now no longer one. */
+ if (volinfo->arbiter_count)
+ volinfo->arbiter_count = 0;
+ volinfo->sub_count = replica_count;
+ volinfo->dist_leaf_count = glusterd_get_dist_leaf_count(volinfo);
- ret = glusterd_create_volfiles_and_notify_services (volinfo);
+ /*
+ * volinfo->type and sub_count have already been set for
+ * volumes undergoing a detach operation, they should not
+ * be modified here.
+ */
+ if ((replica_count == 1) && (cmd != GF_OP_CMD_DETACH_COMMIT) &&
+ (cmd != GF_OP_CMD_DETACH_COMMIT_FORCE)) {
+ if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) {
+ volinfo->type = GF_CLUSTER_TYPE_NONE;
+ /* backward compatibility */
+ volinfo->sub_count = 0;
+ } else {
+ volinfo->type = GF_CLUSTER_TYPE_STRIPE;
+ /* backward compatibility */
+ volinfo->sub_count = volinfo->dist_leaf_count;
+ }
+ }
+ }
+ volinfo->subvol_count = (volinfo->brick_count / volinfo->dist_leaf_count);
+
+ if (!glusterd_is_volume_replicate(volinfo) &&
+ conf->op_version >= GD_OP_VERSION_3_12_2) {
+ ret = dict_set_nstrn(volinfo->dict, "performance.client-io-threads",
+ SLEN("performance.client-io-threads"), "on",
+ SLEN("on"));
if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_VOLFILE_CREATE_FAIL, "failed to create volfiles");
- goto out;
- }
-
- ret = glusterd_store_volinfo (volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set "
+ "performance.client-io-threads to on");
+ goto out;
+ }
+ }
+
+ ret = glusterd_create_volfiles_and_notify_services(volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLFILE_CREATE_FAIL,
+ "failed to create volfiles");
+ goto out;
+ }
+
+ ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLINFO_STORE_FAIL,
+ "failed to store volinfo");
+ goto out;
+ }
+
+ if (start_remove && volinfo->status == GLUSTERD_STATUS_STARTED) {
+ ret = glusterd_svcs_reconfigure();
if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_VOLINFO_STORE_FAIL, "failed to store volinfo");
- goto out;
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_RECONF_FAIL,
+ "Unable to reconfigure NFS-Server");
+ goto out;
}
+ }
- if (start_remove &&
- volinfo->status == GLUSTERD_STATUS_STARTED) {
- ret = glusterd_svcs_reconfigure ();
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_NFS_RECONF_FAIL,
- "Unable to reconfigure NFS-Server");
- goto out;
- }
- }
-
- /* Need to reset the defrag/rebalance status accordingly */
- switch (volinfo->rebal.defrag_status) {
+ /* Need to reset the defrag/rebalance status accordingly */
+ switch (volinfo->rebal.defrag_status) {
case GF_DEFRAG_STATUS_FAILED:
case GF_DEFRAG_STATUS_COMPLETE:
- volinfo->rebal.defrag_status = 0;
+ volinfo->rebal.defrag_status = 0;
/* FALLTHROUGH */
default:
- break;
- }
- if (!force && need_rebalance) {
- if (dict_get_uint32(dict, "commit-hash", &commit_hash) == 0) {
- volinfo->rebal.commit_hash = commit_hash;
- }
- /* perform the rebalance operations */
- defrag_cmd = GF_DEFRAG_CMD_START_FORCE;
- if (cmd == GF_OP_CMD_DETACH_START)
- defrag_cmd = GF_DEFRAG_CMD_START_DETACH_TIER;
- /*
- * We need to set this *before* we issue commands to the
- * bricks, or else we might end up setting it after the bricks
- * have responded. If we fail to send the request(s) we'll
- * clear it ourselves because nobody else will.
- */
- volinfo->decommission_in_progress = 1;
- char err_str[4096] = "";
- ret = glusterd_handle_defrag_start
- (volinfo, err_str, sizeof (err_str),
- defrag_cmd,
- glusterd_remove_brick_migrate_cbk, GD_OP_REMOVE_BRICK);
+ break;
+ }
+ if (!force && need_rebalance) {
+ if (dict_get_uint32(dict, "commit-hash", &commit_hash) == 0) {
+ volinfo->rebal.commit_hash = commit_hash;
+ }
+ /* perform the rebalance operations */
+ defrag_cmd = GF_DEFRAG_CMD_START_FORCE;
+ if (cmd == GF_OP_CMD_DETACH_START)
+ defrag_cmd = GF_DEFRAG_CMD_START_DETACH_TIER;
+ /*
+ * We need to set this *before* we issue commands to the
+ * bricks, or else we might end up setting it after the bricks
+ * have responded. If we fail to send the request(s) we'll
+ * clear it ourselves because nobody else will.
+ */
+ volinfo->decommission_in_progress = 1;
+ char err_str[4096] = "";
+ ret = glusterd_handle_defrag_start(
+ volinfo, err_str, sizeof(err_str), defrag_cmd,
+ glusterd_remove_brick_migrate_cbk, GD_OP_REMOVE_BRICK);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REBALANCE_START_FAIL,
- "failed to start the rebalance");
- /* TBD: shouldn't we do more than print a message? */
- volinfo->decommission_in_progress = 0;
- if (op_errstr)
- *op_errstr = gf_strdup (err_str);
- }
- } else {
- if (GLUSTERD_STATUS_STARTED == volinfo->status)
- ret = glusterd_svcs_manager (volinfo);
- }
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REBALANCE_START_FAIL,
+ "failed to start the rebalance");
+ /* TBD: shouldn't we do more than print a message? */
+ volinfo->decommission_in_progress = 0;
+ if (op_errstr)
+ *op_errstr = gf_strdup(err_str);
+ }
+ } else {
+ if (GLUSTERD_STATUS_STARTED == volinfo->status)
+ ret = glusterd_svcs_manager(volinfo);
+ }
out:
- GF_FREE (brick_tmpstr);
- if (bricks_dict)
- dict_unref (bricks_dict);
+ GF_FREE(brick_tmpstr);
+ if (bricks_dict)
+ dict_unref(bricks_dict);
- return ret;
+ return ret;
}
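
The comment inside the rebalance branch above stresses setting decommission_in_progress before dispatching the request, and clearing it locally only when the send itself fails, since no callback will fire in that case. A reduced sketch of that ordering rule, with hypothetical names standing in for the rebalance machinery:

    #include <stdio.h>

    static int in_progress = 0;

    /* Hypothetical async dispatch: returns 0 once the request is handed
     * off; on success an (imaginary) completion callback would clear
     * the flag later. */
    static int send_rebalance_request(int fail) { return fail ? -1 : 0; }

    static int start_decommission(int fail)
    {
        /* Set the flag *before* dispatch: if it were set after, a fast
         * completion callback could run first and the flag would then
         * be set again, sticking forever. */
        in_progress = 1;
        if (send_rebalance_request(fail) != 0) {
            in_progress = 0;   /* nobody else will clear it on a
                                  send failure */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        start_decommission(1);
        printf("in_progress=%d\n", in_progress);
        return 0;
    }
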
int
-glusterd_op_stage_barrier (dict_t *dict, char **op_errstr)
+glusterd_op_stage_barrier(dict_t *dict, char **op_errstr)
{
- int ret = -1;
- xlator_t *this = NULL;
- char *volname = NULL;
- glusterd_volinfo_t *vol = NULL;
-
- GF_ASSERT (dict);
- this = THIS;
- GF_ASSERT (this);
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Volname not present in "
- "dict");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &vol);
- if (ret) {
- gf_asprintf (op_errstr, "Volume %s does not exist", volname);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, "%s", *op_errstr);
- goto out;
- }
-
- if (!glusterd_is_volume_started (vol)) {
- gf_asprintf (op_errstr, "Volume %s is not started", volname);
- ret = -1;
- goto out;
- }
-
- ret = dict_get_str_boolean (dict, "barrier", -1);
- if (ret == -1) {
- gf_asprintf (op_errstr, "Barrier op for volume %s not present "
- "in dict", volname);
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "%s", *op_errstr);
- goto out;
- }
- ret = 0;
+ int ret = -1;
+ xlator_t *this = NULL;
+ char *volname = NULL;
+ glusterd_volinfo_t *vol = NULL;
+
+ GF_ASSERT(dict);
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Volname not present in "
+ "dict");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &vol);
+ if (ret) {
+ gf_asprintf(op_errstr, "Volume %s does not exist", volname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s",
+ *op_errstr);
+ goto out;
+ }
+
+ if (!glusterd_is_volume_started(vol)) {
+ gf_asprintf(op_errstr, "Volume %s is not started", volname);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_str_boolean(dict, "barrier", -1);
+ if (ret == -1) {
+ gf_asprintf(op_errstr,
+ "Barrier op for volume %s not present "
+ "in dict",
+ volname);
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
+ *op_errstr);
+ goto out;
+ }
+ ret = 0;
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
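
glusterd_op_stage_barrier() relies on dict_get_str_boolean() returning the caller-supplied default (-1 here) when the key is absent, so a missing "barrier" key is distinguishable from an explicit off. A stand-alone sketch of such a tri-state parse; the accepted spellings are assumptions, not the exact dict implementation:

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    /* Tri-state boolean parse: 1/0 for a recognized value, the
     * caller's default for anything else (including a missing key). */
    static int str_to_boolean(const char *val, int def)
    {
        if (!val)
            return def;
        if (!strcasecmp(val, "enable") || !strcasecmp(val, "on") ||
            !strcasecmp(val, "yes") || !strcmp(val, "1"))
            return 1;
        if (!strcasecmp(val, "disable") || !strcasecmp(val, "off") ||
            !strcasecmp(val, "no") || !strcmp(val, "0"))
            return 0;
        return def;
    }

    int main(void)
    {
        printf("%d %d %d\n", str_to_boolean("enable", -1),
               str_to_boolean("off", -1), str_to_boolean(NULL, -1));
        return 0;
    }
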
int
-glusterd_op_barrier (dict_t *dict, char **op_errstr)
+glusterd_op_barrier(dict_t *dict, char **op_errstr)
{
- int ret = -1;
- xlator_t *this = NULL;
- char *volname = NULL;
- glusterd_volinfo_t *vol = NULL;
- char *barrier_op = NULL;
-
- GF_ASSERT (dict);
- this = THIS;
- GF_ASSERT (this);
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Volname not present in "
- "dict");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &vol);
- if (ret) {
- gf_asprintf (op_errstr, "Volume %s does not exist", volname);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, "%s", *op_errstr);
- goto out;
- }
-
- ret = dict_get_strn (dict, "barrier", SLEN ("barrier"), &barrier_op);
- if (ret) {
- gf_asprintf (op_errstr, "Barrier op for volume %s not present "
- "in dict", volname);
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "%s", *op_errstr);
- goto out;
- }
-
- ret = dict_set_dynstr_with_alloc (vol->dict, "features.barrier",
- barrier_op);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED, "Failed to set barrier op in"
- " volume option dict");
- goto out;
- }
-
- gd_update_volume_op_versions (vol);
- ret = glusterd_create_volfiles (vol);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLFILE_CREATE_FAIL, "Failed to create volfiles");
- goto out;
- }
- ret = glusterd_store_volinfo (vol, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ int ret = -1;
+ xlator_t *this = NULL;
+ char *volname = NULL;
+ glusterd_volinfo_t *vol = NULL;
+ char *barrier_op = NULL;
+
+ GF_ASSERT(dict);
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Volname not present in "
+ "dict");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &vol);
+ if (ret) {
+ gf_asprintf(op_errstr, "Volume %s does not exist", volname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s",
+ *op_errstr);
+ goto out;
+ }
+
+ ret = dict_get_strn(dict, "barrier", SLEN("barrier"), &barrier_op);
+ if (ret) {
+ gf_asprintf(op_errstr,
+ "Barrier op for volume %s not present "
+ "in dict",
+ volname);
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
+ *op_errstr);
+ goto out;
+ }
+
+ ret = dict_set_dynstr_with_alloc(vol->dict, "features.barrier", barrier_op);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Failed to set barrier op in"
+ " volume option dict");
+ goto out;
+ }
+
+ gd_update_volume_op_versions(vol);
+ ret = glusterd_create_volfiles(vol);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
+ "Failed to create volfiles");
+ goto out;
+ }
+ ret = glusterd_store_volinfo(vol, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
int
-__glusterd_handle_add_tier_brick (rpcsvc_request_t *req)
+__glusterd_handle_add_tier_brick(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf_cli_req cli_req = {{0,} };
- dict_t *dict = NULL;
- char *bricks = NULL;
- char *volname = NULL;
- int brick_count = 0;
- void *cli_rsp = NULL;
- char err_str[2048] = "";
- gf_cli_rsp rsp = {0,};
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
- int32_t replica_count = 0;
- int32_t arbiter_count = 0;
- int type = 0;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
-
- GF_VALIDATE_OR_GOTO (this->name, req, out);
-
- ret = xdr_to_generic (req->msg[0], &cli_req,
- (xdrproc_t)xdr_gf_cli_req);
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{
+ 0,
+ }};
+ dict_t *dict = NULL;
+ char *bricks = NULL;
+ char *volname = NULL;
+ int brick_count = 0;
+ void *cli_rsp = NULL;
+ char err_str[2048] = "";
+ gf_cli_rsp rsp = {
+ 0,
+ };
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+ int32_t replica_count = 0;
+ int32_t arbiter_count = 0;
+ int type = 0;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ GF_VALIDATE_OR_GOTO(this->name, req, out);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ /*failed to decode msg*/
+ req->rpc_err = GARBAGE_ARGS;
+ snprintf(err_str, sizeof(err_str), "Garbage args received");
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, "%s",
+ err_str);
+ goto out;
+ }
+
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_ADD_BRICK_REQ_RECVD,
+ "Received add brick req");
+
+ if (cli_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new();
+
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
if (ret < 0) {
- /*failed to decode msg*/
- req->rpc_err = GARBAGE_ARGS;
- snprintf (err_str, sizeof (err_str), "Garbage args received");
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_GARBAGE_ARGS, "%s", err_str);
- goto out;
- }
-
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_ADD_BRICK_REQ_RECVD, "Received add brick req");
-
- if (cli_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new ();
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to "
- "unserialize req-buffer to dictionary");
- snprintf (err_str, sizeof (err_str), "Unable to decode "
- "the command");
- goto out;
- }
- }
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
-
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Unable to get volume "
- "name");
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "%s", err_str);
- goto out;
- }
-
- if (!glusterd_check_volume_exists (volname)) {
- snprintf (err_str, sizeof (err_str), "Volume %s does not exist",
- volname);
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_VOL_NOT_FOUND, "%s", err_str);
- ret = -1;
- goto out;
- }
-
- ret = dict_get_int32n (dict, "count", SLEN ("count"), &brick_count);
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Unable to get volume "
- "brick count");
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "%s", err_str);
- goto out;
- }
-
- ret = dict_get_int32n (dict, "replica-count",
- SLEN ("replica-count"), &replica_count);
- if (!ret) {
- gf_msg (this->name, GF_LOG_INFO, errno,
- GD_MSG_DICT_GET_SUCCESS, "replica-count is %d",
- replica_count);
- }
-
- ret = dict_get_int32n (dict, "arbiter-count",
- SLEN ("arbiter-count"), &arbiter_count);
- if (!ret) {
- gf_msg (this->name, GF_LOG_INFO, errno,
- GD_MSG_DICT_GET_SUCCESS, "arbiter-count is %d",
- arbiter_count);
- }
+ gf_msg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf(err_str, sizeof(err_str),
+ "Unable to decode "
+ "the command");
+ goto out;
+ }
+ }
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Unable to get volume "
+ "name");
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
+ err_str);
+ goto out;
+ }
+
+ if (!glusterd_check_volume_exists(volname)) {
+ snprintf(err_str, sizeof(err_str), "Volume %s does not exist", volname);
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND, "%s",
+ err_str);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_int32n(dict, "count", SLEN("count"), &brick_count);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Unable to get volume "
+ "brick count");
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
+ err_str);
+ goto out;
+ }
+
+ ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
+ &replica_count);
+ if (!ret) {
+ gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS,
+ "replica-count is %d", replica_count);
+ }
+
+ ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"),
+ &arbiter_count);
+ if (!ret) {
+ gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS,
+ "arbiter-count is %d", arbiter_count);
+ }
+
+ if (!dict_getn(dict, "force", SLEN("force"))) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Failed to get flag");
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Unable to get volinfo "
+ "for volume name %s",
+ volname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, "%s",
+ err_str);
+ goto out;
+ }
+
+ if (glusterd_is_tiering_supported(err_str) == _gf_false) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED,
+ "Tiering not supported at this version");
+ ret = -1;
+ goto out;
+ }
- if (!dict_getn (dict, "force", SLEN ("force"))) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "Failed to get flag");
- ret = -1;
- goto out;
+ if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) {
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ snprintf(err_str, sizeof(err_str), "Volume %s is already a tier.",
+ volname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_ALREADY_TIER, "%s",
+ err_str);
+ ret = -1;
+ goto out;
}
- ret = glusterd_volinfo_find (volname, &volinfo);
+ ret = dict_get_int32n(dict, "hot-type", SLEN("hot-type"), &type);
if (ret) {
- snprintf (err_str, sizeof (err_str), "Unable to get volinfo "
- "for volume name %s", volname);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLINFO_GET_FAIL, "%s", err_str);
- goto out;
-
- }
-
- if (glusterd_is_tiering_supported(err_str) == _gf_false) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VERSION_UNSUPPORTED,
- "Tiering not supported at this version");
- ret = -1;
- goto out;
- }
-
- if (dict_getn (dict, "attach-tier", SLEN ("attach-tier"))) {
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- snprintf (err_str, sizeof (err_str),
- "Volume %s is already a tier.", volname);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_ALREADY_TIER, "%s", err_str);
- ret = -1;
- goto out;
- }
-
- ret = dict_get_int32n (dict, "hot-type", SLEN ("hot-type"),
- &type);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED,
- "failed to get type from dictionary");
- goto out;
- }
-
- }
-
- ret = dict_get_strn (dict, "bricks", SLEN ("bricks"), &bricks);
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "failed to get type from dictionary");
+ goto out;
+ }
+ }
+
+ ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Unable to get volume "
+ "bricks");
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
+ err_str);
+ goto out;
+ }
+
+ if (type != volinfo->type) {
+ ret = dict_set_int32n(dict, "type", SLEN("type"), type);
if (ret) {
- snprintf (err_str, sizeof (err_str), "Unable to get volume "
- "bricks");
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_GET_FAILED, "%s", err_str);
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "failed to set the new type in dict");
+ goto out;
}
+ }
- if (type != volinfo->type) {
- ret = dict_set_int32n (dict, "type", SLEN ("type"), type);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED,
- "failed to set the new type in dict");
- goto out;
- }
- }
-
- ret = glusterd_mgmt_v3_initiate_all_phases (req,
- GD_OP_ADD_TIER_BRICK,
- dict);
+ ret = glusterd_mgmt_v3_initiate_all_phases(req, GD_OP_ADD_TIER_BRICK, dict);
out:
- if (ret) {
- rsp.op_ret = -1;
- rsp.op_errno = 0;
- if (err_str[0] == '\0')
- snprintf (err_str, sizeof (err_str),
- "Operation failed");
- rsp.op_errstr = err_str;
- cli_rsp = &rsp;
- glusterd_to_cli (req, cli_rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf_cli_rsp, dict);
- ret = 0; /*sent error to cli, prevent second reply*/
- }
+ if (ret) {
+ rsp.op_ret = -1;
+ rsp.op_errno = 0;
+ if (err_str[0] == '\0')
+ snprintf(err_str, sizeof(err_str), "Operation failed");
+ rsp.op_errstr = err_str;
+ cli_rsp = &rsp;
+ glusterd_to_cli(req, cli_rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp,
+ dict);
+ ret = 0; /*sent error to cli, prevent second reply*/
+ }
- free (cli_req.dict.dict_val); /*its malloced by xdr*/
+    free(cli_req.dict.dict_val); /* it's malloc'd by xdr */
- return ret;
+ return ret;
}
int
-glusterd_handle_add_tier_brick (rpcsvc_request_t *req)
+glusterd_handle_add_tier_brick(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_handle_add_tier_brick);
+ return glusterd_big_locked_handler(req, __glusterd_handle_add_tier_brick);
}
int
-glusterd_handle_attach_tier (rpcsvc_request_t *req)
+glusterd_handle_attach_tier(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __glusterd_handle_add_brick);
+ return glusterd_big_locked_handler(req, __glusterd_handle_add_brick);
}
int
-glusterd_handle_detach_tier (rpcsvc_request_t *req)
+glusterd_handle_detach_tier(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_handle_remove_brick);
+ return glusterd_big_locked_handler(req, __glusterd_handle_remove_brick);
}
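
The three thin handlers above all route through glusterd_big_locked_handler(), which serializes every RPC actor under one lock so the __glusterd_handle_* bodies need no locking of their own. A minimal pthread sketch of that wrapper shape (illustrative names, not the real glusterd API):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    typedef int (*handler_fn)(int req);

    /* Run any actor under the single big lock. */
    static int big_locked_handler(int req, handler_fn actor)
    {
        int ret;
        pthread_mutex_lock(&big_lock);
        ret = actor(req);
        pthread_mutex_unlock(&big_lock);
        return ret;
    }

    static int handle_add_brick(int req)
    {
        printf("handling request %d\n", req);
        return 0;
    }

    int main(void)
    {
        return big_locked_handler(7, handle_add_brick);
    }
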
diff --git a/xlators/mgmt/glusterd/src/glusterd-conn-helper.c b/xlators/mgmt/glusterd/src/glusterd-conn-helper.c
index bfa9d02aa1b..a7f54ec24b7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-conn-helper.c
+++ b/xlators/mgmt/glusterd/src/glusterd-conn-helper.c
@@ -15,7 +15,7 @@
#include <urcu/rculist.h>
glusterd_svc_t *
-glusterd_conn_get_svc_object (glusterd_conn_t *conn)
+glusterd_conn_get_svc_object(glusterd_conn_t *conn)
{
- return cds_list_entry (conn, glusterd_svc_t, conn);
+ return cds_list_entry(conn, glusterd_svc_t, conn);
}
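
cds_list_entry() used above is the classic container-of construct: given a pointer to a member embedded in a structure, it steps back to the enclosing structure. A self-contained equivalent using offsetof:

    #include <stddef.h>
    #include <stdio.h>

    /* Recover the enclosing struct from a pointer to one of its
     * members, as cds_list_entry() does for the conn member. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct conn { int fd; };
    struct svc { const char *name; struct conn conn; };

    int main(void)
    {
        struct svc s = { "shd", { 42 } };
        struct conn *c = &s.conn;            /* only the member is known */
        struct svc *back = container_of(c, struct svc, conn);
        printf("%s %d\n", back->name, back->conn.fd);
        return 0;
    }
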
diff --git a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c
index a2c12ed5e32..87025076d27 100644
--- a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c
@@ -17,122 +17,119 @@
#include "glusterd-messages.h"
int
-glusterd_conn_init (glusterd_conn_t *conn, char *sockpath,
- int frame_timeout, glusterd_conn_notify_t notify)
+glusterd_conn_init(glusterd_conn_t *conn, char *sockpath, int frame_timeout,
+ glusterd_conn_notify_t notify)
{
- int ret = -1;
- dict_t *options = NULL;
- struct rpc_clnt *rpc = NULL;
- xlator_t *this = THIS;
- glusterd_svc_t *svc = NULL;
-
- if (!this)
- goto out;
-
- svc = glusterd_conn_get_svc_object (conn);
- if (!svc) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SVC_GET_FAIL, "Failed to get the service");
- goto out;
- }
-
- ret = rpc_transport_unix_options_build (&options, sockpath,
- frame_timeout);
- if (ret)
- goto out;
-
- ret = dict_set_nstrn (options, "transport.socket.ignore-enoent",
- SLEN ("transport.socket.ignore-enoent"),
- "on", SLEN ("on"));
- if (ret)
- goto out;
-
- /* @options is free'd by rpc_transport when destroyed */
- rpc = rpc_clnt_new (options, this, (char *)svc->name, 16);
- if (!rpc) {
- ret = -1;
- goto out;
- }
-
- ret = rpc_clnt_register_notify (rpc, glusterd_conn_common_notify,
- conn);
- if (ret)
- goto out;
-
- ret = snprintf (conn->sockpath, sizeof (conn->sockpath), "%s",
- sockpath);
- if (ret < 0)
- goto out;
- else
- ret = 0;
-
- conn->frame_timeout = frame_timeout;
- conn->rpc = rpc;
- conn->notify = notify;
+ int ret = -1;
+ dict_t *options = NULL;
+ struct rpc_clnt *rpc = NULL;
+ xlator_t *this = THIS;
+ glusterd_svc_t *svc = NULL;
+
+ if (!this)
+ goto out;
+
+ svc = glusterd_conn_get_svc_object(conn);
+ if (!svc) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_GET_FAIL,
+ "Failed to get the service");
+ goto out;
+ }
+
+ ret = rpc_transport_unix_options_build(&options, sockpath, frame_timeout);
+ if (ret)
+ goto out;
+
+ ret = dict_set_nstrn(options, "transport.socket.ignore-enoent",
+ SLEN("transport.socket.ignore-enoent"), "on",
+ SLEN("on"));
+ if (ret)
+ goto out;
+
+ /* @options is free'd by rpc_transport when destroyed */
+ rpc = rpc_clnt_new(options, this, (char *)svc->name, 16);
+ if (!rpc) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = rpc_clnt_register_notify(rpc, glusterd_conn_common_notify, conn);
+ if (ret)
+ goto out;
+
+ ret = snprintf(conn->sockpath, sizeof(conn->sockpath), "%s", sockpath);
+ if (ret < 0)
+ goto out;
+ else
+ ret = 0;
+
+ conn->frame_timeout = frame_timeout;
+ conn->rpc = rpc;
+ conn->notify = notify;
out:
- if (ret) {
- if (rpc) {
- rpc_clnt_unref (rpc);
- rpc = NULL;
- }
+ if (ret) {
+ if (rpc) {
+ rpc_clnt_unref(rpc);
+ rpc = NULL;
}
- return ret;
+ }
+ return ret;
}
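
glusterd_conn_init() follows the file's single-exit idiom: acquire resources step by step, jump to the out label on any failure, and release only what was actually acquired. A compact sketch of the same shape, with plain malloc standing in for the rpc and dict objects:

    #include <stdio.h>
    #include <stdlib.h>

    static int init_conn(char **pa, char **pb, int fail_second)
    {
        int ret = -1;
        char *a = NULL, *b = NULL;

        a = malloc(16);
        if (!a)
            goto out;
        b = fail_second ? NULL : malloc(16);
        if (!b)
            goto out;
        *pa = a;          /* fully initialized: hand both to the caller */
        *pb = b;
        ret = 0;
    out:
        if (ret) {        /* partial failure: unwind what we acquired */
            free(b);
            free(a);
        }
        return ret;
    }

    int main(void)
    {
        char *a = NULL, *b = NULL;
        printf("%d\n", init_conn(&a, &b, 1));
        free(b);
        free(a);
        return 0;
    }
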
int
-glusterd_conn_term (glusterd_conn_t *conn)
+glusterd_conn_term(glusterd_conn_t *conn)
{
- rpc_clnt_unref (conn->rpc);
- return 0;
+ rpc_clnt_unref(conn->rpc);
+ return 0;
}
int
-glusterd_conn_connect (glusterd_conn_t *conn)
+glusterd_conn_connect(glusterd_conn_t *conn)
{
- return rpc_clnt_start (conn->rpc);
+ return rpc_clnt_start(conn->rpc);
}
int
-glusterd_conn_disconnect (glusterd_conn_t *conn)
+glusterd_conn_disconnect(glusterd_conn_t *conn)
{
- rpc_clnt_disconnect (conn->rpc);
+ rpc_clnt_disconnect(conn->rpc);
- return 0;
+ return 0;
}
-
int
-__glusterd_conn_common_notify (struct rpc_clnt *rpc, void *mydata,
- rpc_clnt_event_t event, void *data)
+__glusterd_conn_common_notify(struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
{
- glusterd_conn_t *conn = mydata;
+ glusterd_conn_t *conn = mydata;
- /* Silently ignoring this error, exactly like the current
- * implementation */
- if (!conn)
- return 0;
+ /* Silently ignoring this error, exactly like the current
+ * implementation */
+ if (!conn)
+ return 0;
- return conn->notify (conn, event);
+ return conn->notify(conn, event);
}
int
-glusterd_conn_common_notify (struct rpc_clnt *rpc, void *mydata,
- rpc_clnt_event_t event, void *data)
+glusterd_conn_common_notify(struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
{
- return glusterd_big_locked_notify
- (rpc, mydata, event, data,
- __glusterd_conn_common_notify);
+ return glusterd_big_locked_notify(rpc, mydata, event, data,
+ __glusterd_conn_common_notify);
}
int32_t
-glusterd_conn_build_socket_filepath (char *rundir, uuid_t uuid,
- char *socketpath, int len)
+glusterd_conn_build_socket_filepath(char *rundir, uuid_t uuid, char *socketpath,
+ int len)
{
- char sockfilepath[PATH_MAX] = {0,};
+ char sockfilepath[PATH_MAX] = {
+ 0,
+ };
- snprintf (sockfilepath, sizeof (sockfilepath), "%s/run-%s",
- rundir, uuid_utoa (uuid));
+ snprintf(sockfilepath, sizeof(sockfilepath), "%s/run-%s", rundir,
+ uuid_utoa(uuid));
- glusterd_set_socket_filepath (sockfilepath, socketpath, len);
- return 0;
+ glusterd_set_socket_filepath(sockfilepath, socketpath, len);
+ return 0;
}
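
glusterd_conn_build_socket_filepath() formats "<rundir>/run-<uuid>" into a fixed buffer. A stand-alone sketch of the same bounded-format step, including the truncation check that snprintf's return value allows (a return at or above the buffer size means the output did not fit):

    #include <stdio.h>

    #define TOY_PATH_MAX 64   /* stands in for PATH_MAX */

    static int build_sock_path(char *out, size_t len,
                               const char *rundir, const char *id)
    {
        int n = snprintf(out, len, "%s/run-%s", rundir, id);
        if (n < 0 || (size_t)n >= len)
            return -1;   /* encoding error or truncated */
        return 0;
    }

    int main(void)
    {
        char path[TOY_PATH_MAX];
        if (build_sock_path(path, sizeof(path),
                            "/var/run/gluster", "abcd") == 0)
            printf("%s\n", path);
        return 0;
    }
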
diff --git a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
index cf003ac895e..075fdf74369 100644
--- a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
+++ b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c
@@ -24,357 +24,353 @@
#include <signal.h>
static int
-dict_get_param (dict_t *dict, char *key, char **param);
+dict_get_param(dict_t *dict, char *key, char **param);
struct gsync_config_opt_vals_ gsync_confopt_vals[] = {
- {.op_name = "change_detector",
- .no_of_pos_vals = 2,
- .case_sensitive = _gf_true,
- .values = {"xsync", "changelog"},
- },
- {.op_name = "special_sync_mode",
- .no_of_pos_vals = 2,
- .case_sensitive = _gf_true,
- .values = {"partial", "recover"}
- },
- {.op_name = "log-level",
- .no_of_pos_vals = 5,
- .case_sensitive = _gf_false,
- .values = {"critical", "error", "warning", "info", "debug"}
- },
- {.op_name = "use-tarssh",
- .no_of_pos_vals = 6,
- .case_sensitive = _gf_false,
- .values = {"true", "false", "0", "1", "yes", "no"}
- },
- {.op_name = "ignore_deletes",
- .no_of_pos_vals = 6,
- .case_sensitive = _gf_false,
- .values = {"true", "false", "0", "1", "yes", "no"}
- },
- {.op_name = "use_meta_volume",
- .no_of_pos_vals = 6,
- .case_sensitive = _gf_false,
- .values = {"true", "false", "0", "1", "yes", "no"}
- },
- {.op_name = "use-meta-volume",
- .no_of_pos_vals = 6,
- .case_sensitive = _gf_false,
- .values = {"true", "false", "0", "1", "yes", "no"}
- },
- {.op_name = NULL,
- },
+ {
+ .op_name = "change_detector",
+ .no_of_pos_vals = 2,
+ .case_sensitive = _gf_true,
+ .values = {"xsync", "changelog"},
+ },
+ {.op_name = "special_sync_mode",
+ .no_of_pos_vals = 2,
+ .case_sensitive = _gf_true,
+ .values = {"partial", "recover"}},
+ {.op_name = "log-level",
+ .no_of_pos_vals = 5,
+ .case_sensitive = _gf_false,
+ .values = {"critical", "error", "warning", "info", "debug"}},
+ {.op_name = "use-tarssh",
+ .no_of_pos_vals = 6,
+ .case_sensitive = _gf_false,
+ .values = {"true", "false", "0", "1", "yes", "no"}},
+ {.op_name = "ignore_deletes",
+ .no_of_pos_vals = 6,
+ .case_sensitive = _gf_false,
+ .values = {"true", "false", "0", "1", "yes", "no"}},
+ {.op_name = "use_meta_volume",
+ .no_of_pos_vals = 6,
+ .case_sensitive = _gf_false,
+ .values = {"true", "false", "0", "1", "yes", "no"}},
+ {.op_name = "use-meta-volume",
+ .no_of_pos_vals = 6,
+ .case_sensitive = _gf_false,
+ .values = {"true", "false", "0", "1", "yes", "no"}},
+ {
+ .op_name = NULL,
+ },
};
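
gsync_confopt_vals above is a sentinel-terminated table of per-option allowed values with a per-row case-sensitivity flag. A reduced sketch of the lookup such a table supports; the validator shown is an assumption about how the table is consumed, not the actual geo-rep code:

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    struct opt_vals {
        const char *op_name;     /* NULL terminates the table */
        int nvals;
        int case_sensitive;
        const char *values[8];
    };

    static const struct opt_vals table[] = {
        { "log-level", 5, 0,
          { "critical", "error", "warning", "info", "debug" } },
        { "change_detector", 2, 1, { "xsync", "changelog" } },
        { NULL, 0, 0, { NULL } },
    };

    static int value_allowed(const char *opt, const char *val)
    {
        const struct opt_vals *row;
        int i;

        for (row = table; row->op_name; row++) {
            if (strcmp(row->op_name, opt) != 0)
                continue;
            for (i = 0; i < row->nvals; i++)
                if (row->case_sensitive
                        ? !strcmp(val, row->values[i])
                        : !strcasecmp(val, row->values[i]))
                    return 1;
            return 0;   /* known option, disallowed value */
        }
        return 1;       /* unknown option: not this table's concern */
    }

    int main(void)
    {
        printf("%d %d\n", value_allowed("log-level", "INFO"),
               value_allowed("change_detector", "Xsync"));
        return 0;
    }
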
-static char *gsync_reserved_opts[] = {
- "gluster-command",
- "pid-file",
- "state-file",
- "session-owner",
- "state-socket-unencoded",
- "socketdir",
- "local-id",
- "local-path",
- "slave-id",
- NULL
-};
+static char *gsync_reserved_opts[] = {"gluster-command",
+ "pid-file",
+ "state-file",
+ "session-owner",
+ "state-socket-unencoded",
+ "socketdir",
+ "local-id",
+ "local-path",
+ "slave-id",
+ NULL};
-static char *gsync_no_restart_opts[] = {
- "checkpoint",
- "log_rsync_performance",
- "log-rsync-performance",
- NULL
-};
+static char *gsync_no_restart_opts[] = {"checkpoint", "log_rsync_performance",
+ "log-rsync-performance", NULL};
int
-__glusterd_handle_sys_exec (rpcsvc_request_t *req)
+__glusterd_handle_sys_exec(rpcsvc_request_t *req)
{
- int32_t ret = 0;
- dict_t *dict = NULL;
- gf_cli_req cli_req = {{0},};
- glusterd_op_t cli_op = GD_OP_SYS_EXEC;
- glusterd_conf_t *priv = NULL;
- char *host_uuid = NULL;
- char err_str[64] = {0,};
- xlator_t *this = NULL;
-
- GF_ASSERT (req);
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- ret = xdr_to_generic (req->msg[0], &cli_req,
- (xdrproc_t)xdr_gf_cli_req);
+ int32_t ret = 0;
+ dict_t *dict = NULL;
+ gf_cli_req cli_req = {
+ {0},
+ };
+ glusterd_op_t cli_op = GD_OP_SYS_EXEC;
+ glusterd_conf_t *priv = NULL;
+ char *host_uuid = NULL;
+ char err_str[64] = {
+ 0,
+ };
+ xlator_t *this = NULL;
+
+ GF_ASSERT(req);
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len) {
+ dict = dict_new();
+ if (!dict)
+ goto out;
+
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
if (ret < 0) {
- req->rpc_err = GARBAGE_ARGS;
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf(err_str, sizeof(err_str),
+ "Unable to decode "
+ "the command");
+ goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
}
- if (cli_req.dict.dict_len) {
- dict = dict_new ();
- if (!dict)
- goto out;
-
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL, "failed to "
- "unserialize req-buffer to dictionary");
- snprintf (err_str, sizeof (err_str), "Unable to decode "
- "the command");
- goto out;
- } else {
- dict->extra_stdfree = cli_req.dict.dict_val;
- }
-
- host_uuid = gf_strdup (uuid_utoa(MY_UUID));
- if (host_uuid == NULL) {
- snprintf (err_str, sizeof (err_str), "Failed to get "
- "the uuid of local glusterd");
- ret = -1;
- goto out;
- }
-
- ret = dict_set_dynstr (dict, "host-uuid", host_uuid);
- if (ret)
- goto out;
+ host_uuid = gf_strdup(uuid_utoa(MY_UUID));
+ if (host_uuid == NULL) {
+ snprintf(err_str, sizeof(err_str),
+ "Failed to get "
+ "the uuid of local glusterd");
+ ret = -1;
+ goto out;
}
- ret = glusterd_op_begin_synctask (req, cli_op, dict);
+ ret = dict_set_dynstr(dict, "host-uuid", host_uuid);
+ if (ret)
+ goto out;
+ }
+
+ ret = glusterd_op_begin_synctask(req, cli_op, dict);
out:
- if (ret) {
- if (err_str[0] == '\0')
- snprintf (err_str, sizeof (err_str),
- "Operation failed");
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- dict, err_str);
- }
- return ret;
+ if (ret) {
+ if (err_str[0] == '\0')
+ snprintf(err_str, sizeof(err_str), "Operation failed");
+ ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);
+ }
+ return ret;
}
int
-__glusterd_handle_copy_file (rpcsvc_request_t *req)
+__glusterd_handle_copy_file(rpcsvc_request_t *req)
{
- int32_t ret = 0;
- dict_t *dict = NULL;
- gf_cli_req cli_req = {{0},};
- glusterd_op_t cli_op = GD_OP_COPY_FILE;
- glusterd_conf_t *priv = NULL;
- char *host_uuid = NULL;
- char err_str[64] = {0,};
- xlator_t *this = NULL;
-
- GF_ASSERT (req);
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- ret = xdr_to_generic (req->msg[0], &cli_req,
- (xdrproc_t)xdr_gf_cli_req);
+ int32_t ret = 0;
+ dict_t *dict = NULL;
+ gf_cli_req cli_req = {
+ {0},
+ };
+ glusterd_op_t cli_op = GD_OP_COPY_FILE;
+ glusterd_conf_t *priv = NULL;
+ char *host_uuid = NULL;
+ char err_str[64] = {
+ 0,
+ };
+ xlator_t *this = NULL;
+
+ GF_ASSERT(req);
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len) {
+ dict = dict_new();
+ if (!dict)
+ goto out;
+
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
if (ret < 0) {
- req->rpc_err = GARBAGE_ARGS;
- goto out;
+            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+                   "failed to "
+                   "unserialize req-buffer to dictionary");
+ snprintf(err_str, sizeof(err_str),
+ "Unable to decode "
+ "the command");
+ goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
}
- if (cli_req.dict.dict_len) {
- dict = dict_new ();
- if (!dict)
- goto out;
-
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL, "failed to"
- "unserialize req-buffer to dictionary");
- snprintf (err_str, sizeof (err_str), "Unable to decode "
- "the command");
- goto out;
- } else {
- dict->extra_stdfree = cli_req.dict.dict_val;
- }
-
- host_uuid = gf_strdup (uuid_utoa(MY_UUID));
- if (host_uuid == NULL) {
- snprintf (err_str, sizeof (err_str), "Failed to get "
- "the uuid of local glusterd");
- ret = -1;
- goto out;
- }
-
- ret = dict_set_dynstr (dict, "host-uuid", host_uuid);
- if (ret)
- goto out;
+ host_uuid = gf_strdup(uuid_utoa(MY_UUID));
+ if (host_uuid == NULL) {
+ snprintf(err_str, sizeof(err_str),
+ "Failed to get "
+ "the uuid of local glusterd");
+ ret = -1;
+ goto out;
}
- ret = glusterd_op_begin_synctask (req, cli_op, dict);
+ ret = dict_set_dynstr(dict, "host-uuid", host_uuid);
+ if (ret)
+ goto out;
+ }
+
+ ret = glusterd_op_begin_synctask(req, cli_op, dict);
out:
- if (ret) {
- if (err_str[0] == '\0')
- snprintf (err_str, sizeof (err_str),
- "Operation failed");
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- dict, err_str);
- }
- return ret;
+ if (ret) {
+ if (err_str[0] == '\0')
+ snprintf(err_str, sizeof(err_str), "Operation failed");
+ ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);
+ }
+ return ret;
}
int
-__glusterd_handle_gsync_set (rpcsvc_request_t *req)
+__glusterd_handle_gsync_set(rpcsvc_request_t *req)
{
- int32_t ret = 0;
- dict_t *dict = NULL;
- gf_cli_req cli_req = {{0},};
- glusterd_op_t cli_op = GD_OP_GSYNC_SET;
- char *master = NULL;
- char *slave = NULL;
- char operation[64] = {0,};
- int type = 0;
- glusterd_conf_t *priv = NULL;
- char *host_uuid = NULL;
- char err_str[64] = {0,};
- xlator_t *this = NULL;
-
- GF_ASSERT (req);
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- ret = xdr_to_generic (req->msg[0], &cli_req,
- (xdrproc_t)xdr_gf_cli_req);
- if (ret < 0) {
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- if (cli_req.dict.dict_len) {
- dict = dict_new ();
- if (!dict)
- goto out;
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL, "failed to "
- "unserialize req-buffer to dictionary");
- snprintf (err_str, sizeof (err_str), "Unable to decode "
- "the command");
- goto out;
- } else {
- dict->extra_stdfree = cli_req.dict.dict_val;
- }
-
- host_uuid = gf_strdup (uuid_utoa(MY_UUID));
- if (host_uuid == NULL) {
- snprintf (err_str, sizeof (err_str), "Failed to get "
- "the uuid of local glusterd");
- ret = -1;
- goto out;
- }
- ret = dict_set_dynstr (dict, "host-uuid", host_uuid);
- if (ret)
- goto out;
-
- }
-
- ret = dict_get_str (dict, "master", &master);
+ int32_t ret = 0;
+ dict_t *dict = NULL;
+ gf_cli_req cli_req = {
+ {0},
+ };
+ glusterd_op_t cli_op = GD_OP_GSYNC_SET;
+ char *master = NULL;
+ char *slave = NULL;
+ char operation[64] = {
+ 0,
+ };
+ int type = 0;
+ glusterd_conf_t *priv = NULL;
+ char *host_uuid = NULL;
+ char err_str[64] = {
+ 0,
+ };
+ xlator_t *this = NULL;
+
+ GF_ASSERT(req);
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len) {
+ dict = dict_new();
+ if (!dict)
+ goto out;
+
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
if (ret < 0) {
- gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
- "master not found, while handling "GEOREP" options");
- master = "(No Master)";
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf(err_str, sizeof(err_str),
+ "Unable to decode "
+ "the command");
+ goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
}
- ret = dict_get_str (dict, "slave", &slave);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
- "slave not found, while handling "GEOREP" options");
- slave = "(No Slave)";
+ host_uuid = gf_strdup(uuid_utoa(MY_UUID));
+ if (host_uuid == NULL) {
+ snprintf(err_str, sizeof(err_str),
+ "Failed to get "
+ "the uuid of local glusterd");
+ ret = -1;
+ goto out;
}
-
- ret = dict_get_int32 (dict, "type", &type);
- if (ret < 0) {
- snprintf (err_str, sizeof (err_str), "Command type not found "
- "while handling "GEOREP" options");
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "%s", err_str);
- goto out;
- }
-
- switch (type) {
+ ret = dict_set_dynstr(dict, "host-uuid", host_uuid);
+ if (ret)
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "master", &master);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
+ "master not found, while handling " GEOREP " options");
+ master = "(No Master)";
+ }
+
+ ret = dict_get_str(dict, "slave", &slave);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
+ "slave not found, while handling " GEOREP " options");
+ slave = "(No Slave)";
+ }
+
+ ret = dict_get_int32(dict, "type", &type);
+ if (ret < 0) {
+ snprintf(err_str, sizeof(err_str),
+ "Command type not found "
+ "while handling " GEOREP " options");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ err_str);
+ goto out;
+ }
+
+ switch (type) {
case GF_GSYNC_OPTION_TYPE_CREATE:
- snprintf (operation, sizeof (operation), "create");
- cli_op = GD_OP_GSYNC_CREATE;
- break;
+ snprintf(operation, sizeof(operation), "create");
+ cli_op = GD_OP_GSYNC_CREATE;
+ break;
case GF_GSYNC_OPTION_TYPE_START:
- snprintf (operation, sizeof (operation), "start");
- break;
+ snprintf(operation, sizeof(operation), "start");
+ break;
case GF_GSYNC_OPTION_TYPE_STOP:
- snprintf (operation, sizeof (operation), "stop");
- break;
+ snprintf(operation, sizeof(operation), "stop");
+ break;
case GF_GSYNC_OPTION_TYPE_PAUSE:
- snprintf (operation, sizeof (operation), "pause");
- break;
+ snprintf(operation, sizeof(operation), "pause");
+ break;
case GF_GSYNC_OPTION_TYPE_RESUME:
- snprintf (operation, sizeof (operation), "resume");
- break;
+ snprintf(operation, sizeof(operation), "resume");
+ break;
case GF_GSYNC_OPTION_TYPE_CONFIG:
- snprintf (operation, sizeof (operation), "config");
- break;
+ snprintf(operation, sizeof(operation), "config");
+ break;
case GF_GSYNC_OPTION_TYPE_STATUS:
- snprintf (operation, sizeof (operation), "status");
- break;
- }
+ snprintf(operation, sizeof(operation), "status");
+ break;
+ }
- ret = glusterd_op_begin_synctask (req, cli_op, dict);
+ ret = glusterd_op_begin_synctask(req, cli_op, dict);
out:
- if (ret) {
- if (err_str[0] == '\0')
- snprintf (err_str, sizeof (err_str),
- "Operation failed");
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- dict, err_str);
- }
- return ret;
+ if (ret) {
+ if (err_str[0] == '\0')
+ snprintf(err_str, sizeof(err_str), "Operation failed");
+ ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);
+ }
+ return ret;
}
int
-glusterd_handle_sys_exec (rpcsvc_request_t *req)
+glusterd_handle_sys_exec(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __glusterd_handle_sys_exec);
+ return glusterd_big_locked_handler(req, __glusterd_handle_sys_exec);
}
int
-glusterd_handle_copy_file (rpcsvc_request_t *req)
+glusterd_handle_copy_file(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __glusterd_handle_copy_file);
+ return glusterd_big_locked_handler(req, __glusterd_handle_copy_file);
}
int
-glusterd_handle_gsync_set (rpcsvc_request_t *req)
+glusterd_handle_gsync_set(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __glusterd_handle_gsync_set);
+ return glusterd_big_locked_handler(req, __glusterd_handle_gsync_set);
}
/*****
@@ -384,1583 +380,1574 @@ glusterd_handle_gsync_set (rpcsvc_request_t *req)
*****/
static void
-glusterd_urltransform_init (runner_t *runner, const char *transname)
+glusterd_urltransform_init(runner_t *runner, const char *transname)
{
- runinit (runner);
- runner_add_arg (runner, GSYNCD_PREFIX"/gsyncd");
- runner_argprintf (runner, "--%s-url", transname);
+ runinit(runner);
+ runner_add_arg(runner, GSYNCD_PREFIX "/gsyncd");
+ runner_argprintf(runner, "--%s-url", transname);
}
static void
-glusterd_urltransform_add (runner_t *runner, const char *url)
+glusterd_urltransform_add(runner_t *runner, const char *url)
{
- runner_add_arg (runner, url);
+ runner_add_arg(runner, url);
}
/* Helper routine to terminate just before slave_voluuid */
static int32_t
-parse_slave_url (char *slv_url, char **slave)
+parse_slave_url(char *slv_url, char **slave)
{
- char *tmp = NULL;
- xlator_t *this = NULL;
- int32_t ret = -1;
-
- this = THIS;
-
- /* slave format:
- * master_node_uuid:ssh://slave_host::slave_vol:slave_voluuid */
- *slave = strchr (slv_url, ':');
- if (!(*slave)) {
- goto out;
- }
- (*slave)++;
-
- /* To terminate at : before slave volume uuid */
- tmp = strstr (*slave, "::");
- if (!tmp) {
- goto out;
- }
- tmp += 2;
- tmp = strchr (tmp, ':');
- if (!tmp)
- gf_msg_debug (this->name, 0, "old slave: %s!", *slave);
- else
- *tmp = '\0';
-
- ret = 0;
- gf_msg_debug (this->name, 0, "parsed slave: %s!", *slave);
+ char *tmp = NULL;
+ xlator_t *this = NULL;
+ int32_t ret = -1;
+
+ this = THIS;
+
+ /* slave format:
+ * master_node_uuid:ssh://slave_host::slave_vol:slave_voluuid */
+ *slave = strchr(slv_url, ':');
+ if (!(*slave)) {
+ goto out;
+ }
+ (*slave)++;
+
+ /* To terminate at : before slave volume uuid */
+ tmp = strstr(*slave, "::");
+ if (!tmp) {
+ goto out;
+ }
+ tmp += 2;
+ tmp = strchr(tmp, ':');
+ if (!tmp)
+ gf_msg_debug(this->name, 0, "old slave: %s!", *slave);
+ else
+ *tmp = '\0';
+
+ ret = 0;
+ gf_msg_debug(this->name, 0, "parsed slave: %s!", *slave);
out:
- return ret;
+ return ret;
}
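For reference, each entry in the gsync_slaves dict has the shape master_node_uuid:ssh://slave_host::slave_vol:slave_voluuid; parse_slave_url() above steps past the leading node uuid and truncates at the ':' that introduces the volume uuid. A minimal standalone sketch of the same slicing, with made-up values:

#include <stdio.h>
#include <string.h>

int main(void)
{
    char entry[] = "NODEUUID:ssh://slavehost::slavevol:VOLUUID"; /* hypothetical */
    char *slave = strchr(entry, ':');
    if (!slave)
        return 1;
    slave++;                         /* skip "NODEUUID:" */
    char *tmp = strstr(slave, "::"); /* host::vol separator */
    if (tmp)
        tmp = strchr(tmp + 2, ':');  /* ':' preceding the volume uuid */
    if (tmp)
        *tmp = '\0';                 /* old-style entries lack it */
    printf("%s\n", slave);           /* prints "ssh://slavehost::slavevol" */
    return 0;
}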
static int
-_glusterd_urltransform_add_iter (dict_t *dict, char *key, data_t *value, void *data)
+_glusterd_urltransform_add_iter(dict_t *dict, char *key, data_t *value,
+ void *data)
{
- runner_t *runner = (runner_t *)data;
- char slv_url[VOLINFO_SLAVE_URL_MAX] = {0};
- char *slave = NULL;
- xlator_t *this = NULL;
- int32_t ret = -1;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
-
- gf_msg_debug (this->name, 0, "value->data %s", value->data);
-
- if (snprintf (slv_url, sizeof(slv_url), "%s", value->data) >=
- sizeof (slv_url)) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVE_VOL_PARSE_FAIL,
- "Error in copying slave: %s!", value->data);
- goto out;
- }
-
- ret = parse_slave_url (slv_url, &slave);
- if (ret == -1) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVE_VOL_PARSE_FAIL,
- "Error in parsing slave: %s!", value->data);
- goto out;
- }
-
- runner_add_arg (runner, slave);
- ret = 0;
+ runner_t *runner = (runner_t *)data;
+ char slv_url[VOLINFO_SLAVE_URL_MAX] = {0};
+ char *slave = NULL;
+ xlator_t *this = NULL;
+ int32_t ret = -1;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ gf_msg_debug(this->name, 0, "value->data %s", value->data);
+
+ if (snprintf(slv_url, sizeof(slv_url), "%s", value->data) >=
+ sizeof(slv_url)) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_VOL_PARSE_FAIL,
+ "Error in copying slave: %s!", value->data);
+ goto out;
+ }
+
+ ret = parse_slave_url(slv_url, &slave);
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_VOL_PARSE_FAIL,
+ "Error in parsing slave: %s!", value->data);
+ goto out;
+ }
+
+ runner_add_arg(runner, slave);
+ ret = 0;
out:
- return ret;
+ return ret;
}
static void
-glusterd_urltransform_free (char **linearr, unsigned n)
+glusterd_urltransform_free(char **linearr, unsigned n)
{
- int i = 0;
+ int i = 0;
- for (; i < n; i++)
- GF_FREE (linearr[i]);
+ for (; i < n; i++)
+ GF_FREE(linearr[i]);
- GF_FREE (linearr);
+ GF_FREE(linearr);
}
static int
-glusterd_urltransform (runner_t *runner, char ***linearrp)
+glusterd_urltransform(runner_t *runner, char ***linearrp)
{
- char **linearr = NULL;
- char *line = NULL;
- unsigned arr_len = 32;
- unsigned arr_idx = 0;
- gf_boolean_t error = _gf_false;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- linearr = GF_CALLOC (arr_len, sizeof (char *), gf_gld_mt_linearr);
- if (!linearr) {
+ char **linearr = NULL;
+ char *line = NULL;
+ unsigned arr_len = 32;
+ unsigned arr_idx = 0;
+ gf_boolean_t error = _gf_false;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ linearr = GF_CALLOC(arr_len, sizeof(char *), gf_gld_mt_linearr);
+ if (!linearr) {
+ error = _gf_true;
+ goto out;
+ }
+
+ runner_redir(runner, STDOUT_FILENO, RUN_PIPE);
+ if (runner_start(runner) != 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SPAWNING_CHILD_FAILED,
+ "spawning child failed");
+
+ error = _gf_true;
+ goto out;
+ }
+
+ arr_idx = 0;
+ for (;;) {
+ size_t len;
+ line = GF_MALLOC(1024, gf_gld_mt_linebuf);
+ if (!line) {
+ error = _gf_true;
+ goto out;
+ }
+
+ if (fgets(line, 1024, runner_chio(runner, STDOUT_FILENO)) == NULL) {
+ GF_FREE(line);
+ break;
+ }
+
+ len = strlen(line);
+ if (len == 0 || line[len - 1] != '\n') {
+ GF_FREE(line);
+ error = _gf_true;
+ goto out;
+ }
+ line[len - 1] = '\0';
+
+ if (arr_idx == arr_len) {
+ void *p = linearr;
+ arr_len <<= 1;
+            p = GF_REALLOC(linearr, arr_len * sizeof(char *)); /* bytes, not element count */
+ if (!p) {
+ GF_FREE(line);
error = _gf_true;
goto out;
+ }
+ linearr = p;
}
+ linearr[arr_idx] = line;
- runner_redir (runner, STDOUT_FILENO, RUN_PIPE);
- if (runner_start (runner) != 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SPAWNING_CHILD_FAILED,
- "spawning child failed");
-
- error = _gf_true;
- goto out;
- }
-
- arr_idx = 0;
- for (;;) {
- size_t len;
- line = GF_MALLOC (1024, gf_gld_mt_linebuf);
- if (!line) {
- error = _gf_true;
- goto out;
- }
-
- if (fgets (line, 1024, runner_chio (runner, STDOUT_FILENO)) ==
- NULL) {
- GF_FREE (line);
- break;
- }
-
- len = strlen (line);
- if (len == 0 || line[len - 1] != '\n') {
- GF_FREE (line);
- error = _gf_true;
- goto out;
- }
- line[len - 1] = '\0';
-
- if (arr_idx == arr_len) {
- void *p = linearr;
- arr_len <<= 1;
- p = GF_REALLOC (linearr, arr_len);
- if (!p) {
- GF_FREE (line);
- error = _gf_true;
- goto out;
- }
- linearr = p;
- }
- linearr[arr_idx] = line;
-
- arr_idx++;
- }
-
- out:
+ arr_idx++;
+ }
- /* XXX chpid field is not exported by run API
- * but runner_end() does not abort the invoked
- * process (ie. it might block in waitpid(2))
- * so we resort to a manual kill a the private field
- */
- if (error && runner->chpid > 0)
- kill (runner->chpid, SIGKILL);
-
- if (runner_end (runner) != 0)
- error = _gf_true;
+out:
- if (error) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_READ_CHILD_DATA_FAILED,
- "reading data from child failed");
- glusterd_urltransform_free (linearr, arr_idx);
- return -1;
- }
+ /* XXX chpid field is not exported by run API
+ * but runner_end() does not abort the invoked
+     * process (i.e. it might block in waitpid(2)),
+     * so we resort to a manual kill via the private field
+ */
+ if (error && runner->chpid > 0)
+ kill(runner->chpid, SIGKILL);
+
+ if (runner_end(runner) != 0)
+ error = _gf_true;
+
+ if (error) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_READ_CHILD_DATA_FAILED,
+ "reading data from child failed");
+ glusterd_urltransform_free(linearr, arr_idx);
+ return -1;
+ }
- *linearrp = linearr;
- return arr_idx;
+ *linearrp = linearr;
+ return arr_idx;
}
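The loop above reads the child's stdout one line at a time into a pointer array that doubles when full; note the reallocation must be sized in bytes, hence the sizeof(char *) factor. A self-contained sketch of that growth pattern, using plain realloc in place of the GF_ wrappers:

#include <stdlib.h>

/* Append `line` to a growable array of strings, doubling capacity when
 * full, as glusterd_urltransform() does with arr_len <<= 1.
 * Returns 0 on success, -1 on allocation failure. */
static int append_line(char ***arr, unsigned *len, unsigned *idx, char *line)
{
    if (*idx == *len) {
        unsigned new_len = *len << 1;
        char **p = realloc(*arr, new_len * sizeof(char *));
        if (!p)
            return -1;
        *arr = p;
        *len = new_len;
    }
    (*arr)[(*idx)++] = line;
    return 0;
}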
static int
-glusterd_urltransform_single (const char *url, const char *transname,
- char ***linearrp)
+glusterd_urltransform_single(const char *url, const char *transname,
+ char ***linearrp)
{
- runner_t runner = {0,};
+ runner_t runner = {
+ 0,
+ };
- glusterd_urltransform_init (&runner, transname);
- glusterd_urltransform_add (&runner, url);
- return glusterd_urltransform (&runner, linearrp);
+ glusterd_urltransform_init(&runner, transname);
+ glusterd_urltransform_add(&runner, url);
+ return glusterd_urltransform(&runner, linearrp);
}
-
struct dictidxmark {
- unsigned isrch;
- unsigned ithis;
- char *ikey;
+ unsigned isrch;
+ unsigned ithis;
+ char *ikey;
};
-
struct slave_vol_config {
- char old_slvhost[_POSIX_HOST_NAME_MAX+1];
- char old_slvuser[LOGIN_NAME_MAX];
- unsigned old_slvidx;
- char slave_voluuid[GF_UUID_BUF_SIZE];
+ char old_slvhost[_POSIX_HOST_NAME_MAX + 1];
+ char old_slvuser[LOGIN_NAME_MAX];
+ unsigned old_slvidx;
+ char slave_voluuid[GF_UUID_BUF_SIZE];
};
static int
-_dict_mark_atindex (dict_t *dict, char *key, data_t *value, void *data)
+_dict_mark_atindex(dict_t *dict, char *key, data_t *value, void *data)
{
- struct dictidxmark *dim = data;
+ struct dictidxmark *dim = data;
- if (dim->isrch == dim->ithis)
- dim->ikey = key;
+ if (dim->isrch == dim->ithis)
+ dim->ikey = key;
- dim->ithis++;
- return 0;
+ dim->ithis++;
+ return 0;
}
static char *
-dict_get_by_index (dict_t *dict, unsigned i)
+dict_get_by_index(dict_t *dict, unsigned i)
{
- struct dictidxmark dim = {0,};
+ struct dictidxmark dim = {
+ 0,
+ };
- dim.isrch = i;
- dict_foreach (dict, _dict_mark_atindex, &dim);
+ dim.isrch = i;
+ dict_foreach(dict, _dict_mark_atindex, &dim);
- return dim.ikey;
+ return dim.ikey;
}
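Since dict_foreach() visits entries in a stable order, _dict_mark_atindex() simply counts visits until it reaches the requested position, so each lookup is linear in the dict size. A hedged usage sketch (the volume handle and index are hypothetical):

/* Fetch the key stored at position 2 of the gsync_slaves dict,
 * e.g. "slave3"; the result is NULL if fewer entries exist. */
char *key = dict_get_by_index(volinfo->gsync_slaves, 2);
if (key)
    gf_msg_debug("glusterd", 0, "key at index 2: %s", key);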
static int
-glusterd_get_slave (glusterd_volinfo_t *vol, const char *slaveurl, char **slavekey)
+glusterd_get_slave(glusterd_volinfo_t *vol, const char *slaveurl,
+ char **slavekey)
{
- runner_t runner = {0,};
- int n = 0;
- int i = 0;
- char **linearr = NULL;
- int32_t ret = 0;
-
- glusterd_urltransform_init (&runner, "canonicalize");
- ret = dict_foreach (vol->gsync_slaves, _glusterd_urltransform_add_iter,
- &runner);
- if (ret < 0)
- return -2;
-
- glusterd_urltransform_add (&runner, slaveurl);
-
- n = glusterd_urltransform (&runner, &linearr);
- if (n == -1)
- return -2;
-
- for (i = 0; i < n - 1; i++) {
- if (strcmp (linearr[i], linearr[n - 1]) == 0)
- break;
- }
- glusterd_urltransform_free (linearr, n);
-
- if (i < n - 1)
- *slavekey = dict_get_by_index (vol->gsync_slaves, i);
- else
- i = -1;
-
- return i;
+ runner_t runner = {
+ 0,
+ };
+ int n = 0;
+ int i = 0;
+ char **linearr = NULL;
+ int32_t ret = 0;
+
+ glusterd_urltransform_init(&runner, "canonicalize");
+ ret = dict_foreach(vol->gsync_slaves, _glusterd_urltransform_add_iter,
+ &runner);
+ if (ret < 0)
+ return -2;
+
+ glusterd_urltransform_add(&runner, slaveurl);
+
+ n = glusterd_urltransform(&runner, &linearr);
+ if (n == -1)
+ return -2;
+
+ for (i = 0; i < n - 1; i++) {
+ if (strcmp(linearr[i], linearr[n - 1]) == 0)
+ break;
+ }
+ glusterd_urltransform_free(linearr, n);
+
+ if (i < n - 1)
+ *slavekey = dict_get_by_index(vol->gsync_slaves, i);
+ else
+ i = -1;
+
+ return i;
}
static int
-glusterd_query_extutil_generic (char *resbuf, size_t blen, runner_t *runner, void *data,
- int (*fcbk)(char *resbuf, size_t blen, FILE *fp, void *data))
+glusterd_query_extutil_generic(char *resbuf, size_t blen, runner_t *runner,
+ void *data,
+ int (*fcbk)(char *resbuf, size_t blen, FILE *fp,
+ void *data))
{
- int ret = 0;
- xlator_t *this = NULL;
+ int ret = 0;
+ xlator_t *this = NULL;
- this = THIS;
- GF_ASSERT (this);
+ this = THIS;
+ GF_ASSERT(this);
- runner_redir (runner, STDOUT_FILENO, RUN_PIPE);
- if (runner_start (runner) != 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SPAWNING_CHILD_FAILED,
- "spawning child failed");
+ runner_redir(runner, STDOUT_FILENO, RUN_PIPE);
+ if (runner_start(runner) != 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SPAWNING_CHILD_FAILED,
+ "spawning child failed");
- return -1;
- }
+ return -1;
+ }
- ret = fcbk (resbuf, blen, runner_chio (runner, STDOUT_FILENO), data);
+ ret = fcbk(resbuf, blen, runner_chio(runner, STDOUT_FILENO), data);
- ret |= runner_end (runner);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_READ_CHILD_DATA_FAILED,
- "reading data from child failed");
+ ret |= runner_end(runner);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_READ_CHILD_DATA_FAILED,
+ "reading data from child failed");
- return ret ? -1 : 0;
+ return ret ? -1 : 0;
}
static int
_fcbk_singleline(char *resbuf, size_t blen, FILE *fp, void *data)
{
- char *ptr = NULL;
+ char *ptr = NULL;
- errno = 0;
- ptr = fgets (resbuf, blen, fp);
- if (ptr) {
- size_t len = strlen(resbuf);
- if (len && resbuf[len-1] == '\n')
- resbuf[len-1] = '\0'; //strip off \n
- }
+ errno = 0;
+ ptr = fgets(resbuf, blen, fp);
+ if (ptr) {
+ size_t len = strlen(resbuf);
+ if (len && resbuf[len - 1] == '\n')
+ resbuf[len - 1] = '\0'; // strip off \n
+ }
- return errno ? -1 : 0;
+ return errno ? -1 : 0;
}
static int
-glusterd_query_extutil (char *resbuf, runner_t *runner)
+glusterd_query_extutil(char *resbuf, runner_t *runner)
{
- return glusterd_query_extutil_generic (resbuf, PATH_MAX, runner, NULL,
- _fcbk_singleline);
+ return glusterd_query_extutil_generic(resbuf, PATH_MAX, runner, NULL,
+ _fcbk_singleline);
}
static int
-glusterd_get_slave_voluuid (char *slave_host, char *slave_vol, char *vol_uuid)
+glusterd_get_slave_voluuid(char *slave_host, char *slave_vol, char *vol_uuid)
{
- runner_t runner = {0,};
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
- int ret = -1;
+ runner_t runner = {
+ 0,
+ };
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ int ret = -1;
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
- priv = this->private;
- GF_VALIDATE_OR_GOTO (this->name, priv, out);
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, priv, out);
- runinit (&runner);
- runner_add_arg (&runner, GSYNCD_PREFIX"/gsyncd");
- runner_add_arg (&runner, "--slavevoluuid-get");
- runner_argprintf (&runner, "%s::%s", slave_host, slave_vol);
+ runinit(&runner);
+ runner_add_arg(&runner, GSYNCD_PREFIX "/gsyncd");
+ runner_add_arg(&runner, "--slavevoluuid-get");
+ runner_argprintf(&runner, "%s::%s", slave_host, slave_vol);
- synclock_unlock (&priv->big_lock);
- ret = glusterd_query_extutil (vol_uuid, &runner);
- synclock_lock (&priv->big_lock);
+ synclock_unlock(&priv->big_lock);
+ ret = glusterd_query_extutil(vol_uuid, &runner);
+ synclock_lock(&priv->big_lock);
out:
- return ret;
+ return ret;
}
-
static int
-_fcbk_conftodict (char *resbuf, size_t blen, FILE *fp, void *data)
+_fcbk_conftodict(char *resbuf, size_t blen, FILE *fp, void *data)
{
- char *ptr = NULL;
- dict_t *dict = data;
- char *v = NULL;
-
- for (;;) {
- errno = 0;
- ptr = fgets (resbuf, blen, fp);
- if (!ptr)
- break;
- v = resbuf + strlen(resbuf) - 1;
- while (isspace (*v))
- /* strip trailing space */
- *v-- = '\0';
- if (v == resbuf)
- /* skip empty line */
- continue;
- v = strchr (resbuf, ':');
- if (!v)
- return -1;
- *v++ = '\0';
- while (isspace (*v))
- v++;
- v = gf_strdup (v);
- if (!v)
- return -1;
- if (dict_set_dynstr (dict, resbuf, v) != 0) {
- GF_FREE (v);
- return -1;
- }
- }
+ char *ptr = NULL;
+ dict_t *dict = data;
+ char *v = NULL;
- return errno ? -1 : 0;
+ for (;;) {
+ errno = 0;
+ ptr = fgets(resbuf, blen, fp);
+ if (!ptr)
+ break;
+ v = resbuf + strlen(resbuf) - 1;
+ while (isspace(*v))
+ /* strip trailing space */
+ *v-- = '\0';
+ if (v == resbuf)
+ /* skip empty line */
+ continue;
+ v = strchr(resbuf, ':');
+ if (!v)
+ return -1;
+ *v++ = '\0';
+ while (isspace(*v))
+ v++;
+ v = gf_strdup(v);
+ if (!v)
+ return -1;
+ if (dict_set_dynstr(dict, resbuf, v) != 0) {
+ GF_FREE(v);
+ return -1;
+ }
+ }
+
+ return errno ? -1 : 0;
}
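_fcbk_conftodict() consumes gsyncd's --config-get-all output, one colon-separated pair per line, storing each pair as a dict key/value. A standalone sketch of the per-line split (the sample line is hypothetical):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    char line[] = "log_level: INFO\n"; /* hypothetical gsyncd output line */
    char *v = line + strlen(line) - 1;
    while (isspace((unsigned char)*v))
        *v-- = '\0';                   /* strip trailing whitespace */
    v = strchr(line, ':');
    if (!v)
        return 1;                      /* malformed line */
    *v++ = '\0';
    while (isspace((unsigned char)*v))
        v++;
    printf("key=%s value=%s\n", line, v); /* key=log_level value=INFO */
    return 0;
}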
static int
-glusterd_gsync_get_config (char *master, char *slave, char *conf_path, dict_t *dict)
+glusterd_gsync_get_config(char *master, char *slave, char *conf_path,
+ dict_t *dict)
{
- /* key + value, where value must be able to accommodate a path */
- char resbuf[256 + PATH_MAX] = {0,};
- runner_t runner = {0,};
-
- runinit (&runner);
- runner_add_args (&runner, GSYNCD_PREFIX"/gsyncd", "-c", NULL);
- runner_argprintf (&runner, "%s", conf_path);
- runner_argprintf (&runner, "--iprefix=%s", DATADIR);
- runner_argprintf (&runner, ":%s", master);
- runner_add_args (&runner, slave, "--config-get-all", NULL);
-
- return glusterd_query_extutil_generic (resbuf, sizeof (resbuf),
- &runner, dict, _fcbk_conftodict);
+ /* key + value, where value must be able to accommodate a path */
+ char resbuf[256 + PATH_MAX] = {
+ 0,
+ };
+ runner_t runner = {
+ 0,
+ };
+
+ runinit(&runner);
+ runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "-c", NULL);
+ runner_argprintf(&runner, "%s", conf_path);
+ runner_argprintf(&runner, "--iprefix=%s", DATADIR);
+ runner_argprintf(&runner, ":%s", master);
+ runner_add_args(&runner, slave, "--config-get-all", NULL);
+
+ return glusterd_query_extutil_generic(resbuf, sizeof(resbuf), &runner, dict,
+ _fcbk_conftodict);
}
static int
-_fcbk_statustostruct (char *resbuf, size_t blen, FILE *fp,
- void *data)
+_fcbk_statustostruct(char *resbuf, size_t blen, FILE *fp, void *data)
{
- char *ptr = NULL;
- char *v = NULL;
- char *k = NULL;
- gf_gsync_status_t *sts_val = NULL;
-
- sts_val = (gf_gsync_status_t *)data;
-
- for (;;) {
- errno = 0;
- ptr = fgets (resbuf, blen, fp);
- if (!ptr)
- break;
-
- v = resbuf + strlen(resbuf) - 1;
- while (isspace (*v))
- /* strip trailing space */
- *v-- = '\0';
- if (v == resbuf)
- /* skip empty line */
- continue;
- v = strchr (resbuf, ':');
- if (!v)
- return -1;
- *v++ = '\0';
- while (isspace (*v))
- v++;
- v = gf_strdup (v);
- if (!v)
- return -1;
-
- k = gf_strdup (resbuf);
- if (!k) {
- GF_FREE (v);
- return -1;
- }
+ char *ptr = NULL;
+ char *v = NULL;
+ char *k = NULL;
+ gf_gsync_status_t *sts_val = NULL;
- if (strcmp (k, "worker_status") == 0) {
- memcpy (sts_val->worker_status, v,
- strlen(v));
- sts_val->worker_status[strlen(v)] = '\0';
- } else if (strcmp (k, "slave_node") == 0) {
- memcpy (sts_val->slave_node, v,
- strlen(v));
- sts_val->slave_node[strlen(v)] = '\0';
- } else if (strcmp (k, "crawl_status") == 0) {
- memcpy (sts_val->crawl_status, v,
- strlen(v));
- sts_val->crawl_status[strlen(v)] = '\0';
- } else if (strcmp (k, "last_synced") == 0) {
- memcpy (sts_val->last_synced, v,
- strlen(v));
- sts_val->last_synced[strlen(v)] = '\0';
- } else if (strcmp (k, "last_synced_utc") == 0) {
- memcpy (sts_val->last_synced_utc, v,
- strlen(v));
- sts_val->last_synced_utc[strlen(v)] = '\0';
- } else if (strcmp (k, "entry") == 0) {
- memcpy (sts_val->entry, v,
- strlen(v));
- sts_val->entry[strlen(v)] = '\0';
- } else if (strcmp (k, "data") == 0) {
- memcpy (sts_val->data, v,
- strlen(v));
- sts_val->data[strlen(v)] = '\0';
- } else if (strcmp (k, "meta") == 0) {
- memcpy (sts_val->meta, v,
- strlen(v));
- sts_val->meta[strlen(v)] = '\0';
- } else if (strcmp (k, "failures") == 0) {
- memcpy (sts_val->failures, v,
- strlen(v));
- sts_val->failures[strlen(v)] = '\0';
- } else if (strcmp (k, "checkpoint_time") == 0) {
- memcpy (sts_val->checkpoint_time, v,
- strlen(v));
- sts_val->checkpoint_time[strlen(v)] = '\0';
- } else if (strcmp (k, "checkpoint_time_utc") == 0) {
- memcpy (sts_val->checkpoint_time_utc, v,
- strlen(v));
- sts_val->checkpoint_time_utc[strlen(v)] = '\0';
- } else if (strcmp (k, "checkpoint_completed") == 0) {
- memcpy (sts_val->checkpoint_completed, v,
- strlen(v));
- sts_val->checkpoint_completed[strlen(v)] = '\0';
- } else if (strcmp (k, "checkpoint_completion_time") == 0) {
- memcpy (sts_val->checkpoint_completion_time, v,
- strlen(v));
- sts_val->checkpoint_completion_time[strlen(v)] = '\0';
- } else if (strcmp (k, "checkpoint_completion_time_utc") == 0) {
- memcpy (sts_val->checkpoint_completion_time_utc, v,
- strlen(v));
- sts_val->checkpoint_completion_time_utc[strlen(v)] =
- '\0';
- }
- GF_FREE(v);
- GF_FREE(k);
- }
+ sts_val = (gf_gsync_status_t *)data;
- return errno ? -1 : 0;
+ for (;;) {
+ errno = 0;
+ ptr = fgets(resbuf, blen, fp);
+ if (!ptr)
+ break;
+
+ v = resbuf + strlen(resbuf) - 1;
+ while (isspace(*v))
+ /* strip trailing space */
+ *v-- = '\0';
+ if (v == resbuf)
+ /* skip empty line */
+ continue;
+ v = strchr(resbuf, ':');
+ if (!v)
+ return -1;
+ *v++ = '\0';
+ while (isspace(*v))
+ v++;
+ v = gf_strdup(v);
+ if (!v)
+ return -1;
+
+ k = gf_strdup(resbuf);
+ if (!k) {
+ GF_FREE(v);
+ return -1;
+ }
+
+ if (strcmp(k, "worker_status") == 0) {
+ memcpy(sts_val->worker_status, v, strlen(v));
+ sts_val->worker_status[strlen(v)] = '\0';
+ } else if (strcmp(k, "slave_node") == 0) {
+ memcpy(sts_val->slave_node, v, strlen(v));
+ sts_val->slave_node[strlen(v)] = '\0';
+ } else if (strcmp(k, "crawl_status") == 0) {
+ memcpy(sts_val->crawl_status, v, strlen(v));
+ sts_val->crawl_status[strlen(v)] = '\0';
+ } else if (strcmp(k, "last_synced") == 0) {
+ memcpy(sts_val->last_synced, v, strlen(v));
+ sts_val->last_synced[strlen(v)] = '\0';
+ } else if (strcmp(k, "last_synced_utc") == 0) {
+ memcpy(sts_val->last_synced_utc, v, strlen(v));
+ sts_val->last_synced_utc[strlen(v)] = '\0';
+ } else if (strcmp(k, "entry") == 0) {
+ memcpy(sts_val->entry, v, strlen(v));
+ sts_val->entry[strlen(v)] = '\0';
+ } else if (strcmp(k, "data") == 0) {
+ memcpy(sts_val->data, v, strlen(v));
+ sts_val->data[strlen(v)] = '\0';
+ } else if (strcmp(k, "meta") == 0) {
+ memcpy(sts_val->meta, v, strlen(v));
+ sts_val->meta[strlen(v)] = '\0';
+ } else if (strcmp(k, "failures") == 0) {
+ memcpy(sts_val->failures, v, strlen(v));
+ sts_val->failures[strlen(v)] = '\0';
+ } else if (strcmp(k, "checkpoint_time") == 0) {
+ memcpy(sts_val->checkpoint_time, v, strlen(v));
+ sts_val->checkpoint_time[strlen(v)] = '\0';
+ } else if (strcmp(k, "checkpoint_time_utc") == 0) {
+ memcpy(sts_val->checkpoint_time_utc, v, strlen(v));
+ sts_val->checkpoint_time_utc[strlen(v)] = '\0';
+ } else if (strcmp(k, "checkpoint_completed") == 0) {
+ memcpy(sts_val->checkpoint_completed, v, strlen(v));
+ sts_val->checkpoint_completed[strlen(v)] = '\0';
+ } else if (strcmp(k, "checkpoint_completion_time") == 0) {
+ memcpy(sts_val->checkpoint_completion_time, v, strlen(v));
+ sts_val->checkpoint_completion_time[strlen(v)] = '\0';
+ } else if (strcmp(k, "checkpoint_completion_time_utc") == 0) {
+ memcpy(sts_val->checkpoint_completion_time_utc, v, strlen(v));
+ sts_val->checkpoint_completion_time_utc[strlen(v)] = '\0';
+ }
+ GF_FREE(v);
+ GF_FREE(k);
+ }
+
+ return errno ? -1 : 0;
}
-
static int
-glusterd_gsync_get_status (char *master, char *slave, char *conf_path,
- char *brick_path, gf_gsync_status_t *sts_val)
+glusterd_gsync_get_status(char *master, char *slave, char *conf_path,
+ char *brick_path, gf_gsync_status_t *sts_val)
{
- /* key + value, where value must be able to accommodate a path */
- char resbuf[256 + PATH_MAX] = {0,};
- runner_t runner = {0,};
-
- runinit (&runner);
- runner_add_args (&runner, GSYNCD_PREFIX"/gsyncd", "-c", NULL);
- runner_argprintf (&runner, "%s", conf_path);
- runner_argprintf (&runner, "--iprefix=%s", DATADIR);
- runner_argprintf (&runner, ":%s", master);
- runner_add_args (&runner, slave, "--status-get", NULL);
- runner_add_args (&runner, "--path", brick_path, NULL);
-
- return glusterd_query_extutil_generic (resbuf, sizeof (resbuf),
- &runner, sts_val,
- _fcbk_statustostruct);
+ /* key + value, where value must be able to accommodate a path */
+ char resbuf[256 + PATH_MAX] = {
+ 0,
+ };
+ runner_t runner = {
+ 0,
+ };
+
+ runinit(&runner);
+ runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "-c", NULL);
+ runner_argprintf(&runner, "%s", conf_path);
+ runner_argprintf(&runner, "--iprefix=%s", DATADIR);
+ runner_argprintf(&runner, ":%s", master);
+ runner_add_args(&runner, slave, "--status-get", NULL);
+ runner_add_args(&runner, "--path", brick_path, NULL);
+
+ return glusterd_query_extutil_generic(resbuf, sizeof(resbuf), &runner,
+ sts_val, _fcbk_statustostruct);
}
static int
-glusterd_gsync_get_param_file (char *prmfile, const char *param, char *master,
- char *slave, char *conf_path)
+glusterd_gsync_get_param_file(char *prmfile, const char *param, char *master,
+ char *slave, char *conf_path)
{
- runner_t runner = {0,};
-
- runinit (&runner);
- runner_add_args (&runner, GSYNCD_PREFIX"/gsyncd", "-c", NULL);
- runner_argprintf (&runner, "%s", conf_path);
- runner_argprintf (&runner, "--iprefix=%s", DATADIR);
- runner_argprintf (&runner, ":%s", master);
- runner_add_args (&runner, slave, "--config-get", NULL);
- runner_argprintf (&runner, "%s-file", param);
-
- return glusterd_query_extutil (prmfile, &runner);
+ runner_t runner = {
+ 0,
+ };
+
+ runinit(&runner);
+ runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "-c", NULL);
+ runner_argprintf(&runner, "%s", conf_path);
+ runner_argprintf(&runner, "--iprefix=%s", DATADIR);
+ runner_argprintf(&runner, ":%s", master);
+ runner_add_args(&runner, slave, "--config-get", NULL);
+ runner_argprintf(&runner, "%s-file", param);
+
+ return glusterd_query_extutil(prmfile, &runner);
}
static int
-gsyncd_getpidfile (char *master, char *slave, char *pidfile,
- char *conf_path, gf_boolean_t *is_template_in_use)
+gsyncd_getpidfile(char *master, char *slave, char *pidfile, char *conf_path,
+ gf_boolean_t *is_template_in_use)
{
- char temp_conf_path[PATH_MAX] = "";
- char *working_conf_path = NULL;
- glusterd_conf_t *priv = NULL;
- int ret = -1;
- struct stat stbuf = {0,};
- xlator_t *this = NULL;
- int32_t len = 0;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (this->private);
- GF_ASSERT (conf_path);
-
- priv = this->private;
-
- GF_VALIDATE_OR_GOTO ("gsync", master, out);
- GF_VALIDATE_OR_GOTO ("gsync", slave, out);
-
- len = snprintf (temp_conf_path, sizeof(temp_conf_path),
- "%s/"GSYNC_CONF_TEMPLATE, priv->workdir);
- if ((len < 0) || (len >= sizeof(temp_conf_path))) {
- goto out;
- }
-
- ret = sys_lstat (conf_path, &stbuf);
- if (!ret) {
- gf_msg_debug (this->name, 0, "Using passed config template(%s).",
- conf_path);
- working_conf_path = conf_path;
- } else {
- gf_msg (this->name, GF_LOG_WARNING, ENOENT,
- GD_MSG_FILE_OP_FAILED,
- "Config file (%s) missing. Looking for template "
- "config file (%s)", conf_path, temp_conf_path);
- ret = sys_lstat (temp_conf_path, &stbuf);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, ENOENT,
- GD_MSG_FILE_OP_FAILED,
- "Template config file (%s) missing.",
- temp_conf_path);
- goto out;
- }
- gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_DEFAULT_TEMP_CONFIG,
- "Using default config template(%s).",
- temp_conf_path);
- working_conf_path = temp_conf_path;
- *is_template_in_use = _gf_true;
+ char temp_conf_path[PATH_MAX] = "";
+ char *working_conf_path = NULL;
+ glusterd_conf_t *priv = NULL;
+ int ret = -1;
+ struct stat stbuf = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ int32_t len = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(this->private);
+ GF_ASSERT(conf_path);
+
+ priv = this->private;
+
+ GF_VALIDATE_OR_GOTO("gsync", master, out);
+ GF_VALIDATE_OR_GOTO("gsync", slave, out);
+
+ len = snprintf(temp_conf_path, sizeof(temp_conf_path),
+ "%s/" GSYNC_CONF_TEMPLATE, priv->workdir);
+ if ((len < 0) || (len >= sizeof(temp_conf_path))) {
+ goto out;
+ }
+
+ ret = sys_lstat(conf_path, &stbuf);
+ if (!ret) {
+ gf_msg_debug(this->name, 0, "Using passed config template(%s).",
+ conf_path);
+ working_conf_path = conf_path;
+ } else {
+ gf_msg(this->name, GF_LOG_WARNING, ENOENT, GD_MSG_FILE_OP_FAILED,
+ "Config file (%s) missing. Looking for template "
+ "config file (%s)",
+ conf_path, temp_conf_path);
+ ret = sys_lstat(temp_conf_path, &stbuf);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOENT, GD_MSG_FILE_OP_FAILED,
+ "Template config file (%s) missing.", temp_conf_path);
+ goto out;
}
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DEFAULT_TEMP_CONFIG,
+ "Using default config template(%s).", temp_conf_path);
+ working_conf_path = temp_conf_path;
+ *is_template_in_use = _gf_true;
+ }
fetch_data:
- ret = glusterd_gsync_get_param_file (pidfile, "pid", master,
- slave, working_conf_path);
- if ((ret == -1) || strlen(pidfile) == 0) {
- if (*is_template_in_use == _gf_false) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_PIDFILE_CREATE_FAILED,
- "failed to create the pidfile string. "
- "Trying default config template");
- working_conf_path = temp_conf_path;
- *is_template_in_use = _gf_true;
- goto fetch_data;
- } else {
- ret = -2;
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_PIDFILE_CREATE_FAILED, "failed to "
- "create the pidfile string from template "
- "config");
- goto out;
- }
+ ret = glusterd_gsync_get_param_file(pidfile, "pid", master, slave,
+ working_conf_path);
+ if ((ret == -1) || strlen(pidfile) == 0) {
+ if (*is_template_in_use == _gf_false) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PIDFILE_CREATE_FAILED,
+ "failed to create the pidfile string. "
+ "Trying default config template");
+ working_conf_path = temp_conf_path;
+ *is_template_in_use = _gf_true;
+ goto fetch_data;
+ } else {
+ ret = -2;
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PIDFILE_CREATE_FAILED,
+ "failed to "
+ "create the pidfile string from template "
+ "config");
+ goto out;
}
+ }
- gf_msg_debug (this->name, 0, "pidfile = %s", pidfile);
+ gf_msg_debug(this->name, 0, "pidfile = %s", pidfile);
- ret = open (pidfile, O_RDWR);
- out:
- return ret;
+ ret = open(pidfile, O_RDWR);
+out:
+ return ret;
}
static int
-gsync_status_byfd (int fd)
+gsync_status_byfd(int fd)
{
- GF_ASSERT (fd >= -1);
+ GF_ASSERT(fd >= -1);
- if (lockf (fd, F_TEST, 0) == -1 &&
- (errno == EAGAIN || errno == EACCES))
- /* gsyncd keeps the pidfile locked */
- return 0;
+ if (lockf(fd, F_TEST, 0) == -1 && (errno == EAGAIN || errno == EACCES))
+ /* gsyncd keeps the pidfile locked */
+ return 0;
- return -1;
+ return -1;
}
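gsyncd holds a POSIX record lock on its pidfile for its lifetime, so liveness is probed with lockf(F_TEST) rather than by reading the pid: the test fails with EAGAIN or EACCES exactly when another process holds the lock. A minimal sketch of the same probe (the pidfile path is hypothetical):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/var/run/example-gsyncd.pid", O_RDWR); /* hypothetical path */
    if (fd < 0)
        return 1;                     /* no pidfile: not running */
    if (lockf(fd, F_TEST, 0) == -1 && (errno == EAGAIN || errno == EACCES))
        printf("gsyncd running (pidfile locked)\n");
    else
        printf("gsyncd not running\n");
    close(fd);
    return 0;
}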
/* status: return 0 when gsync is running
* return -1 when not running
*/
int
-gsync_status (char *master, char *slave, char *conf_path,
- int *status, gf_boolean_t *is_template_in_use)
+gsync_status(char *master, char *slave, char *conf_path, int *status,
+ gf_boolean_t *is_template_in_use)
{
- char pidfile[PATH_MAX] = {0,};
- int fd = -1;
-
- fd = gsyncd_getpidfile (master, slave, pidfile,
- conf_path, is_template_in_use);
- if (fd == -2)
- return -1;
+ char pidfile[PATH_MAX] = {
+ 0,
+ };
+ int fd = -1;
+
+ fd = gsyncd_getpidfile(master, slave, pidfile, conf_path,
+ is_template_in_use);
+ if (fd == -2)
+ return -1;
- *status = gsync_status_byfd (fd);
+ *status = gsync_status_byfd(fd);
- sys_close (fd);
+ sys_close(fd);
- return 0;
+ return 0;
}
-
static int32_t
-glusterd_gsync_volinfo_dict_set (glusterd_volinfo_t *volinfo,
- char *key, char *value)
+glusterd_gsync_volinfo_dict_set(glusterd_volinfo_t *volinfo, char *key,
+ char *value)
{
- int32_t ret = -1;
- char *gsync_status = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- gsync_status = gf_strdup (value);
- if (!gsync_status) {
- gf_msg (this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
- "Unable to allocate memory");
- goto out;
- }
-
- ret = dict_set_dynstr (volinfo->dict, key, gsync_status);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Unable to set dict");
- goto out;
- }
-
- ret = 0;
+ int32_t ret = -1;
+ char *gsync_status = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ gsync_status = gf_strdup(value);
+ if (!gsync_status) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "Unable to allocate memory");
+ goto out;
+ }
+
+ ret = dict_set_dynstr(volinfo->dict, key, gsync_status);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to set dict");
+ goto out;
+ }
+
+ ret = 0;
out:
- return ret;
+ return ret;
}
static int
-glusterd_verify_gsyncd_spawn (char *master, char *slave)
+glusterd_verify_gsyncd_spawn(char *master, char *slave)
{
- int ret = 0;
- runner_t runner = {0,};
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- runinit (&runner);
- runner_add_args (&runner, GSYNCD_PREFIX"/gsyncd",
- "--verify", "spawning", NULL);
- runner_argprintf (&runner, ":%s", master);
- runner_add_args (&runner, slave, NULL);
- runner_redir (&runner, STDOUT_FILENO, RUN_PIPE);
- ret = runner_start (&runner);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SPAWNING_CHILD_FAILED,
- "spawning child failed");
- ret = -1;
- goto out;
- }
-
- if (runner_end (&runner) != 0)
- ret = -1;
+ int ret = 0;
+ runner_t runner = {
+ 0,
+ };
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ runinit(&runner);
+ runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "--verify", "spawning",
+ NULL);
+ runner_argprintf(&runner, ":%s", master);
+ runner_add_args(&runner, slave, NULL);
+ runner_redir(&runner, STDOUT_FILENO, RUN_PIPE);
+ ret = runner_start(&runner);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SPAWNING_CHILD_FAILED,
+ "spawning child failed");
+ ret = -1;
+ goto out;
+ }
+
+ if (runner_end(&runner) != 0)
+ ret = -1;
out:
- gf_msg_debug (this->name, 0, "returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "returning %d", ret);
+ return ret;
}
static int
-gsync_verify_config_options (dict_t *dict, char **op_errstr, char *volname)
+gsync_verify_config_options(dict_t *dict, char **op_errstr, char *volname)
{
- char **resopt = NULL;
- int i = 0;
- int ret = -1;
- char *subop = NULL;
- char *slave = NULL;
- char *op_name = NULL;
- char *op_value = NULL;
- char *t = NULL;
- char errmsg[PATH_MAX] = "";
- gf_boolean_t banned = _gf_true;
- gf_boolean_t op_match = _gf_true;
- gf_boolean_t val_match = _gf_true;
- struct gsync_config_opt_vals_ *conf_vals = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- if (dict_get_str (dict, "subop", &subop) != 0) {
- gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
- "missing subop");
- *op_errstr = gf_strdup ("Invalid config request");
- return -1;
- }
-
- if (dict_get_str (dict, "slave", &slave) != 0) {
- gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
- GEOREP" CONFIG: no slave given");
- *op_errstr = gf_strdup ("Slave required");
- return -1;
- }
-
- if (strcmp (subop, "get-all") == 0)
- return 0;
-
- if (dict_get_str (dict, "op_name", &op_name) != 0) {
- gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
- "option name missing");
- *op_errstr = gf_strdup ("Option name missing");
- return -1;
- }
-
- if (runcmd (GSYNCD_PREFIX"/gsyncd", "--config-check", op_name, NULL)) {
- ret = glusterd_verify_gsyncd_spawn (volname, slave);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_GSYNCD_SPAWN_FAILED, "Unable to spawn "
- "gsyncd");
- return 0;
- }
-
- gf_msg (this->name, GF_LOG_WARNING, EINVAL,
- GD_MSG_INVALID_ENTRY,
- "Invalid option %s", op_name);
- *op_errstr = gf_strdup ("Invalid option");
+ char **resopt = NULL;
+ int i = 0;
+ int ret = -1;
+ char *subop = NULL;
+ char *slave = NULL;
+ char *op_name = NULL;
+ char *op_value = NULL;
+ char *t = NULL;
+ char errmsg[PATH_MAX] = "";
+ gf_boolean_t banned = _gf_true;
+ gf_boolean_t op_match = _gf_true;
+ gf_boolean_t val_match = _gf_true;
+ struct gsync_config_opt_vals_ *conf_vals = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ if (dict_get_str(dict, "subop", &subop) != 0) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
+ "missing subop");
+ *op_errstr = gf_strdup("Invalid config request");
+ return -1;
+ }
- return -1;
- }
+ if (dict_get_str(dict, "slave", &slave) != 0) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
+ GEOREP " CONFIG: no slave given");
+ *op_errstr = gf_strdup("Slave required");
+ return -1;
+ }
- if (strcmp (subop, "get") == 0)
- return 0;
+ if (strcmp(subop, "get-all") == 0)
+ return 0;
- t = strtail (subop, "set");
- if (!t)
- t = strtail (subop, "del");
- if (!t || (t[0] && strcmp (t, "-glob") != 0)) {
- gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_SUBOP_NOT_FOUND,
- "unknown subop %s", subop);
- *op_errstr = gf_strdup ("Invalid config request");
- return -1;
- }
+ if (dict_get_str(dict, "op_name", &op_name) != 0) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
+ "option name missing");
+ *op_errstr = gf_strdup("Option name missing");
+ return -1;
+ }
- if (strtail (subop, "set") &&
- dict_get_str (dict, "op_value", &op_value) != 0) {
- gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
- "missing value for set");
- *op_errstr = gf_strdup ("missing value");
+ if (runcmd(GSYNCD_PREFIX "/gsyncd", "--config-check", op_name, NULL)) {
+ ret = glusterd_verify_gsyncd_spawn(volname, slave);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_SPAWN_FAILED,
+ "Unable to spawn "
+ "gsyncd");
+ return 0;
}
- /* match option name against reserved options, modulo -/_
- * difference
- */
- for (resopt = gsync_reserved_opts; *resopt; resopt++) {
- banned = _gf_true;
- for (i = 0; (*resopt)[i] && op_name[i]; i++) {
- if ((*resopt)[i] == op_name[i] ||
- ((*resopt)[i] == '-' && op_name[i] == '_'))
- continue;
- banned = _gf_false;
- }
+ gf_msg(this->name, GF_LOG_WARNING, EINVAL, GD_MSG_INVALID_ENTRY,
+ "Invalid option %s", op_name);
+ *op_errstr = gf_strdup("Invalid option");
- if (op_name[i] != '\0')
- banned = _gf_false;
+ return -1;
+ }
- if (banned) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_RESERVED_OPTION,
- "Reserved option %s", op_name);
- *op_errstr = gf_strdup ("Reserved option");
+ if (strcmp(subop, "get") == 0)
+ return 0;
- return -1;
- break;
+ t = strtail(subop, "set");
+ if (!t)
+ t = strtail(subop, "del");
+ if (!t || (t[0] && strcmp(t, "-glob") != 0)) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_SUBOP_NOT_FOUND,
+ "unknown subop %s", subop);
+ *op_errstr = gf_strdup("Invalid config request");
+ return -1;
+ }
+
+ if (strtail(subop, "set") &&
+ dict_get_str(dict, "op_value", &op_value) != 0) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
+ "missing value for set");
+ *op_errstr = gf_strdup("missing value");
+ }
+
+ /* match option name against reserved options, modulo -/_
+ * difference
+ */
+ for (resopt = gsync_reserved_opts; *resopt; resopt++) {
+ banned = _gf_true;
+ for (i = 0; (*resopt)[i] && op_name[i]; i++) {
+ if ((*resopt)[i] == op_name[i] ||
+ ((*resopt)[i] == '-' && op_name[i] == '_'))
+ continue;
+ banned = _gf_false;
+ }
+
+ if (op_name[i] != '\0')
+ banned = _gf_false;
+
+ if (banned) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_RESERVED_OPTION,
+ "Reserved option %s", op_name);
+ *op_errstr = gf_strdup("Reserved option");
+
+            return -1;
+ }
+ }
+
+ /* Check options in gsync_confopt_vals for invalid values */
+ for (conf_vals = gsync_confopt_vals; conf_vals->op_name; conf_vals++) {
+ op_match = _gf_true;
+ for (i = 0; conf_vals->op_name[i] && op_name[i]; i++) {
+ if (conf_vals->op_name[i] == op_name[i] ||
+ (conf_vals->op_name[i] == '_' && op_name[i] == '-'))
+ continue;
+ op_match = _gf_false;
+ }
+
+ if (op_match) {
+ if (!op_value)
+ goto out;
+ val_match = _gf_false;
+ for (i = 0; i < conf_vals->no_of_pos_vals; i++) {
+ if (conf_vals->case_sensitive) {
+ if (!strcmp(conf_vals->values[i], op_value))
+ val_match = _gf_true;
+ } else {
+ if (!strcasecmp(conf_vals->values[i], op_value))
+ val_match = _gf_true;
}
- }
+ }
- /* Check options in gsync_confopt_vals for invalid values */
- for (conf_vals = gsync_confopt_vals; conf_vals->op_name; conf_vals++) {
- op_match = _gf_true;
- for (i = 0; conf_vals->op_name[i] && op_name[i]; i++) {
- if (conf_vals->op_name[i] == op_name[i] ||
- (conf_vals->op_name[i] == '_' && op_name[i] == '-'))
- continue;
- op_match = _gf_false;
- }
+ if (!val_match) {
+            /* snprintf() NUL-terminates; indexing errmsg by its return
+             * value could run past the buffer when output is truncated */
+            snprintf(errmsg, sizeof(errmsg),
+                     "Invalid value(%s) for"
+                     " option %s",
+                     op_value, op_name);
- if (op_match) {
- if (!op_value)
- goto out;
- val_match = _gf_false;
- for (i = 0; i < conf_vals->no_of_pos_vals; i++) {
- if(conf_vals->case_sensitive){
- if (!strcmp (conf_vals->values[i], op_value))
- val_match = _gf_true;
- } else {
- if (!strcasecmp (conf_vals->values[i], op_value))
- val_match = _gf_true;
- }
- }
-
- if (!val_match) {
- ret = snprintf (errmsg, sizeof(errmsg) - 1,
- "Invalid value(%s) for"
- " option %s", op_value,
- op_name);
- errmsg[ret] = '\0';
-
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "%s", errmsg);
- *op_errstr = gf_strdup (errmsg);
- return -1;
- }
- }
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "%s", errmsg);
+ *op_errstr = gf_strdup(errmsg);
+ return -1;
+ }
}
+ }
out:
- return 0;
+ return 0;
}
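Both tables above are matched modulo the '-'/'_' distinction, so e.g. use-meta-volume and use_meta_volume name the same option. A symmetric variant of that comparison as a standalone sketch (the real loops each normalize in one direction only):

#include <stdio.h>

/* Option-name equality treating '-' and '_' as interchangeable. */
static int opt_name_eq(const char *a, const char *b)
{
    int i;
    for (i = 0; a[i] && b[i]; i++) {
        if (a[i] == b[i])
            continue;
        if ((a[i] == '-' && b[i] == '_') || (a[i] == '_' && b[i] == '-'))
            continue;
        return 0;
    }
    return a[i] == b[i]; /* both strings must end together */
}

int main(void)
{
    printf("%d\n", opt_name_eq("use-meta-volume", "use_meta_volume")); /* 1 */
    printf("%d\n", opt_name_eq("use-meta-volume", "use_meta"));        /* 0 */
    return 0;
}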
static int
-glusterd_get_gsync_status_mst_slv (glusterd_volinfo_t *volinfo,
- char *slave, char *conf_path,
- dict_t *rsp_dict, char *node);
+glusterd_get_gsync_status_mst_slv(glusterd_volinfo_t *volinfo, char *slave,
+ char *conf_path, dict_t *rsp_dict,
+ char *node);
static int
-_get_status_mst_slv (dict_t *dict, char *key, data_t *value, void *data)
+_get_status_mst_slv(dict_t *dict, char *key, data_t *value, void *data)
{
- glusterd_gsync_status_temp_t *param = NULL;
- char *slave = NULL;
- char *slave_buf = NULL;
- char *slave_url = NULL;
- char *slave_vol = NULL;
- char *slave_host = NULL;
- char *errmsg = NULL;
- char conf_path[PATH_MAX] = "";
- int ret = -1;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
- char slv_url[VOLINFO_SLAVE_URL_MAX] = {0};
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
-
- param = (glusterd_gsync_status_temp_t *)data;
-
- GF_VALIDATE_OR_GOTO (this->name, param, out);
- GF_VALIDATE_OR_GOTO (this->name, param->volinfo, out);
-
- if (this)
- priv = this->private;
- GF_VALIDATE_OR_GOTO (this->name, priv, out);
-
- if (snprintf (slv_url, sizeof(slv_url), "%s", value->data) >=
- sizeof (slv_url)) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVE_VOL_PARSE_FAIL,
- "Error in copying slave: %s!", value->data);
- goto out;
- }
-
- ret = parse_slave_url (slv_url, &slave);
- if (ret == -1) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVE_VOL_PARSE_FAIL,
- "Error in parsing slave: %s!", value->data);
- goto out;
- }
-
- ret = glusterd_get_slave_info (slave, &slave_url,
- &slave_host, &slave_vol, &errmsg);
- if (ret) {
- if (errmsg)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVEINFO_FETCH_ERROR,
- "Unable to fetch slave details. Error: %s",
- errmsg);
- else
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVEINFO_FETCH_ERROR,
- "Unable to fetch slave details.");
- ret = -1;
- goto out;
- }
-
- ret = snprintf (conf_path, sizeof(conf_path) - 1,
- "%s/"GEOREP"/%s_%s_%s/gsyncd.conf",
- priv->workdir, param->volinfo->volname,
- slave_host, slave_vol);
- conf_path[ret] = '\0';
-
- ret = glusterd_get_gsync_status_mst_slv(param->volinfo,
- slave, conf_path,
- param->rsp_dict,
- param->node);
+ glusterd_gsync_status_temp_t *param = NULL;
+ char *slave = NULL;
+ char *slave_buf = NULL;
+ char *slave_url = NULL;
+ char *slave_vol = NULL;
+ char *slave_host = NULL;
+ char *errmsg = NULL;
+ char conf_path[PATH_MAX] = "";
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ char slv_url[VOLINFO_SLAVE_URL_MAX] = {0};
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ param = (glusterd_gsync_status_temp_t *)data;
+
+ GF_VALIDATE_OR_GOTO(this->name, param, out);
+ GF_VALIDATE_OR_GOTO(this->name, param->volinfo, out);
+
+ if (this)
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, priv, out);
+
+ if (snprintf(slv_url, sizeof(slv_url), "%s", value->data) >=
+ sizeof(slv_url)) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_VOL_PARSE_FAIL,
+ "Error in copying slave: %s!", value->data);
+ goto out;
+ }
+
+ ret = parse_slave_url(slv_url, &slave);
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_VOL_PARSE_FAIL,
+ "Error in parsing slave: %s!", value->data);
+ goto out;
+ }
+
+ ret = glusterd_get_slave_info(slave, &slave_url, &slave_host, &slave_vol,
+ &errmsg);
+ if (ret) {
+ if (errmsg)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVEINFO_FETCH_ERROR,
+ "Unable to fetch slave details. Error: %s", errmsg);
+ else
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVEINFO_FETCH_ERROR,
+ "Unable to fetch slave details.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = snprintf(conf_path, sizeof(conf_path) - 1,
+ "%s/" GEOREP "/%s_%s_%s/gsyncd.conf", priv->workdir,
+ param->volinfo->volname, slave_host, slave_vol);
+ conf_path[ret] = '\0';
+
+ ret = glusterd_get_gsync_status_mst_slv(param->volinfo, slave, conf_path,
+ param->rsp_dict, param->node);
out:
- if (errmsg)
- GF_FREE (errmsg);
+ if (errmsg)
+ GF_FREE(errmsg);
- if (slave_buf)
- GF_FREE(slave_buf);
+ if (slave_buf)
+ GF_FREE(slave_buf);
- if (slave_vol)
- GF_FREE (slave_vol);
+ if (slave_vol)
+ GF_FREE(slave_vol);
- if (slave_url)
- GF_FREE (slave_url);
+ if (slave_url)
+ GF_FREE(slave_url);
- if (slave_host)
- GF_FREE (slave_host);
+ if (slave_host)
+ GF_FREE(slave_host);
- gf_msg_debug (this->name, 0, "Returning %d.", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d.", ret);
+ return ret;
}
-
static int
-_get_max_gsync_slave_num (dict_t *dict, char *key, data_t *value, void *data)
+_get_max_gsync_slave_num(dict_t *dict, char *key, data_t *value, void *data)
{
- int tmp_slvnum = 0;
- int *slvnum = (int *)data;
+ int tmp_slvnum = 0;
+ int *slvnum = (int *)data;
- sscanf (key, "slave%d", &tmp_slvnum);
- if (tmp_slvnum > *slvnum)
- *slvnum = tmp_slvnum;
+ sscanf(key, "slave%d", &tmp_slvnum);
+ if (tmp_slvnum > *slvnum)
+ *slvnum = tmp_slvnum;
- return 0;
+ return 0;
}
static int
-_get_slave_idx_slave_voluuid (dict_t *dict, char *key, data_t *value,
- void *data)
+_get_slave_idx_slave_voluuid(dict_t *dict, char *key, data_t *value, void *data)
{
- char *slave_info = NULL;
- xlator_t *this = NULL;
- struct slave_vol_config *slave_cfg = NULL;
- int i = 0;
- int ret = -1;
- unsigned tmp_slvnum = 0;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
+ char *slave_info = NULL;
+ xlator_t *this = NULL;
+ struct slave_vol_config *slave_cfg = NULL;
+ int i = 0;
+ int ret = -1;
+ unsigned tmp_slvnum = 0;
- slave_cfg = data;
-
- if (value)
- slave_info = value->data;
-
- if (!(slave_info) || strlen (slave_info) == 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_INVALID_SLAVE,
- "Invalid slave in dict");
- ret = -2;
- goto out;
- }
-
- /* slave format:
- * master_node_uuid:ssh://slave_host::slave_vol:slave_voluuid */
- while (i++ < 5) {
- slave_info = strchr (slave_info, ':');
- if (slave_info)
- slave_info++;
- else {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVE_VOL_PARSE_FAIL,
- "slave_info becomes NULL!");
- ret = -2;
- goto out;
- }
- }
- if (strcmp (slave_info, slave_cfg->slave_voluuid) == 0) {
- gf_msg_debug (this->name, 0, "Same slave volume "
- "already present %s",
- slave_cfg->slave_voluuid);
- ret = -1;
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
- sscanf (key, "slave%d", &tmp_slvnum);
- slave_cfg->old_slvidx = tmp_slvnum;
+ slave_cfg = data;
- gf_msg_debug (this->name, 0, "and "
- "its index is: %d", tmp_slvnum);
- goto out;
- }
+ if (value)
+ slave_info = value->data;
- ret = 0;
+ if (!(slave_info) || strlen(slave_info) == 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_SLAVE,
+ "Invalid slave in dict");
+ ret = -2;
+ goto out;
+ }
+
+ /* slave format:
+ * master_node_uuid:ssh://slave_host::slave_vol:slave_voluuid */
+ while (i++ < 5) {
+ slave_info = strchr(slave_info, ':');
+ if (slave_info)
+ slave_info++;
+ else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_VOL_PARSE_FAIL,
+ "slave_info becomes NULL!");
+ ret = -2;
+ goto out;
+ }
+ }
+ if (strcmp(slave_info, slave_cfg->slave_voluuid) == 0) {
+ gf_msg_debug(this->name, 0,
+ "Same slave volume "
+ "already present %s",
+ slave_cfg->slave_voluuid);
+ ret = -1;
+
+ sscanf(key, "slave%d", &tmp_slvnum);
+ slave_cfg->old_slvidx = tmp_slvnum;
+
+ gf_msg_debug(this->name, 0,
+ "and "
+ "its index is: %d",
+ tmp_slvnum);
+ goto out;
+ }
+
+ ret = 0;
out:
- return ret;
+ return ret;
}
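
The five-colon walk above relies on the stored entry layout
"master_node_uuid:ssh://slave_host::slave_vol:slave_voluuid"; note that
"ssh://" and the host::volume separator contribute three of the five ':'
characters. A standalone sketch of the same walk over a sample entry
(all values hypothetical):

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        const char *entry = "11111111-1111-1111-1111-111111111111"
                            ":ssh://gfs-slave-1::slavevol"
                            ":22222222-2222-2222-2222-222222222222";
        const char *p = entry;
        int i = 0;

        /* skip five ':' separators, leaving p at the slave voluuid */
        while (i++ < 5) {
            p = strchr(p, ':');
            if (!p)
                return 1; /* malformed entry */
            p++;
        }
        printf("slave voluuid: %s\n", p);
        return 0;
    }
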
static int
-glusterd_remove_slave_in_info (glusterd_volinfo_t *volinfo, char *slave,
- char **op_errstr)
+glusterd_remove_slave_in_info(glusterd_volinfo_t *volinfo, char *slave,
+ char **op_errstr)
{
- int zero_slave_entries = _gf_true;
- int ret = 0;
- char *slavekey = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (volinfo);
- GF_ASSERT (slave);
-
- do {
- ret = glusterd_get_slave (volinfo, slave, &slavekey);
- if (ret < 0 && zero_slave_entries) {
- ret++;
- goto out;
- }
- zero_slave_entries = _gf_false;
- dict_del (volinfo->gsync_slaves, slavekey);
- } while (ret >= 0);
-
- ret = glusterd_store_volinfo (volinfo,
- GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret) {
- *op_errstr = gf_strdup ("Failed to store the Volume"
- "information");
- goto out;
- }
- out:
- gf_msg_debug (this->name, 0, "returning %d", ret);
- return ret;
-
+ int zero_slave_entries = _gf_true;
+ int ret = 0;
+ char *slavekey = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(volinfo);
+ GF_ASSERT(slave);
+
+ do {
+ ret = glusterd_get_slave(volinfo, slave, &slavekey);
+ if (ret < 0 && zero_slave_entries) {
+ ret++;
+ goto out;
+ }
+ zero_slave_entries = _gf_false;
+ dict_del(volinfo->gsync_slaves, slavekey);
+ } while (ret >= 0);
+
+ ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret) {
+        *op_errstr = gf_strdup(
+            "Failed to store the Volume"
+            " information");
+ goto out;
+ }
+out:
+ gf_msg_debug(this->name, 0, "returning %d", ret);
+ return ret;
}
static int
-glusterd_gsync_get_uuid (char *slave, glusterd_volinfo_t *vol,
- uuid_t uuid)
+glusterd_gsync_get_uuid(char *slave, glusterd_volinfo_t *vol, uuid_t uuid)
{
- int ret = 0;
- char *slavekey = NULL;
- char *slaveentry = NULL;
- char *t = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (vol);
- GF_ASSERT (slave);
-
- ret = glusterd_get_slave (vol, slave, &slavekey);
- if (ret < 0) {
- /* XXX colliding cases of failure and non-extant
- * slave... now just doing this as callers of this
- * function can make sense only of -1 and 0 as retvals;
- * getting at the proper semanticals will involve
- * fixing callers as well.
- */
- ret = -1;
- goto out;
- }
+ int ret = 0;
+ char *slavekey = NULL;
+ char *slaveentry = NULL;
+ char *t = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(vol);
+ GF_ASSERT(slave);
+
+ ret = glusterd_get_slave(vol, slave, &slavekey);
+ if (ret < 0) {
+        /* XXX colliding cases of failure and nonexistent
+         * slave... for now just doing this, as callers of this
+         * function can make sense only of -1 and 0 as retvals;
+         * getting at the proper semantics will involve
+ * fixing callers as well.
+ */
+ ret = -1;
+ goto out;
+ }
- ret = dict_get_str (vol->gsync_slaves, slavekey, &slaveentry);
- GF_ASSERT (ret == 0);
+ ret = dict_get_str(vol->gsync_slaves, slavekey, &slaveentry);
+ GF_ASSERT(ret == 0);
- t = strchr (slaveentry, ':');
- GF_ASSERT (t);
- *t = '\0';
- ret = gf_uuid_parse (slaveentry, uuid);
- *t = ':';
+ t = strchr(slaveentry, ':');
+ GF_ASSERT(t);
+ *t = '\0';
+ ret = gf_uuid_parse(slaveentry, uuid);
+ *t = ':';
- out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+out:
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
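
glusterd_gsync_get_uuid() extracts the leading host uuid without
copying: it writes a temporary NUL over the first ':', hands the prefix
to gf_uuid_parse(), and then restores the ':' so the dict entry is left
untouched. A standalone sketch of that in-place splice (sample entry;
the parse step is reduced to a print):

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        /* mutable buffer: the splice writes into it */
        char entry[] = "11111111-1111-1111-1111-111111111111"
                       ":ssh://host::vol";
        char *t = strchr(entry, ':');

        if (!t)
            return 1;
        *t = '\0';                        /* cut at the first ':'  */
        printf("host uuid: %s\n", entry); /* gf_uuid_parse() here  */
        *t = ':';                         /* restore the entry     */
        printf("restored: %s\n", entry);
        return 0;
    }
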
static int
-update_slave_voluuid (dict_t *dict, char *key, data_t *value, void *data)
+update_slave_voluuid(dict_t *dict, char *key, data_t *value, void *data)
{
- char *slave = NULL;
- char *slave_url = NULL;
- char *slave_vol = NULL;
- char *slave_host = NULL;
- char *errmsg = NULL;
- xlator_t *this = NULL;
- int ret = -1;
- char slv_url[VOLINFO_SLAVE_URL_MAX] = {0};
- char slave_voluuid[GF_UUID_BUF_SIZE] = {0};
- char *slave_info = NULL;
- char *new_value = NULL;
- char *same_key = NULL;
- int cnt = 0;
- gf_boolean_t *voluuid_updated = NULL;
-
- this = THIS;
-
- voluuid_updated = data;
- slave_info = value->data;
- gf_msg_debug (this->name, 0, "slave_info: %s!", slave_info);
+ char *slave = NULL;
+ char *slave_url = NULL;
+ char *slave_vol = NULL;
+ char *slave_host = NULL;
+ char *errmsg = NULL;
+ xlator_t *this = NULL;
+ int ret = -1;
+ char slv_url[VOLINFO_SLAVE_URL_MAX] = {0};
+ char slave_voluuid[GF_UUID_BUF_SIZE] = {0};
+ char *slave_info = NULL;
+ char *new_value = NULL;
+ char *same_key = NULL;
+ int cnt = 0;
+ gf_boolean_t *voluuid_updated = NULL;
+
+ this = THIS;
+
+ voluuid_updated = data;
+ slave_info = value->data;
+ gf_msg_debug(this->name, 0, "slave_info: %s!", slave_info);
+
+ /* old slave format:
+ * master_node_uuid:ssh://slave_host::slave_vol
+ * New slave format:
+ * master_node_uuid:ssh://slave_host::slave_vol:slave_voluuid */
+ while (slave_info) {
+ slave_info = strchr(slave_info, ':');
+ if (slave_info)
+ cnt++;
+ else
+ break;
- /* old slave format:
- * master_node_uuid:ssh://slave_host::slave_vol
- * New slave format:
- * master_node_uuid:ssh://slave_host::slave_vol:slave_voluuid */
- while (slave_info) {
- slave_info = strchr (slave_info, ':');
- if (slave_info)
- cnt++;
- else
- break;
+ slave_info++;
+ }
- slave_info++;
+ gf_msg_debug(this->name, 0, "cnt: %d", cnt);
+ /* check whether old slave format and update vol uuid if old format.
+ * With volume uuid, number of ':' is 5 and is 4 without.
+ */
+ if (cnt == 4) {
+ if (snprintf(slv_url, sizeof(slv_url), "%s", value->data) >=
+ sizeof(slv_url)) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_VOL_PARSE_FAIL,
+ "Error in copying slave: %s!", value->data);
+ goto out;
}
- gf_msg_debug (this->name, 0, "cnt: %d", cnt);
- /* check whether old slave format and update vol uuid if old format.
- * With volume uuid, number of ':' is 5 and is 4 without.
- */
- if (cnt == 4) {
- if (snprintf (slv_url, sizeof(slv_url), "%s", value->data) >=
- sizeof (slv_url)) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVE_VOL_PARSE_FAIL,
- "Error in copying slave: %s!", value->data);
- goto out;
- }
-
- ret = parse_slave_url (slv_url, &slave);
- if (ret == -1) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVE_VOL_PARSE_FAIL,
- "Error in parsing slave: %s!", value->data);
- goto out;
- }
-
- ret = glusterd_get_slave_info (slave, &slave_url,
- &slave_host, &slave_vol, &errmsg);
- if (ret) {
- if (errmsg)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVEINFO_FETCH_ERROR,
- "Unable to fetch slave details. Error: %s",
- errmsg);
- else
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVEINFO_FETCH_ERROR,
- "Unable to fetch slave details.");
- ret = -1;
- goto out;
- }
-
- ret = glusterd_get_slave_voluuid (slave_host, slave_vol,
- slave_voluuid);
- if ((ret) || (strlen(slave_voluuid) == 0)) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REMOTE_VOL_UUID_FAIL,
- "Unable to get remote volume uuid"
- "slavehost:%s slavevol:%s",
- slave_host, slave_vol);
- /* Avoiding failure due to remote vol uuid fetch */
- ret = 0;
- goto out;
- }
- ret = gf_asprintf (&new_value, "%s:%s",
- value->data, slave_voluuid);
- ret = gf_asprintf (&same_key, "%s", key);
-
- /* delete old key and add new value */
- dict_del (dict, key);
+ ret = parse_slave_url(slv_url, &slave);
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_VOL_PARSE_FAIL,
+ "Error in parsing slave: %s!", value->data);
+ goto out;
+ }
- /* set new value for the same key*/
- ret = dict_set_dynstr (dict, same_key, new_value);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REMOTE_VOL_UUID_FAIL,
- "Error in setting dict value"
- "new_value :%s", new_value);
- goto out;
- }
- *voluuid_updated = _gf_true;
+ ret = glusterd_get_slave_info(slave, &slave_url, &slave_host,
+ &slave_vol, &errmsg);
+ if (ret) {
+ if (errmsg)
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SLAVEINFO_FETCH_ERROR,
+ "Unable to fetch slave details. Error: %s", errmsg);
+ else
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SLAVEINFO_FETCH_ERROR,
+ "Unable to fetch slave details.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_get_slave_voluuid(slave_host, slave_vol, slave_voluuid);
+ if ((ret) || (strlen(slave_voluuid) == 0)) {
+            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REMOTE_VOL_UUID_FAIL,
+                   "Unable to get remote volume uuid "
+                   "slavehost:%s slavevol:%s",
+ slave_host, slave_vol);
+ /* Avoiding failure due to remote vol uuid fetch */
+ ret = 0;
+ goto out;
+ }
+ ret = gf_asprintf(&new_value, "%s:%s", value->data, slave_voluuid);
+ ret = gf_asprintf(&same_key, "%s", key);
+
+ /* delete old key and add new value */
+ dict_del(dict, key);
+
+ /* set new value for the same key*/
+ ret = dict_set_dynstr(dict, same_key, new_value);
+ if (ret) {
+            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REMOTE_VOL_UUID_FAIL,
+                   "Error in setting dict value "
+                   "new_value: %s",
+ new_value);
+ goto out;
}
+ *voluuid_updated = _gf_true;
+ }
- ret = 0;
+ ret = 0;
out:
- if (errmsg)
- GF_FREE (errmsg);
+ if (errmsg)
+ GF_FREE(errmsg);
- if (slave_url)
- GF_FREE (slave_url);
+ if (slave_url)
+ GF_FREE(slave_url);
- if (slave_vol)
- GF_FREE (slave_vol);
+ if (slave_vol)
+ GF_FREE(slave_vol);
- if (slave_host)
- GF_FREE (slave_host);
+ if (slave_host)
+ GF_FREE(slave_host);
- gf_msg_debug (this->name, 0, "Returning %d.", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d.", ret);
+ return ret;
}
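
The format probe above counts ':' separators to tell the two entry
layouts apart: the pre-voluuid layout
"master_node_uuid:ssh://slave_host::slave_vol" has four, the current one
has five, and only four-colon entries get upgraded. A standalone count
over both sample forms (values hypothetical):

    #include <stdio.h>
    #include <string.h>

    static int
    count_colons(const char *s)
    {
        int cnt = 0;

        for (; (s = strchr(s, ':')) != NULL; s++)
            cnt++;
        return cnt;
    }

    int
    main(void)
    {
        const char *old_fmt = "11111111-1111-1111-1111-111111111111"
                              ":ssh://gfs-slave-1::slavevol";
        const char *new_fmt = "11111111-1111-1111-1111-111111111111"
                              ":ssh://gfs-slave-1::slavevol"
                              ":22222222-2222-2222-2222-222222222222";

        printf("old: %d colons\n", count_colons(old_fmt)); /* 4: upgrade */
        printf("new: %d colons\n", count_colons(new_fmt)); /* 5: leave   */
        return 0;
    }
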
static int
-glusterd_update_slave_voluuid_slaveinfo (glusterd_volinfo_t *volinfo)
+glusterd_update_slave_voluuid_slaveinfo(glusterd_volinfo_t *volinfo)
{
- int ret = -1;
- xlator_t *this = NULL;
- gf_boolean_t voluuid_updated = _gf_false;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
- GF_VALIDATE_OR_GOTO (this->name, volinfo, out);
-
- ret = dict_foreach (volinfo->gsync_slaves, update_slave_voluuid,
- &voluuid_updated);
+ int ret = -1;
+ xlator_t *this = NULL;
+ gf_boolean_t voluuid_updated = _gf_false;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, volinfo, out);
+
+ ret = dict_foreach(volinfo->gsync_slaves, update_slave_voluuid,
+ &voluuid_updated);
+ if (ret) {
+        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REMOTE_VOL_UUID_FAIL,
+               "Error in updating"
+               " volinfo");
+ goto out;
+ }
+
+ if (_gf_true == voluuid_updated) {
+ ret = glusterd_store_volinfo(volinfo,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REMOTE_VOL_UUID_FAIL, "Error in updating"
- "volinfo");
- goto out;
+            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_STORE_FAIL,
+                   "Error in storing"
+                   " volinfo");
+ goto out;
}
+ }
- if (_gf_true == voluuid_updated) {
- ret = glusterd_store_volinfo (volinfo,
- GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLINFO_STORE_FAIL, "Error in storing"
- "volinfo");
- goto out;
- }
- }
-
- ret = 0;
+ ret = 0;
out:
- gf_msg_debug ((this ? this->name : "glusterd"),
- 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug((this ? this->name : "glusterd"), 0, "Returning %d", ret);
+ return ret;
}
int
-glusterd_check_gsync_running_local (char *master, char *slave,
- char *conf_path,
- gf_boolean_t *is_run)
+glusterd_check_gsync_running_local(char *master, char *slave, char *conf_path,
+ gf_boolean_t *is_run)
{
- int ret = -1;
- int ret_status = 0;
- gf_boolean_t is_template_in_use = _gf_false;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (master);
- GF_ASSERT (slave);
- GF_ASSERT (is_run);
-
- *is_run = _gf_false;
- ret = gsync_status (master, slave, conf_path,
- &ret_status, &is_template_in_use);
- if (ret == 0 && ret_status == 0)
- *is_run = _gf_true;
- else if (ret == -1) {
- gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_VALIDATE_FAILED,
- GEOREP" validation failed");
- goto out;
- }
- ret = 0;
- out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
-
+ int ret = -1;
+ int ret_status = 0;
+ gf_boolean_t is_template_in_use = _gf_false;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(master);
+ GF_ASSERT(slave);
+ GF_ASSERT(is_run);
+
+ *is_run = _gf_false;
+ ret = gsync_status(master, slave, conf_path, &ret_status,
+ &is_template_in_use);
+ if (ret == 0 && ret_status == 0)
+ *is_run = _gf_true;
+ else if (ret == -1) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VALIDATE_FAILED,
+ GEOREP " validation failed");
+ goto out;
+ }
+ ret = 0;
+out:
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
static int
-glusterd_store_slave_in_info (glusterd_volinfo_t *volinfo, char *slave,
- char *host_uuid, char *slave_voluuid,
- char **op_errstr, gf_boolean_t is_force)
+glusterd_store_slave_in_info(glusterd_volinfo_t *volinfo, char *slave,
+ char *host_uuid, char *slave_voluuid,
+ char **op_errstr, gf_boolean_t is_force)
{
- int ret = 0;
- int maxslv = 0;
- char **linearr = NULL;
- char *value = NULL;
- char *slavekey = NULL;
- char *slaveentry = NULL;
- char key[512] = {0, };
- char *t = NULL;
- xlator_t *this = NULL;
- struct slave_vol_config slave1 = {{0},};
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (volinfo);
- GF_ASSERT (slave);
- GF_ASSERT (host_uuid);
- GF_VALIDATE_OR_GOTO (this->name, slave_voluuid, out);
-
- ret = glusterd_get_slave (volinfo, slave, &slavekey);
- switch (ret) {
+ int ret = 0;
+ int maxslv = 0;
+ char **linearr = NULL;
+ char *value = NULL;
+ char *slavekey = NULL;
+ char *slaveentry = NULL;
+ char key[512] = {
+ 0,
+ };
+ char *t = NULL;
+ xlator_t *this = NULL;
+ struct slave_vol_config slave1 = {
+ {0},
+ };
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(volinfo);
+ GF_ASSERT(slave);
+ GF_ASSERT(host_uuid);
+ GF_VALIDATE_OR_GOTO(this->name, slave_voluuid, out);
+
+ ret = glusterd_get_slave(volinfo, slave, &slavekey);
+ switch (ret) {
case -2:
- ret = -1;
- goto out;
+ ret = -1;
+ goto out;
case -1:
- break;
+ break;
default:
- if (!is_force)
- GF_ASSERT (ret > 0);
- ret = dict_get_str (volinfo->gsync_slaves, slavekey, &slaveentry);
- GF_ASSERT (ret == 0);
-
- /* same-name + same-uuid slave entries should have been filtered
- * out in glusterd_op_verify_gsync_start_options(), so we can
- * assert an uuid mismatch
- */
- t = strtail (slaveentry, host_uuid);
- if (!is_force)
- GF_ASSERT (!t || *t != ':');
-
- if (is_force) {
- gf_msg_debug (this->name, 0, GEOREP" has already "
- "been invoked for the %s (master) and "
- "%s (slave). Allowing without saving "
- "info again due to force command.",
- volinfo->volname, slave);
- ret = 0;
- goto out;
- }
-
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_INVOKE_ERROR,
- GEOREP" has already been invoked for "
- "the %s (master) and %s (slave) from a different "
- "machine", volinfo->volname, slave);
- *op_errstr = gf_strdup (GEOREP" already running in "
- "another machine");
- ret = -1;
- goto out;
- }
-
- ret = glusterd_urltransform_single (slave, "normalize", &linearr);
- if (ret == -1)
- goto out;
-
- ret = gf_asprintf (&value, "%s:%s:%s", host_uuid,
- linearr[0], slave_voluuid);
-
- glusterd_urltransform_free (linearr, 1);
- if (ret == -1)
+ if (!is_force)
+ GF_ASSERT(ret > 0);
+ ret = dict_get_str(volinfo->gsync_slaves, slavekey, &slaveentry);
+ GF_ASSERT(ret == 0);
+
+ /* same-name + same-uuid slave entries should have been filtered
+ * out in glusterd_op_verify_gsync_start_options(), so we can
+ * assert an uuid mismatch
+ */
+ t = strtail(slaveentry, host_uuid);
+ if (!is_force)
+ GF_ASSERT(!t || *t != ':');
+
+ if (is_force) {
+ gf_msg_debug(this->name, 0,
+ GEOREP
+ " has already "
+ "been invoked for the %s (master) and "
+ "%s (slave). Allowing without saving "
+ "info again due to force command.",
+ volinfo->volname, slave);
+ ret = 0;
goto out;
+ }
- /* Given the slave volume uuid, check and get any existing slave */
- memcpy (slave1.slave_voluuid, slave_voluuid, GF_UUID_BUF_SIZE);
- ret = dict_foreach (volinfo->gsync_slaves,
- _get_slave_idx_slave_voluuid, &slave1);
-
- if (ret == 0) { /* New slave */
- dict_foreach (volinfo->gsync_slaves, _get_max_gsync_slave_num,
- &maxslv);
- snprintf (key, sizeof (key), "slave%d", maxslv + 1);
-
- ret = dict_set_dynstr (volinfo->gsync_slaves, key, value);
- if (ret) {
- GF_FREE (value);
- goto out;
- }
- } else if (ret == -1) { /* Existing slave */
- snprintf (key, sizeof (key), "slave%d", slave1.old_slvidx);
-
- gf_msg_debug (this->name, 0, "Replacing key:%s with new value"
- ":%s", key, value);
-
- /* Add new slave's value, with the same slave index */
- ret = dict_set_dynstr (volinfo->gsync_slaves, key, value);
- if (ret) {
- GF_FREE (value);
- goto out;
- }
- } else {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REMOTE_VOL_UUID_FAIL,
- "_get_slave_idx_slave_voluuid failed!");
- GF_FREE (value);
- ret = -1;
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVOKE_ERROR,
+ GEOREP
+ " has already been invoked for "
+ "the %s (master) and %s (slave) from a different "
+ "machine",
+ volinfo->volname, slave);
+ *op_errstr = gf_strdup(GEOREP
+ " already running in "
+ "another machine");
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_urltransform_single(slave, "normalize", &linearr);
+ if (ret == -1)
+ goto out;
+
+ ret = gf_asprintf(&value, "%s:%s:%s", host_uuid, linearr[0], slave_voluuid);
+
+ glusterd_urltransform_free(linearr, 1);
+ if (ret == -1)
+ goto out;
+
+ /* Given the slave volume uuid, check and get any existing slave */
+ memcpy(slave1.slave_voluuid, slave_voluuid, GF_UUID_BUF_SIZE);
+ ret = dict_foreach(volinfo->gsync_slaves, _get_slave_idx_slave_voluuid,
+ &slave1);
+
+ if (ret == 0) { /* New slave */
+ dict_foreach(volinfo->gsync_slaves, _get_max_gsync_slave_num, &maxslv);
+ snprintf(key, sizeof(key), "slave%d", maxslv + 1);
+
+ ret = dict_set_dynstr(volinfo->gsync_slaves, key, value);
+ if (ret) {
+ GF_FREE(value);
+ goto out;
}
+ } else if (ret == -1) { /* Existing slave */
+ snprintf(key, sizeof(key), "slave%d", slave1.old_slvidx);
+
+ gf_msg_debug(this->name, 0,
+ "Replacing key:%s with new value"
+ ":%s",
+ key, value);
- ret = glusterd_store_volinfo (volinfo,
- GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ /* Add new slave's value, with the same slave index */
+ ret = dict_set_dynstr(volinfo->gsync_slaves, key, value);
if (ret) {
- *op_errstr = gf_strdup ("Failed to store the Volume "
- "information");
- goto out;
- }
- ret = 0;
- out:
- return ret;
+ GF_FREE(value);
+ goto out;
+ }
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REMOTE_VOL_UUID_FAIL,
+ "_get_slave_idx_slave_voluuid failed!");
+ GF_FREE(value);
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret) {
+ *op_errstr = gf_strdup(
+ "Failed to store the Volume "
+ "information");
+ goto out;
+ }
+ ret = 0;
+out:
+ return ret;
}
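
The slot choice above comes down to one branch: if
_get_slave_idx_slave_voluuid() found the slave volume uuid already
registered, its old "slave<N>" index is reused, otherwise the entry goes
in at max index + 1. A sketch of just that decision, with the two helper
results replaced by hypothetical constants:

    #include <stdio.h>

    int
    main(void)
    {
        int found_existing = 0; /* stand-in: voluuid lookup missed  */
        int old_slvidx = 2;     /* stand-in: index on a lookup hit  */
        int maxslv = 3;         /* stand-in: highest slave<N> seen  */
        char key[512];

        if (found_existing)
            snprintf(key, sizeof(key), "slave%d", old_slvidx);
        else
            snprintf(key, sizeof(key), "slave%d", maxslv + 1);
        printf("store under: %s\n", key); /* slave4 */
        return 0;
    }
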
static int
-glusterd_op_verify_gsync_start_options (glusterd_volinfo_t *volinfo,
- char *slave, char *conf_path,
- char *statefile, char **op_errstr,
- gf_boolean_t is_force)
+glusterd_op_verify_gsync_start_options(glusterd_volinfo_t *volinfo, char *slave,
+ char *conf_path, char *statefile,
+ char **op_errstr, gf_boolean_t is_force)
{
- int ret = -1;
- int ret_status = 0;
- gf_boolean_t is_template_in_use = _gf_false;
- char msg[2048] = {0};
- uuid_t uuid = {0};
- xlator_t *this = NULL;
- struct stat stbuf = {0,};
- char statefiledir[PATH_MAX] = {0,};
- char *statedir = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (volinfo);
- GF_ASSERT (slave);
- GF_ASSERT (op_errstr);
- GF_ASSERT (conf_path);
- GF_ASSERT (this && this->private);
-
- if (GLUSTERD_STATUS_STARTED != volinfo->status) {
- snprintf (msg, sizeof (msg), "Volume %s needs to be started "
- "before "GEOREP" start", volinfo->volname);
- goto out;
- }
-
- /* check session directory as statefile may not present
- * during upgrade */
- if (snprintf (statefiledir, sizeof (statefiledir), "%s", statefile) >=
- sizeof (statefiledir)) {
- snprintf (msg, sizeof (msg), "statefiledir truncated");
- gf_msg (this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
- "%s", msg);
- *op_errstr = gf_strdup (msg);
- goto out;
- }
- statedir = dirname (statefiledir);
-
- ret = sys_lstat (statedir, &stbuf);
- if (ret) {
- snprintf (msg, sizeof (msg), "Session between %s and %s has"
- " not been created. Please create session and retry.",
- volinfo->volname, slave);
- gf_msg (this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
- "%s statefile: %s", msg, statefile);
- *op_errstr = gf_strdup (msg);
- goto out;
- }
-
- /* Check if the gsync slave info is stored. If not
- * session has not been created */
- ret = glusterd_gsync_get_uuid (slave, volinfo, uuid);
- if (ret) {
- snprintf (msg, sizeof (msg), "Session between %s and %s has"
- " not been created. Please create session and retry.",
- volinfo->volname, slave);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SESSION_CREATE_ERROR,
- "%s", msg);
- goto out;
- }
-
- /*Check if the gsync is already started in cmd. inited host
- * If so initiate add it into the glusterd's priv*/
- ret = gsync_status (volinfo->volname, slave, conf_path,
- &ret_status, &is_template_in_use);
- if (ret == 0) {
- if ((ret_status == 0) && !is_force) {
- snprintf (msg, sizeof (msg), GEOREP " session between"
- " %s & %s already started", volinfo->volname,
- slave);
- ret = -1;
- goto out;
- }
- } else if (ret == -1) {
- snprintf (msg, sizeof (msg), GEOREP" start option "
- "validation failed ");
- goto out;
- }
-
- if (is_template_in_use == _gf_true) {
- snprintf (msg, sizeof (msg), GEOREP" start "
- "failed : pid-file entry missing "
- "in config file.");
- ret = -1;
- goto out;
- }
-
- ret = glusterd_verify_gsyncd_spawn (volinfo->volname, slave);
- if (ret && !is_force) {
- snprintf (msg, sizeof (msg), "Unable to spawn gsyncd");
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_SPAWN_FAILED,
- "%s", msg);
- }
+ int ret = -1;
+ int ret_status = 0;
+ gf_boolean_t is_template_in_use = _gf_false;
+ char msg[2048] = {0};
+ uuid_t uuid = {0};
+ xlator_t *this = NULL;
+ struct stat stbuf = {
+ 0,
+ };
+ char statefiledir[PATH_MAX] = {
+ 0,
+ };
+ char *statedir = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(volinfo);
+ GF_ASSERT(slave);
+ GF_ASSERT(op_errstr);
+ GF_ASSERT(conf_path);
+ GF_ASSERT(this && this->private);
+
+ if (GLUSTERD_STATUS_STARTED != volinfo->status) {
+ snprintf(msg, sizeof(msg),
+ "Volume %s needs to be started "
+ "before " GEOREP " start",
+ volinfo->volname);
+ goto out;
+ }
+
+ /* check session directory as statefile may not present
+ * during upgrade */
+ if (snprintf(statefiledir, sizeof(statefiledir), "%s", statefile) >=
+ sizeof(statefiledir)) {
+ snprintf(msg, sizeof(msg), "statefiledir truncated");
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED, "%s",
+ msg);
+ *op_errstr = gf_strdup(msg);
+ goto out;
+ }
+ statedir = dirname(statefiledir);
+
+ ret = sys_lstat(statedir, &stbuf);
+ if (ret) {
+ snprintf(msg, sizeof(msg),
+ "Session between %s and %s has"
+ " not been created. Please create session and retry.",
+ volinfo->volname, slave);
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
+ "%s statefile: %s", msg, statefile);
+ *op_errstr = gf_strdup(msg);
+ goto out;
+ }
+
+ /* Check if the gsync slave info is stored. If not
+ * session has not been created */
+ ret = glusterd_gsync_get_uuid(slave, volinfo, uuid);
+ if (ret) {
+ snprintf(msg, sizeof(msg),
+ "Session between %s and %s has"
+ " not been created. Please create session and retry.",
+ volinfo->volname, slave);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SESSION_CREATE_ERROR, "%s",
+ msg);
+ goto out;
+ }
+
+    /* Check if gsync is already started on the host that initiated
+     * the command; if so, add it into glusterd's priv. */
+ ret = gsync_status(volinfo->volname, slave, conf_path, &ret_status,
+ &is_template_in_use);
+ if (ret == 0) {
+ if ((ret_status == 0) && !is_force) {
+ snprintf(msg, sizeof(msg),
+ GEOREP
+ " session between"
+ " %s & %s already started",
+ volinfo->volname, slave);
+ ret = -1;
+ goto out;
+ }
+ } else if (ret == -1) {
+        snprintf(msg, sizeof(msg),
+                 GEOREP
+                 " start option "
+                 "validation failed");
+ goto out;
+ }
+
+ if (is_template_in_use == _gf_true) {
+        snprintf(msg, sizeof(msg),
+                 GEOREP
+                 " start "
+                 "failed: pid-file entry missing "
+                 "in config file.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_verify_gsyncd_spawn(volinfo->volname, slave);
+ if (ret && !is_force) {
+ snprintf(msg, sizeof(msg), "Unable to spawn gsyncd");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_SPAWN_FAILED, "%s",
+ msg);
+ }
out:
- if (ret && (msg[0] != '\0')) {
- *op_errstr = gf_strdup (msg);
- }
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ if (ret && (msg[0] != '\0')) {
+ *op_errstr = gf_strdup(msg);
+ }
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
void
-glusterd_check_geo_rep_configured (glusterd_volinfo_t *volinfo,
- gf_boolean_t *flag)
+glusterd_check_geo_rep_configured(glusterd_volinfo_t *volinfo,
+ gf_boolean_t *flag)
{
+ GF_ASSERT(volinfo);
+ GF_ASSERT(flag);
- GF_ASSERT (volinfo);
- GF_ASSERT (flag);
-
- if (volinfo->gsync_slaves->count)
- *flag = _gf_true;
- else
- *flag = _gf_false;
+ if (volinfo->gsync_slaves->count)
+ *flag = _gf_true;
+ else
+ *flag = _gf_false;
- return;
+ return;
}
/*
@@ -1974,70 +1961,70 @@ glusterd_check_geo_rep_configured (glusterd_volinfo_t *volinfo,
*/
static int
-is_geo_rep_active (glusterd_volinfo_t *volinfo, char *slave,
- char *conf_path, int *is_active)
+is_geo_rep_active(glusterd_volinfo_t *volinfo, char *slave, char *conf_path,
+ int *is_active)
{
- dict_t *confd = NULL;
- char *statefile = NULL;
- char *master = NULL;
- char monitor_status[PATH_MAX] = "";
- int ret = -1;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- master = volinfo->volname;
-
- confd = dict_new ();
- if (!confd) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
- "Not able to create dict.");
- goto out;
- }
-
- ret = glusterd_gsync_get_config (master, slave, conf_path,
- confd);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_GET_CONFIG_INFO_FAILED,
- "Unable to get configuration data "
- "for %s(master), %s(slave)", master, slave);
- ret = -1;
- goto out;
- }
-
- ret = dict_get_param (confd, "state_file", &statefile);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to get state_file's name "
- "for %s(master), %s(slave). Please check gsync "
- "config file.", master, slave);
- ret = -1;
- goto out;
- }
-
- ret = glusterd_gsync_read_frm_status (statefile, monitor_status,
- sizeof (monitor_status));
- if (ret <= 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_STAT_FILE_READ_FAILED,
- "Unable to read the status file for %s(master), "
- "%s(slave)", master, slave);
- snprintf (monitor_status, sizeof (monitor_status), "defunct");
- }
-
- if ((!strcmp(monitor_status, "Stopped")) ||
- (!strcmp(monitor_status, "Created"))) {
- *is_active = 0;
- } else {
- *is_active = 1;
- }
- ret = 0;
+ dict_t *confd = NULL;
+ char *statefile = NULL;
+ char *master = NULL;
+ char monitor_status[PATH_MAX] = "";
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ master = volinfo->volname;
+
+ confd = dict_new();
+ if (!confd) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
+ "Not able to create dict.");
+ goto out;
+ }
+
+ ret = glusterd_gsync_get_config(master, slave, conf_path, confd);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GET_CONFIG_INFO_FAILED,
+ "Unable to get configuration data "
+ "for %s(master), %s(slave)",
+ master, slave);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_param(confd, "state_file", &statefile);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get state_file's name "
+ "for %s(master), %s(slave). Please check gsync "
+ "config file.",
+ master, slave);
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_gsync_read_frm_status(statefile, monitor_status,
+ sizeof(monitor_status));
+ if (ret <= 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STAT_FILE_READ_FAILED,
+ "Unable to read the status file for %s(master), "
+ "%s(slave)",
+ master, slave);
+ snprintf(monitor_status, sizeof(monitor_status), "defunct");
+ }
+
+ if ((!strcmp(monitor_status, "Stopped")) ||
+ (!strcmp(monitor_status, "Created"))) {
+ *is_active = 0;
+ } else {
+ *is_active = 1;
+ }
+ ret = 0;
out:
- if (confd)
- dict_unref (confd);
- return ret;
+ if (confd)
+ dict_unref(confd);
+ return ret;
}
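
Activity here reduces to the monitor status string: only "Stopped" and
"Created" count as inactive, and anything else is active, including the
"defunct" fallback written when the state file cannot be read. A
standalone sketch of that mapping:

    #include <stdio.h>
    #include <string.h>

    static int
    is_active(const char *monitor_status)
    {
        return !(strcmp(monitor_status, "Stopped") == 0 ||
                 strcmp(monitor_status, "Created") == 0);
    }

    int
    main(void)
    {
        const char *samples[] = {"Stopped", "Created", "Active", "defunct"};
        size_t i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("%-8s -> active=%d\n", samples[i],
                   is_active(samples[i]));
        return 0;
    }
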
/*
@@ -2053,89 +2040,86 @@ out:
*/
int
-_get_slave_status (dict_t *dict, char *key, data_t *value, void *data)
+_get_slave_status(dict_t *dict, char *key, data_t *value, void *data)
{
- gsync_status_param_t *param = NULL;
- char *slave = NULL;
- char *slave_url = NULL;
- char *slave_vol = NULL;
- char *slave_host = NULL;
- char *errmsg = NULL;
- char conf_path[PATH_MAX] = "";
- int ret = -1;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
-
- param = (gsync_status_param_t *)data;
-
- GF_ASSERT (param);
- GF_ASSERT (param->volinfo);
- if (param->is_active) {
- ret = 0;
- goto out;
- }
-
- this = THIS;
- GF_ASSERT (this);
+ gsync_status_param_t *param = NULL;
+ char *slave = NULL;
+ char *slave_url = NULL;
+ char *slave_vol = NULL;
+ char *slave_host = NULL;
+ char *errmsg = NULL;
+ char conf_path[PATH_MAX] = "";
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ param = (gsync_status_param_t *)data;
+
+ GF_ASSERT(param);
+ GF_ASSERT(param->volinfo);
+ if (param->is_active) {
+ ret = 0;
+ goto out;
+ }
- priv = this->private;
- if (priv == NULL) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_GLUSTERD_PRIV_NOT_FOUND,
- "priv of glusterd not present");
- goto out;
- }
+ this = THIS;
+ GF_ASSERT(this);
- slave = strchr (value->data, ':');
- if (!slave) {
- ret = 0;
- goto out;
- }
- slave++;
+ priv = this->private;
+ if (priv == NULL) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_PRIV_NOT_FOUND,
+ "priv of glusterd not present");
+ goto out;
+ }
- ret = glusterd_get_slave_info (slave, &slave_url,
- &slave_host, &slave_vol, &errmsg);
- if (ret) {
- if (errmsg)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVEINFO_FETCH_ERROR, "Unable to fetch"
- " slave details. Error: %s", errmsg);
- else
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVEINFO_FETCH_ERROR,
- "Unable to fetch slave details.");
- ret = -1;
- goto out;
- }
-
- ret = snprintf (conf_path, sizeof(conf_path) - 1,
- "%s/"GEOREP"/%s_%s_%s/gsyncd.conf",
- priv->workdir, param->volinfo->volname,
- slave_host, slave_vol);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_CONF_PATH_ASSIGN_FAILED,
- "Unable to assign conf_path.");
- ret = -1;
- goto out;
- }
- conf_path[ret] = '\0';
+ slave = strchr(value->data, ':');
+ if (!slave) {
+ ret = 0;
+ goto out;
+ }
+ slave++;
- ret = is_geo_rep_active (param->volinfo,slave, conf_path,
- &param->is_active);
-out:
+ ret = glusterd_get_slave_info(slave, &slave_url, &slave_host, &slave_vol,
+ &errmsg);
+ if (ret) {
if (errmsg)
- GF_FREE (errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVEINFO_FETCH_ERROR,
+ "Unable to fetch"
+ " slave details. Error: %s",
+ errmsg);
+ else
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVEINFO_FETCH_ERROR,
+ "Unable to fetch slave details.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = snprintf(conf_path, sizeof(conf_path) - 1,
+ "%s/" GEOREP "/%s_%s_%s/gsyncd.conf", priv->workdir,
+ param->volinfo->volname, slave_host, slave_vol);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CONF_PATH_ASSIGN_FAILED,
+ "Unable to assign conf_path.");
+ ret = -1;
+ goto out;
+ }
+ conf_path[ret] = '\0';
+
+ ret = is_geo_rep_active(param->volinfo, slave, conf_path,
+ &param->is_active);
+out:
+ if (errmsg)
+ GF_FREE(errmsg);
- if (slave_vol)
- GF_FREE (slave_vol);
+ if (slave_vol)
+ GF_FREE(slave_vol);
- if (slave_url)
- GF_FREE (slave_url);
- if (slave_host)
- GF_FREE (slave_host);
+ if (slave_url)
+ GF_FREE(slave_url);
+ if (slave_host)
+ GF_FREE(slave_host);
- return ret;
+ return ret;
}
/* glusterd_check_geo_rep_running:
@@ -2149,909 +2133,924 @@ out:
*/
int
-glusterd_check_geo_rep_running (gsync_status_param_t *param, char **op_errstr)
+glusterd_check_geo_rep_running(gsync_status_param_t *param, char **op_errstr)
{
- char msg[2048] = {0,};
- gf_boolean_t enabled = _gf_false;
- int ret = 0;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (param);
- GF_ASSERT (param->volinfo);
- GF_ASSERT (op_errstr);
-
- glusterd_check_geo_rep_configured (param->volinfo, &enabled);
-
- if (enabled) {
- ret = dict_foreach (param->volinfo->gsync_slaves,
- _get_slave_status, param);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVEINFO_FETCH_ERROR,
- "_get_slave_satus failed");
- snprintf (msg, sizeof(msg), GEOREP" Unable to"
- " get the status of active "GEOREP""
- " session for the volume '%s'.\n"
- " Please check the log file for"
- " more info.", param->volinfo->volname);
- *op_errstr = gf_strdup (msg);
- ret = -1;
- goto out;
- }
+ char msg[2048] = {
+ 0,
+ };
+ gf_boolean_t enabled = _gf_false;
+ int ret = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(param);
+ GF_ASSERT(param->volinfo);
+ GF_ASSERT(op_errstr);
+
+ glusterd_check_geo_rep_configured(param->volinfo, &enabled);
+
+ if (enabled) {
+ ret = dict_foreach(param->volinfo->gsync_slaves, _get_slave_status,
+ param);
+ if (ret) {
+            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVEINFO_FETCH_ERROR,
+                   "_get_slave_status failed");
+            snprintf(msg, sizeof(msg),
+                     GEOREP
+                     " Unable to"
+                     " get the status of active " GEOREP
+                     " session for the volume '%s'.\n"
+ " Please check the log file for"
+ " more info.",
+ param->volinfo->volname);
+ *op_errstr = gf_strdup(msg);
+ ret = -1;
+ goto out;
+ }
- if (param->is_active) {
- snprintf (msg, sizeof(msg), GEOREP" sessions"
- " are active for the volume %s.\nStop"
- " "GEOREP " sessions involved in this"
- " volume. Use 'volume "GEOREP
- " status' command for more info.",
- param->volinfo->volname);
- *op_errstr = gf_strdup (msg);
- goto out;
- }
- }
- out:
- return ret;
+ if (param->is_active) {
+ snprintf(msg, sizeof(msg),
+ GEOREP
+ " sessions"
+ " are active for the volume %s.\nStop"
+ " " GEOREP
+ " sessions involved in this"
+ " volume. Use 'volume " GEOREP
+ " status' command for more info.",
+ param->volinfo->volname);
+ *op_errstr = gf_strdup(msg);
+ goto out;
+ }
+ }
+out:
+ return ret;
}
static int
-glusterd_op_verify_gsync_running (glusterd_volinfo_t *volinfo,
- char *slave, char *conf_path,
- char **op_errstr)
+glusterd_op_verify_gsync_running(glusterd_volinfo_t *volinfo, char *slave,
+ char *conf_path, char **op_errstr)
{
- int pfd = -1;
- int ret = -1;
- char msg[2048] = {0};
- char pidfile[PATH_MAX] = {0,};
- gf_boolean_t is_template_in_use = _gf_false;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (THIS && THIS->private);
- GF_ASSERT (volinfo);
- GF_ASSERT (slave);
- GF_ASSERT (conf_path);
- GF_ASSERT (op_errstr);
-
- if (GLUSTERD_STATUS_STARTED != volinfo->status) {
- snprintf (msg, sizeof (msg), "Volume %s needs to be started "
- "before "GEOREP" start", volinfo->volname);
-
- goto out;
- }
-
- pfd = gsyncd_getpidfile (volinfo->volname, slave, pidfile,
- conf_path, &is_template_in_use);
- if (pfd == -2) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_VALIDATE_FAILED,
- GEOREP" stop validation failed for %s & %s",
- volinfo->volname, slave);
- ret = -1;
- goto out;
- }
- if (gsync_status_byfd (pfd) == -1) {
- snprintf (msg, sizeof (msg), GEOREP" session b/w %s & %s is "
- "not running on this node.", volinfo->volname,
- slave);
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_SESSION_INACTIVE,
- "%s", msg);
- ret = -1;
- /* monitor gsyncd already dead */
- goto out;
- }
-
- if (is_template_in_use) {
- snprintf (msg, sizeof (msg), "pid-file entry missing in "
- "the config file(%s).", conf_path);
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_PIDFILE_NOT_FOUND,
- "%s", msg);
- ret = -1;
- goto out;
- }
-
- if (pfd < 0)
- goto out;
-
- ret = 0;
+ int pfd = -1;
+ int ret = -1;
+ char msg[2048] = {0};
+ char pidfile[PATH_MAX] = {
+ 0,
+ };
+ gf_boolean_t is_template_in_use = _gf_false;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(THIS && THIS->private);
+ GF_ASSERT(volinfo);
+ GF_ASSERT(slave);
+ GF_ASSERT(conf_path);
+ GF_ASSERT(op_errstr);
+
+ if (GLUSTERD_STATUS_STARTED != volinfo->status) {
+ snprintf(msg, sizeof(msg),
+ "Volume %s needs to be started "
+ "before " GEOREP " start",
+ volinfo->volname);
+
+ goto out;
+ }
+
+ pfd = gsyncd_getpidfile(volinfo->volname, slave, pidfile, conf_path,
+ &is_template_in_use);
+ if (pfd == -2) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VALIDATE_FAILED,
+ GEOREP " stop validation failed for %s & %s", volinfo->volname,
+ slave);
+ ret = -1;
+ goto out;
+ }
+ if (gsync_status_byfd(pfd) == -1) {
+ snprintf(msg, sizeof(msg),
+ GEOREP
+ " session b/w %s & %s is "
+ "not running on this node.",
+ volinfo->volname, slave);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SESSION_INACTIVE, "%s", msg);
+ ret = -1;
+ /* monitor gsyncd already dead */
+ goto out;
+ }
+
+ if (is_template_in_use) {
+ snprintf(msg, sizeof(msg),
+ "pid-file entry missing in "
+ "the config file(%s).",
+ conf_path);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PIDFILE_NOT_FOUND, "%s",
+ msg);
+ ret = -1;
+ goto out;
+ }
+
+ if (pfd < 0)
+ goto out;
+
+ ret = 0;
out:
- if (ret && (msg[0] != '\0')) {
- *op_errstr = gf_strdup (msg);
- }
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ if (ret && (msg[0] != '\0')) {
+ *op_errstr = gf_strdup(msg);
+ }
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
static int
-glusterd_verify_gsync_status_opts (dict_t *dict, char **op_errstr)
+glusterd_verify_gsync_status_opts(dict_t *dict, char **op_errstr)
{
- char *slave = NULL;
- char *volname = NULL;
- char errmsg[PATH_MAX] = {0, };
- gf_boolean_t exists = _gf_false;
- glusterd_volinfo_t *volinfo = NULL;
- int ret = 0;
- char *conf_path = NULL;
- char *slave_url = NULL;
- char *slave_host = NULL;
- char *slave_vol = NULL;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- if (THIS)
- priv = THIS->private;
- if (priv == NULL) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_GLUSTERD_PRIV_NOT_FOUND,
- "priv of glusterd not present");
- *op_errstr = gf_strdup ("glusterd defunct");
- goto out;
- }
-
- ret = dict_get_str (dict, "master", &volname);
- if (ret < 0) {
- ret = 0;
- goto out;
- }
-
- exists = glusterd_check_volume_exists (volname);
- ret = glusterd_volinfo_find (volname, &volinfo);
- if ((ret) || (!exists)) {
- gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_FOUND,
- "volume name does not exist");
- snprintf (errmsg, sizeof(errmsg), "Volume name %s does not"
- " exist", volname);
- *op_errstr = gf_strdup (errmsg);
- ret = -1;
- goto out;
- }
-
- ret = dict_get_str (dict, "slave", &slave);
- if (ret < 0) {
- ret = 0;
- goto out;
- }
-
- ret = glusterd_get_slave_details_confpath (volinfo, dict, &slave_url,
- &slave_host, &slave_vol,
- &conf_path, op_errstr);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVEINFO_FETCH_ERROR,
- "Unable to fetch slave or confpath details.");
- ret = -1;
- goto out;
- }
+ char *slave = NULL;
+ char *volname = NULL;
+ char errmsg[PATH_MAX] = {
+ 0,
+ };
+ gf_boolean_t exists = _gf_false;
+ glusterd_volinfo_t *volinfo = NULL;
+ int ret = 0;
+ char *conf_path = NULL;
+ char *slave_url = NULL;
+ char *slave_host = NULL;
+ char *slave_vol = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ if (THIS)
+ priv = THIS->private;
+ if (priv == NULL) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_PRIV_NOT_FOUND,
+ "priv of glusterd not present");
+ *op_errstr = gf_strdup("glusterd defunct");
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "master", &volname);
+ if (ret < 0) {
+ ret = 0;
+ goto out;
+ }
+
+ exists = glusterd_check_volume_exists(volname);
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if ((ret) || (!exists)) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_FOUND,
+ "volume name does not exist");
+ snprintf(errmsg, sizeof(errmsg),
+ "Volume name %s does not"
+ " exist",
+ volname);
+ *op_errstr = gf_strdup(errmsg);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "slave", &slave);
+ if (ret < 0) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = glusterd_get_slave_details_confpath(volinfo, dict, &slave_url,
+ &slave_host, &slave_vol,
+ &conf_path, op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVEINFO_FETCH_ERROR,
+ "Unable to fetch slave or confpath details.");
+ ret = -1;
+ goto out;
+ }
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
-
int
-glusterd_op_gsync_args_get (dict_t *dict, char **op_errstr,
- char **master, char **slave, char **host_uuid)
+glusterd_op_gsync_args_get(dict_t *dict, char **op_errstr, char **master,
+ char **slave, char **host_uuid)
{
+ int ret = -1;
+ xlator_t *this = NULL;
- int ret = -1;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (dict);
- GF_ASSERT (op_errstr);
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(dict);
+ GF_ASSERT(op_errstr);
- if (master) {
- ret = dict_get_str (dict, "master", master);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_DICT_GET_FAILED, "master not found");
- *op_errstr = gf_strdup ("master not found");
- goto out;
- }
+ if (master) {
+ ret = dict_get_str(dict, "master", master);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
+ "master not found");
+ *op_errstr = gf_strdup("master not found");
+ goto out;
}
+ }
- if (slave) {
- ret = dict_get_str (dict, "slave", slave);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_DICT_GET_FAILED, "slave not found");
- *op_errstr = gf_strdup ("slave not found");
- goto out;
- }
+ if (slave) {
+ ret = dict_get_str(dict, "slave", slave);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
+ "slave not found");
+ *op_errstr = gf_strdup("slave not found");
+ goto out;
}
+ }
- if (host_uuid) {
- ret = dict_get_str (dict, "host-uuid", host_uuid);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_DICT_GET_FAILED, "host_uuid not found");
- *op_errstr = gf_strdup ("host_uuid not found");
- goto out;
- }
+ if (host_uuid) {
+ ret = dict_get_str(dict, "host-uuid", host_uuid);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
+ "host_uuid not found");
+ *op_errstr = gf_strdup("host_uuid not found");
+ goto out;
}
+ }
- ret = 0;
+ ret = 0;
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
int
-glusterd_op_stage_sys_exec (dict_t *dict, char **op_errstr)
+glusterd_op_stage_sys_exec(dict_t *dict, char **op_errstr)
{
- char errmsg[PATH_MAX] = "";
- char *command = NULL;
- char command_path[PATH_MAX] = "";
- struct stat st = {0,};
- int ret = -1;
- glusterd_conf_t *conf = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
- GF_ASSERT (conf);
-
- if (conf->op_version < 2) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_UNSUPPORTED_VERSION,
- "Op Version not supported.");
- snprintf (errmsg, sizeof(errmsg), "One or more nodes do not"
- " support the required op version.");
- *op_errstr = gf_strdup (errmsg);
- ret = -1;
- goto out;
- }
-
- ret = dict_get_str (dict, "command", &command);
- if (ret) {
- strcpy (errmsg, "internal error");
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to get command from dict");
- goto out;
- }
-
- /* enforce local occurrence of the command */
- if (strchr (command, '/')) {
- strcpy (errmsg, "invalid command name");
- ret = -1;
- goto out;
- }
-
- sprintf (command_path, GSYNCD_PREFIX"/peer_%s", command);
- /* check if it's executable */
- ret = sys_access (command_path, X_OK);
- if (!ret)
- /* check if it's a regular file */
- ret = sys_stat (command_path, &st);
- if (!ret && !S_ISREG (st.st_mode))
- ret = -1;
+ char errmsg[PATH_MAX] = "";
+ char *command = NULL;
+ char command_path[PATH_MAX] = "";
+ struct stat st = {
+ 0,
+ };
+ int ret = -1;
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ if (conf->op_version < 2) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNSUPPORTED_VERSION,
+ "Op Version not supported.");
+ snprintf(errmsg, sizeof(errmsg),
+ "One or more nodes do not"
+ " support the required op version.");
+ *op_errstr = gf_strdup(errmsg);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "command", &command);
+ if (ret) {
+ strcpy(errmsg, "internal error");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get command from dict");
+ goto out;
+ }
+
+ /* enforce local occurrence of the command */
+ if (strchr(command, '/')) {
+ strcpy(errmsg, "invalid command name");
+ ret = -1;
+ goto out;
+ }
+
+ sprintf(command_path, GSYNCD_PREFIX "/peer_%s", command);
+ /* check if it's executable */
+ ret = sys_access(command_path, X_OK);
+ if (!ret)
+ /* check if it's a regular file */
+ ret = sys_stat(command_path, &st);
+ if (!ret && !S_ISREG(st.st_mode))
+ ret = -1;
out:
- if (ret) {
- if (errmsg[0] == '\0') {
- if (command)
- snprintf (errmsg, sizeof (errmsg),
- "gsync peer_%s command not found.",
- command);
- else
- snprintf (errmsg, sizeof (errmsg), "%s",
- "gsync peer command was not "
- "specified");
- }
- *op_errstr = gf_strdup (errmsg);
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_CMD_ERROR,
- "%s", errmsg);
- }
-
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ if (ret) {
+ if (errmsg[0] == '\0') {
+ if (command)
+ snprintf(errmsg, sizeof(errmsg),
+ "gsync peer_%s command not found.", command);
+ else
+ snprintf(errmsg, sizeof(errmsg), "%s",
+ "gsync peer command was not "
+ "specified");
+ }
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_CMD_ERROR, "%s",
+ errmsg);
+ }
+
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
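
The staging check above pins sys-exec to local helper scripts: a '/'
anywhere in the command name is rejected outright, and
GSYNCD_PREFIX "/peer_<command>" must resolve to an executable regular
file. A standalone sketch of the same validation (the prefix below is a
hypothetical stand-in for GSYNCD_PREFIX, and the sketch uses snprintf
where the code above still uses sprintf):

    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>
    #include <unistd.h>

    #define PEER_PREFIX "/usr/local/libexec/glusterfs"

    static int
    peer_cmd_ok(const char *command)
    {
        char path[4096];
        struct stat st;

        if (strchr(command, '/')) /* no path components allowed */
            return -1;
        snprintf(path, sizeof(path), PEER_PREFIX "/peer_%s", command);
        if (access(path, X_OK)) /* must be executable ...  */
            return -1;
        if (stat(path, &st) || !S_ISREG(st.st_mode))
            return -1;          /* ... and a regular file  */
        return 0;
    }

    int
    main(void)
    {
        /* results depend on which peer_ helpers are installed */
        printf("gsec_create: %d\n", peer_cmd_ok("gsec_create"));
        printf("../bin/sh:   %d\n", peer_cmd_ok("../bin/sh"));
        return 0;
    }
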
int
-glusterd_op_stage_copy_file (dict_t *dict, char **op_errstr)
+glusterd_op_stage_copy_file(dict_t *dict, char **op_errstr)
{
- char abs_filename[PATH_MAX] = "";
- char errmsg[PATH_MAX] = "";
- char *filename = NULL;
- char *host_uuid = NULL;
- char uuid_str [64] = {0};
- int ret = -1;
- glusterd_conf_t *priv = NULL;
- struct stat stbuf = {0,};
- xlator_t *this = NULL;
- char workdir[PATH_MAX] = {0,};
- char realpath_filename[PATH_MAX] = {0,};
- char realpath_workdir[PATH_MAX] = {0,};
- int32_t len = 0;
-
- this = THIS;
- GF_ASSERT (this);
-
- if (THIS)
- priv = THIS->private;
- if (priv == NULL) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_GLUSTERD_PRIV_NOT_FOUND,
- "priv of glusterd not present");
- *op_errstr = gf_strdup ("glusterd defunct");
- goto out;
+ char abs_filename[PATH_MAX] = "";
+ char errmsg[PATH_MAX] = "";
+ char *filename = NULL;
+ char *host_uuid = NULL;
+ char uuid_str[64] = {0};
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ struct stat stbuf = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ char workdir[PATH_MAX] = {
+ 0,
+ };
+ char realpath_filename[PATH_MAX] = {
+ 0,
+ };
+ char realpath_workdir[PATH_MAX] = {
+ 0,
+ };
+ int32_t len = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ if (THIS)
+ priv = THIS->private;
+ if (priv == NULL) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_PRIV_NOT_FOUND,
+ "priv of glusterd not present");
+ *op_errstr = gf_strdup("glusterd defunct");
+ goto out;
+ }
+
+ if (priv->op_version < 2) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNSUPPORTED_VERSION,
+ "Op Version not supported.");
+ snprintf(errmsg, sizeof(errmsg),
+ "One or more nodes do not"
+ " support the required op version.");
+ *op_errstr = gf_strdup(errmsg);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "host-uuid", &host_uuid);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to fetch host-uuid from dict.");
+ goto out;
+ }
+
+ uuid_utoa_r(MY_UUID, uuid_str);
+ if (!strcmp(uuid_str, host_uuid)) {
+ ret = dict_get_str(dict, "source", &filename);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to fetch filename from dict.");
+ *op_errstr = gf_strdup("command unsuccessful");
+ goto out;
}
-
- if (priv->op_version < 2) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_UNSUPPORTED_VERSION,
- "Op Version not supported.");
- snprintf (errmsg, sizeof(errmsg), "One or more nodes do not"
- " support the required op version.");
- *op_errstr = gf_strdup (errmsg);
- ret = -1;
- goto out;
+ len = snprintf(abs_filename, sizeof(abs_filename), "%s/%s",
+ priv->workdir, filename);
+ if ((len < 0) || (len >= sizeof(abs_filename))) {
+ ret = -1;
+ goto out;
}
- ret = dict_get_str (dict, "host-uuid", &host_uuid);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to fetch host-uuid from dict.");
- goto out;
+ if (!realpath(priv->workdir, realpath_workdir)) {
+ len = snprintf(errmsg, sizeof(errmsg),
+ "Failed to "
+ "get realpath of %s: %s",
+ priv->workdir, strerror(errno));
+ if (len < 0) {
+ strcpy(errmsg, "<error>");
+ }
+ *op_errstr = gf_strdup(errmsg);
+ ret = -1;
+ goto out;
+ }
+
+ if (!realpath(abs_filename, realpath_filename)) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Failed to get "
+ "realpath of %s: %s",
+ filename, strerror(errno));
+ *op_errstr = gf_strdup(errmsg);
+ ret = -1;
+ goto out;
+ }
+
+ /* Add Trailing slash to workdir, without slash strncmp
+ will succeed for /var/lib/glusterd_bad */
+ len = snprintf(workdir, sizeof(workdir), "%s/", realpath_workdir);
+ if ((len < 0) || (len >= sizeof(workdir))) {
+ ret = -1;
+ goto out;
+ }
+
+ /* Protect against file copy outside $workdir */
+ if (strncmp(workdir, realpath_filename, strlen(workdir))) {
+ len = snprintf(errmsg, sizeof(errmsg),
+ "Source file"
+ " is outside of %s directory",
+ priv->workdir);
+ if (len < 0) {
+ strcpy(errmsg, "<error>");
+ }
+ *op_errstr = gf_strdup(errmsg);
+ ret = -1;
+ goto out;
}
- uuid_utoa_r (MY_UUID, uuid_str);
- if (!strcmp (uuid_str, host_uuid)) {
- ret = dict_get_str (dict, "source", &filename);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Unable to fetch filename from dict.");
- *op_errstr = gf_strdup ("command unsuccessful");
- goto out;
- }
- len = snprintf (abs_filename, sizeof(abs_filename),
- "%s/%s", priv->workdir, filename);
- if ((len < 0) || (len >= sizeof(abs_filename))) {
- ret = -1;
- goto out;
- }
-
- if (!realpath (priv->workdir, realpath_workdir)) {
- len = snprintf (errmsg, sizeof (errmsg), "Failed to "
- "get realpath of %s: %s",
- priv->workdir, strerror (errno));
- if (len < 0) {
- strcpy(errmsg, "<error>");
- }
- *op_errstr = gf_strdup (errmsg);
- ret = -1;
- goto out;
- }
-
- if (!realpath (abs_filename, realpath_filename)) {
- snprintf (errmsg, sizeof (errmsg), "Failed to get "
- "realpath of %s: %s", filename,
- strerror (errno));
- *op_errstr = gf_strdup (errmsg);
- ret = -1;
- goto out;
- }
-
- /* Add Trailing slash to workdir, without slash strncmp
- will succeed for /var/lib/glusterd_bad */
- len = snprintf (workdir, sizeof(workdir), "%s/",
- realpath_workdir);
- if ((len < 0) || (len >= sizeof(workdir))) {
- ret = -1;
- goto out;
- }
-
- /* Protect against file copy outside $workdir */
- if (strncmp (workdir, realpath_filename, strlen (workdir))) {
- len = snprintf (errmsg, sizeof (errmsg), "Source file"
- " is outside of %s directory",
- priv->workdir);
- if (len < 0) {
- strcpy(errmsg, "<error>");
- }
- *op_errstr = gf_strdup (errmsg);
- ret = -1;
- goto out;
- }
-
- ret = sys_lstat (abs_filename, &stbuf);
- if (ret) {
- len = snprintf (errmsg, sizeof (errmsg), "Source file"
- " does not exist in %s",
- priv->workdir);
- if (len < 0) {
- strcpy(errmsg, "<error>");
- }
- *op_errstr = gf_strdup (errmsg);
- goto out;
- }
+ ret = sys_lstat(abs_filename, &stbuf);
+ if (ret) {
+ len = snprintf(errmsg, sizeof(errmsg),
+ "Source file"
+ " does not exist in %s",
+ priv->workdir);
+ if (len < 0) {
+ strcpy(errmsg, "<error>");
+ }
+ *op_errstr = gf_strdup(errmsg);
+ goto out;
+ }
- if (!S_ISREG(stbuf.st_mode)) {
- snprintf (errmsg, sizeof (errmsg), "Source file"
- " is not a regular file.");
- *op_errstr = gf_strdup (errmsg);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SRC_FILE_ERROR,
- "%s", errmsg);
- ret = -1;
- goto out;
- }
+ if (!S_ISREG(stbuf.st_mode)) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Source file"
+ " is not a regular file.");
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SRC_FILE_ERROR, "%s",
+ errmsg);
+ ret = -1;
+ goto out;
}
+ }
- ret = 0;
+ ret = 0;
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
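
The copy-file staging above canonicalises both the workdir and the
source with realpath(3) and then prefix-matches with a trailing '/'
appended to the workdir; the comment's point is that without the slash,
"/var/lib/glusterd_bad" would pass a bare strncmp against
"/var/lib/glusterd". A standalone sketch of the containment test (paths
in main() are hypothetical):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int
    inside_workdir(const char *workdir, const char *file)
    {
        char real_wd[PATH_MAX];
        char real_file[PATH_MAX];
        char prefix[PATH_MAX + 2];

        if (!realpath(workdir, real_wd) || !realpath(file, real_file))
            return 0; /* unresolvable: treat as outside */
        /* the trailing '/' defeats the _bad-suffix false match */
        snprintf(prefix, sizeof(prefix), "%s/", real_wd);
        return strncmp(prefix, real_file, strlen(prefix)) == 0;
    }

    int
    main(void)
    {
        printf("/etc/hosts in /etc? %d\n",
               inside_workdir("/etc", "/etc/hosts")); /* 1 */
        printf("/tmp in /etc?       %d\n",
               inside_workdir("/etc", "/tmp"));       /* 0 */
        return 0;
    }
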
int
-glusterd_get_statefile_name (glusterd_volinfo_t *volinfo, char *slave,
- char *conf_path, char **statefile,
- gf_boolean_t *is_template_in_use)
+glusterd_get_statefile_name(glusterd_volinfo_t *volinfo, char *slave,
+ char *conf_path, char **statefile,
+ gf_boolean_t *is_template_in_use)
{
- char *master = NULL;
- char *buf = NULL;
- char *working_conf_path = NULL;
- char temp_conf_path[PATH_MAX] = "";
- dict_t *confd = NULL;
- glusterd_conf_t *priv = NULL;
- int ret = -1;
- struct stat stbuf = {0,};
- xlator_t *this = NULL;
- int32_t len = 0;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (this->private);
- GF_ASSERT (volinfo);
- GF_ASSERT (conf_path);
- GF_ASSERT (is_template_in_use);
-
- master = volinfo->volname;
-
- confd = dict_new ();
- if (!confd) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
- "Unable to create new dict");
- goto out;
- }
-
- priv = THIS->private;
-
- len = snprintf (temp_conf_path, sizeof(temp_conf_path),
- "%s/"GSYNC_CONF_TEMPLATE, priv->workdir);
- if ((len < 0) || (len >= sizeof(temp_conf_path))) {
- goto out;
- }
-
- ret = sys_lstat (conf_path, &stbuf);
- if (!ret) {
- gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_CONFIG_INFO,
- "Using passed config template(%s).",
- conf_path);
- working_conf_path = conf_path;
- } else {
- gf_msg (this->name, GF_LOG_WARNING, ENOENT,
- GD_MSG_FILE_OP_FAILED,
- "Config file (%s) missing. Looking for template config"
- " file (%s)", conf_path, temp_conf_path);
- ret = sys_lstat (temp_conf_path, &stbuf);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, ENOENT,
- GD_MSG_FILE_OP_FAILED, "Template "
- "config file (%s) missing.", temp_conf_path);
- goto out;
- }
- gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_DEFAULT_TEMP_CONFIG,
- "Using default config template(%s).", temp_conf_path);
- working_conf_path = temp_conf_path;
- *is_template_in_use = _gf_true;
- }
-
-fetch_data:
- ret = glusterd_gsync_get_config (master, slave, working_conf_path,
- confd);
+ char *master = NULL;
+ char *buf = NULL;
+ char *working_conf_path = NULL;
+ char temp_conf_path[PATH_MAX] = "";
+ dict_t *confd = NULL;
+ glusterd_conf_t *priv = NULL;
+ int ret = -1;
+ struct stat stbuf = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ int32_t len = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(this->private);
+ GF_ASSERT(volinfo);
+ GF_ASSERT(conf_path);
+ GF_ASSERT(is_template_in_use);
+
+ master = volinfo->volname;
+
+ confd = dict_new();
+ if (!confd) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
+ "Unable to create new dict");
+ goto out;
+ }
+
+ priv = THIS->private;
+
+ len = snprintf(temp_conf_path, sizeof(temp_conf_path),
+ "%s/" GSYNC_CONF_TEMPLATE, priv->workdir);
+ if ((len < 0) || (len >= sizeof(temp_conf_path))) {
+ goto out;
+ }
+
+ ret = sys_lstat(conf_path, &stbuf);
+ if (!ret) {
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_CONFIG_INFO,
+ "Using passed config template(%s).", conf_path);
+ working_conf_path = conf_path;
+ } else {
+ gf_msg(this->name, GF_LOG_WARNING, ENOENT, GD_MSG_FILE_OP_FAILED,
+ "Config file (%s) missing. Looking for template config"
+ " file (%s)",
+ conf_path, temp_conf_path);
+ ret = sys_lstat(temp_conf_path, &stbuf);
if (ret) {
- if (*is_template_in_use == _gf_false) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_GET_CONFIG_INFO_FAILED,
- "Unable to get configuration data "
- "for %s(master), %s(slave). "
- "Trying template config.",
- master, slave);
- working_conf_path = temp_conf_path;
- *is_template_in_use = _gf_true;
- goto fetch_data;
- } else {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_GET_CONFIG_INFO_FAILED,
- "Unable to get configuration data "
- "for %s(master), %s(slave) from "
- "template config",
- master, slave);
- goto out;
- }
- }
+ gf_msg(this->name, GF_LOG_ERROR, ENOENT, GD_MSG_FILE_OP_FAILED,
+ "Template "
+ "config file (%s) missing.",
+ temp_conf_path);
+ goto out;
+ }
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DEFAULT_TEMP_CONFIG,
+ "Using default config template(%s).", temp_conf_path);
+ working_conf_path = temp_conf_path;
+ *is_template_in_use = _gf_true;
+ }
- ret = dict_get_param (confd, "state_file", &buf);
- if (ret) {
- if (*is_template_in_use == _gf_false) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Unable to get state_file's name. "
- "Trying template config.");
- working_conf_path = temp_conf_path;
- *is_template_in_use = _gf_true;
- goto fetch_data;
- } else {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_GET_STATEFILE_NAME_FAILED,
- "Unable to get state_file's "
- "name from template.");
- goto out;
- }
+fetch_data:
+ ret = glusterd_gsync_get_config(master, slave, working_conf_path, confd);
+ if (ret) {
+ if (*is_template_in_use == _gf_false) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GET_CONFIG_INFO_FAILED,
+ "Unable to get configuration data "
+ "for %s(master), %s(slave). "
+ "Trying template config.",
+ master, slave);
+ working_conf_path = temp_conf_path;
+ *is_template_in_use = _gf_true;
+ goto fetch_data;
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GET_CONFIG_INFO_FAILED,
+ "Unable to get configuration data "
+ "for %s(master), %s(slave) from "
+ "template config",
+ master, slave);
+ goto out;
+ }
+ }
+
+ ret = dict_get_param(confd, "state_file", &buf);
+ if (ret) {
+ if (*is_template_in_use == _gf_false) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get state_file's name. "
+ "Trying template config.");
+ working_conf_path = temp_conf_path;
+ *is_template_in_use = _gf_true;
+ goto fetch_data;
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_GET_STATEFILE_NAME_FAILED,
+ "Unable to get state_file's "
+ "name from template.");
+ goto out;
}
+ }
- ret = 0;
- out:
- if (buf) {
- *statefile = gf_strdup(buf);
- if (!*statefile)
- ret = -1;
- }
+ ret = 0;
+out:
+ if (buf) {
+ *statefile = gf_strdup(buf);
+ if (!*statefile)
+ ret = -1;
+ }
- if (confd)
- dict_unref (confd);
+ if (confd)
+ dict_unref(confd);
- gf_msg_debug (this->name, 0, "Returning %d ", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d ", ret);
+ return ret;
}
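
A note on the control flow above: the function resolves the geo-rep state
file in two passes, preferring the per-session gsyncd.conf and falling back
to the shipped template via the fetch_data label when either the file or its
"state_file" key is missing. A minimal self-contained sketch of that retry
pattern (load_param() and the paths are illustrative stand-ins, not gluster
APIs):

#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

/* hypothetical: read the first line of a config file into buf */
static int
load_param(const char *conf, char *buf, size_t len)
{
    FILE *fp = fopen(conf, "r");

    if (!fp)
        return -1;
    if (!fgets(buf, (int)len, fp)) {
        fclose(fp);
        return -1;
    }
    fclose(fp);
    buf[strcspn(buf, "\n")] = '\0';
    return 0;
}

int
resolve_statefile(const char *conf_path, const char *tmpl_path, char *out,
                  size_t outlen, int *used_template)
{
    struct stat st;
    const char *working = conf_path;

    *used_template = 0;
    if (lstat(conf_path, &st) != 0) { /* session config missing */
        if (lstat(tmpl_path, &st) != 0)
            return -1; /* no template either */
        working = tmpl_path;
        *used_template = 1;
    }

fetch_data:
    if (load_param(working, out, outlen) != 0) {
        if (!*used_template) { /* retry once with the template */
            working = tmpl_path;
            *used_template = 1;
            goto fetch_data;
        }
        return -1;
    }
    return 0;
}
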
int
-glusterd_create_status_file (char *master, char *slave, char *slave_host,
- char *slave_vol, char *status)
+glusterd_create_status_file(char *master, char *slave, char *slave_host,
+ char *slave_vol, char *status)
{
- int ret = -1;
- runner_t runner = {0,};
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- if (THIS)
- priv = THIS->private;
- if (priv == NULL) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_GLUSTERD_PRIV_NOT_FOUND,
- "priv of glusterd not present");
- goto out;
- }
+ int ret = -1;
+ runner_t runner = {
+ 0,
+ };
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
- if (!status) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_STATUS_NULL,
- "Status Empty");
- goto out;
- }
- gf_msg_debug (this->name, 0, "slave = %s", slave);
-
- runinit (&runner);
- runner_add_args (&runner, GSYNCD_PREFIX"/gsyncd", "--create",
- status, "-c", NULL);
- runner_argprintf (&runner, "%s/"GEOREP"/%s_%s_%s/gsyncd.conf",
- priv->workdir, master, slave_host, slave_vol);
- runner_argprintf (&runner, "--iprefix=%s", DATADIR);
- runner_argprintf (&runner, ":%s", master);
- runner_add_args (&runner, slave, NULL);
- synclock_unlock (&priv->big_lock);
- ret = runner_run (&runner);
- synclock_lock (&priv->big_lock);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_STATUSFILE_CREATE_FAILED,
- "Creating status file failed.");
- ret = -1;
- goto out;
- }
+ this = THIS;
+ GF_ASSERT(this);
- ret = 0;
+ if (THIS)
+ priv = THIS->private;
+ if (priv == NULL) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_PRIV_NOT_FOUND,
+ "priv of glusterd not present");
+ goto out;
+ }
+
+ if (!status) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATUS_NULL, "Status Empty");
+ goto out;
+ }
+ gf_msg_debug(this->name, 0, "slave = %s", slave);
+
+ runinit(&runner);
+ runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "--create", status, "-c",
+ NULL);
+ runner_argprintf(&runner, "%s/" GEOREP "/%s_%s_%s/gsyncd.conf",
+ priv->workdir, master, slave_host, slave_vol);
+ runner_argprintf(&runner, "--iprefix=%s", DATADIR);
+ runner_argprintf(&runner, ":%s", master);
+ runner_add_args(&runner, slave, NULL);
+ synclock_unlock(&priv->big_lock);
+ ret = runner_run(&runner);
+ synclock_lock(&priv->big_lock);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATUSFILE_CREATE_FAILED,
+ "Creating status file failed.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
out:
- gf_msg_debug (this->name, 0, "returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "returning %d", ret);
+ return ret;
}
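
The unlock/run/relock sequence above is the pattern glusterd uses around any
long-running external command: the daemon-wide big_lock is released for the
duration of runner_run() so other operations are not serialized behind the
gsyncd invocation. A simplified stand-in using a plain pthread mutex and
system() (big_lock and the command here are illustrative, not the gluster
synclock/runner API):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller holds big_lock; it is released only around the exec. */
static int
run_unlocked(const char *cmd)
{
    int ret;

    pthread_mutex_unlock(&big_lock); /* let other ops proceed */
    ret = system(cmd);               /* external gsyncd-style helper */
    pthread_mutex_lock(&big_lock);   /* re-acquire before touching state */

    return (ret == 0) ? 0 : -1;
}
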
static int
-glusterd_verify_slave (char *volname, char *slave_url, char *slave_vol,
- int ssh_port, char **op_errstr,
- gf_boolean_t *is_force_blocker)
+glusterd_verify_slave(char *volname, char *slave_url, char *slave_vol,
+ int ssh_port, char **op_errstr,
+ gf_boolean_t *is_force_blocker)
{
- int32_t ret = -1;
- runner_t runner = {0,};
- char log_file_path[PATH_MAX] = "";
- char buf[PATH_MAX] = "";
- char *tmp = NULL;
- char *slave_url_buf = NULL;
- char *save_ptr = NULL;
- char *slave_user = NULL;
- char *slave_ip = NULL;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
- GF_ASSERT (volname);
- GF_ASSERT (slave_url);
- GF_ASSERT (slave_vol);
-
- /* Fetch the slave_user and slave_ip from the slave_url.
- * If the slave_user is not present. Use "root"
- */
- if (strstr(slave_url, "@")) {
- slave_url_buf = gf_strdup (slave_url);
- if (!slave_url_buf)
- goto out;
-
- slave_user = strtok_r (slave_url_buf, "@", &save_ptr);
- slave_ip = strtok_r (NULL, "@", &save_ptr);
- } else {
- slave_user = "root";
- slave_ip = slave_url;
- }
-
- if (!slave_user || !slave_ip) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_URL_INVALID,
- "Invalid slave url.");
- goto out;
+ int32_t ret = -1;
+ runner_t runner = {
+ 0,
+ };
+ char log_file_path[PATH_MAX] = "";
+ char buf[PATH_MAX] = "";
+ char *tmp = NULL;
+ char *slave_url_buf = NULL;
+ char *save_ptr = NULL;
+ char *slave_user = NULL;
+ char *slave_ip = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+ GF_ASSERT(volname);
+ GF_ASSERT(slave_url);
+ GF_ASSERT(slave_vol);
+
+ /* Fetch the slave_user and slave_ip from the slave_url.
+     * If the slave_user is not present, use "root".
+ */
+ if (strstr(slave_url, "@")) {
+ slave_url_buf = gf_strdup(slave_url);
+ if (!slave_url_buf)
+ goto out;
+
+ slave_user = strtok_r(slave_url_buf, "@", &save_ptr);
+ slave_ip = strtok_r(NULL, "@", &save_ptr);
+ } else {
+ slave_user = "root";
+ slave_ip = slave_url;
+ }
+
+ if (!slave_user || !slave_ip) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_URL_INVALID,
+ "Invalid slave url.");
+ goto out;
+ }
+
+ snprintf(log_file_path, sizeof(log_file_path),
+ DEFAULT_LOG_FILE_DIRECTORY "/create_verify_log");
+
+ runinit(&runner);
+ runner_add_args(&runner, GSYNCD_PREFIX "/gverify.sh", NULL);
+ runner_argprintf(&runner, "%s", volname);
+ runner_argprintf(&runner, "%s", slave_user);
+ runner_argprintf(&runner, "%s", slave_ip);
+ runner_argprintf(&runner, "%s", slave_vol);
+ runner_argprintf(&runner, "%d", ssh_port);
+ runner_argprintf(&runner, "%s", log_file_path);
+ gf_msg_debug(this->name, 0, "gverify Args = %s %s %s %s %s %s %s",
+ runner.argv[0], runner.argv[1], runner.argv[2], runner.argv[3],
+ runner.argv[4], runner.argv[5], runner.argv[6]);
+ runner_redir(&runner, STDOUT_FILENO, RUN_PIPE);
+ synclock_unlock(&priv->big_lock);
+ ret = runner_run(&runner);
+ synclock_lock(&priv->big_lock);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_SLAVE,
+ "Not a valid slave");
+ ret = glusterd_gsync_read_frm_status(log_file_path, buf, sizeof(buf));
+ if (ret <= 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_READ_ERROR,
+ "Unable to read from %s", log_file_path);
+ goto out;
}
- snprintf (log_file_path, sizeof(log_file_path),
- DEFAULT_LOG_FILE_DIRECTORY"/create_verify_log");
-
- runinit (&runner);
- runner_add_args (&runner, GSYNCD_PREFIX"/gverify.sh", NULL);
- runner_argprintf (&runner, "%s", volname);
- runner_argprintf (&runner, "%s", slave_user);
- runner_argprintf (&runner, "%s", slave_ip);
- runner_argprintf (&runner, "%s", slave_vol);
- runner_argprintf (&runner, "%d", ssh_port);
- runner_argprintf (&runner, "%s", log_file_path);
- gf_msg_debug (this->name, 0, "gverify Args = %s %s %s %s %s %s %s",
- runner.argv[0], runner.argv[1], runner.argv[2],
- runner.argv[3], runner.argv[4], runner.argv[5],
- runner.argv[6]);
- runner_redir (&runner, STDOUT_FILENO, RUN_PIPE);
- synclock_unlock (&priv->big_lock);
- ret = runner_run (&runner);
- synclock_lock (&priv->big_lock);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_SLAVE,
- "Not a valid slave");
- ret = glusterd_gsync_read_frm_status (log_file_path,
- buf, sizeof(buf));
- if (ret <= 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_READ_ERROR,
- "Unable to read from %s", log_file_path);
- goto out;
- }
-
- /* Tokenize the error message from gverify.sh to figure out
- * if the error is a force blocker or not. */
- tmp = strtok_r (buf, "|", &save_ptr);
- if (!tmp) {
- ret = -1;
- goto out;
- }
- if (!strcmp (tmp, "FORCE_BLOCKER"))
- *is_force_blocker = 1;
- else {
- /* No FORCE_BLOCKER flag present so all that is
- * present is the error message. */
- *is_force_blocker = 0;
- *op_errstr = gf_strdup (tmp);
- ret = -1;
- goto out;
- }
-
- /* Copy rest of the error message to op_errstr */
- tmp = strtok_r (NULL, "|", &save_ptr);
- if (tmp)
- *op_errstr = gf_strdup (tmp);
- ret = -1;
- goto out;
- }
- ret = 0;
+ /* Tokenize the error message from gverify.sh to figure out
+ * if the error is a force blocker or not. */
+ tmp = strtok_r(buf, "|", &save_ptr);
+ if (!tmp) {
+ ret = -1;
+ goto out;
+ }
+ if (!strcmp(tmp, "FORCE_BLOCKER"))
+ *is_force_blocker = 1;
+ else {
+        /* No FORCE_BLOCKER flag present, so all that is
+         * present is the error message. */
+ *is_force_blocker = 0;
+ *op_errstr = gf_strdup(tmp);
+ ret = -1;
+ goto out;
+ }
+
+ /* Copy rest of the error message to op_errstr */
+ tmp = strtok_r(NULL, "|", &save_ptr);
+ if (tmp)
+ *op_errstr = gf_strdup(tmp);
+ ret = -1;
+ goto out;
+ }
+ ret = 0;
out:
- GF_FREE (slave_url_buf);
- sys_unlink (log_file_path);
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ GF_FREE(slave_url_buf);
+ sys_unlink(log_file_path);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
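
On failure, gverify.sh leaves its verdict in the log file either as
"FORCE_BLOCKER|<message>" or as a bare message, and the function above
splits on '|' to tell the two apart. A self-contained sketch of that parse
(parse_verdict() is a hypothetical helper, not a gluster API):

#include <stdio.h>
#include <string.h>

/* Returns 0 and fills *blocker/*msg on success, -1 on empty input. */
static int
parse_verdict(char *buf, int *blocker, const char **msg)
{
    char *save = NULL;
    char *tok = strtok_r(buf, "|", &save);

    if (!tok)
        return -1;

    if (strcmp(tok, "FORCE_BLOCKER") == 0) {
        *blocker = 1;
        *msg = strtok_r(NULL, "|", &save); /* message after the flag */
    } else {
        *blocker = 0;
        *msg = tok; /* whole buffer is the message */
    }
    return 0;
}

int
main(void)
{
    char line[] = "FORCE_BLOCKER|slave volume is not empty";
    int blocker = 0;
    const char *msg = NULL;

    if (parse_verdict(line, &blocker, &msg) == 0)
        printf("blocker=%d msg=%s\n", blocker, msg ? msg : "(none)");
    return 0;
}
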
/** @slave_ip remains unmodified */
int
-glusterd_geo_rep_parse_slave (char *slave_url,
- char **hostname, char **op_errstr)
+glusterd_geo_rep_parse_slave(char *slave_url, char **hostname, char **op_errstr)
{
- int ret = -1;
- char *tmp = NULL;
- char *save_ptr = NULL;
- char *host = NULL;
- char errmsg[PATH_MAX] = "";
- char *saved_url = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (slave_url);
- GF_ASSERT (*slave_url);
-
- saved_url = gf_strdup (slave_url);
- if (!saved_url)
- goto out;
+ int ret = -1;
+ char *tmp = NULL;
+ char *save_ptr = NULL;
+ char *host = NULL;
+ char errmsg[PATH_MAX] = "";
+ char *saved_url = NULL;
+ xlator_t *this = NULL;
- /* Checking if hostname has user specified */
- host = strstr (saved_url, "@");
- if (!host) { /* no user specified */
- if (hostname) {
- *hostname = gf_strdup (saved_url);
- if (!*hostname)
- goto out;
- }
+ this = THIS;
+ GF_ASSERT(this);
- ret = 0;
- goto out;
- } else {
- /* Moving the host past the '@' and checking if the
- * actual hostname also has '@' */
- host++;
- if (strstr (host, "@")) {
- gf_msg_debug (this->name, 0, "host = %s", host);
- ret = snprintf (errmsg, sizeof(errmsg) - 1,
- "Invalid Hostname (%s).", host);
- errmsg[ret] = '\0';
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "%s", errmsg);
- ret = -1;
- if (op_errstr)
- *op_errstr = gf_strdup (errmsg);
- goto out;
- }
+ GF_ASSERT(slave_url);
+ GF_ASSERT(*slave_url);
- ret = -1;
+ saved_url = gf_strdup(slave_url);
+ if (!saved_url)
+ goto out;
- /**
- * preliminary check for valid slave format.
- */
- tmp = strtok_r (saved_url, "@", &save_ptr);
- tmp = strtok_r (NULL, "@", &save_ptr);
- if (!tmp)
- goto out;
- if (hostname) {
- *hostname = gf_strdup (tmp);
- if (!*hostname)
- goto out;
- }
+ /* Checking if hostname has user specified */
+ host = strstr(saved_url, "@");
+ if (!host) { /* no user specified */
+ if (hostname) {
+ *hostname = gf_strdup(saved_url);
+ if (!*hostname)
+ goto out;
}
ret = 0;
+ goto out;
+ } else {
+ /* Moving the host past the '@' and checking if the
+ * actual hostname also has '@' */
+ host++;
+ if (strstr(host, "@")) {
+ gf_msg_debug(this->name, 0, "host = %s", host);
+ ret = snprintf(errmsg, sizeof(errmsg) - 1, "Invalid Hostname (%s).",
+ host);
+ errmsg[ret] = '\0';
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s",
+ errmsg);
+ ret = -1;
+ if (op_errstr)
+ *op_errstr = gf_strdup(errmsg);
+ goto out;
+ }
+
+ ret = -1;
+
+ /**
+ * preliminary check for valid slave format.
+ */
+ tmp = strtok_r(saved_url, "@", &save_ptr);
+ tmp = strtok_r(NULL, "@", &save_ptr);
+ if (!tmp)
+ goto out;
+ if (hostname) {
+ *hostname = gf_strdup(tmp);
+ if (!*hostname)
+ goto out;
+ }
+ }
+
+ ret = 0;
out:
- GF_FREE (saved_url);
- if (ret)
- if (hostname)
- GF_FREE (*hostname);
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ GF_FREE(saved_url);
+ if (ret)
+ if (hostname)
+ GF_FREE(*hostname);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
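
The accepted slave URL shapes here are "host::vol" and "user@host::vol", and
the parse rejects a second '@' inside the host part. A hedged sketch of just
the hostname extraction (names are illustrative):

#include <stdio.h>
#include <string.h>

/* Return the host part of "[user@]host", or NULL on a malformed URL. */
static const char *
slave_hostname(const char *url, char *err, size_t errlen)
{
    const char *at = strchr(url, '@');

    if (!at)
        return url;            /* no user part at all */
    if (strchr(at + 1, '@')) { /* a second '@' is invalid */
        snprintf(err, errlen, "Invalid Hostname (%s).", at + 1);
        return NULL;
    }
    return at + 1;             /* skip "user@" */
}
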
/* Return -1 only if there is a match in volume uuid */
static int
-get_slavehost_from_voluuid (dict_t *dict, char *key, data_t *value, void *data)
+get_slavehost_from_voluuid(dict_t *dict, char *key, data_t *value, void *data)
{
- char *slave_info = NULL;
- char *tmp = NULL;
- char *slave_host = NULL;
- xlator_t *this = NULL;
- struct slave_vol_config *slave_vol = NULL;
- int i = 0;
- int ret = -1;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
-
- slave_vol = data;
- slave_info = value->data;
+ char *slave_info = NULL;
+ char *tmp = NULL;
+ char *slave_host = NULL;
+ xlator_t *this = NULL;
+ struct slave_vol_config *slave_vol = NULL;
+ int i = 0;
+ int ret = -1;
- gf_msg_debug (this->name, 0, "slave_info:%s !", slave_info);
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
- if (!(slave_info) || strlen (slave_info) == 0) {
- /* no slaves present, peace */
- ret = 0;
- goto out;
- }
+ slave_vol = data;
+ slave_info = value->data;
- /* slave format:
- * master_node_uuid:ssh://slave_host::slave_vol:slave_voluuid */
- while (i++ < 5) {
- slave_info = strchr (slave_info, ':');
- if (slave_info)
- slave_info++;
- else
- break;
- }
+ gf_msg_debug(this->name, 0, "slave_info:%s !", slave_info);
- if (!(slave_info) || strlen(slave_info) == 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVE_VOL_PARSE_FAIL,
- "slave_info format is wrong!");
+ if (!(slave_info) || strlen(slave_info) == 0) {
+ /* no slaves present, peace */
+ ret = 0;
+ goto out;
+ }
+
+ /* slave format:
+ * master_node_uuid:ssh://slave_host::slave_vol:slave_voluuid */
+ while (i++ < 5) {
+ slave_info = strchr(slave_info, ':');
+ if (slave_info)
+ slave_info++;
+ else
+ break;
+ }
+
+ if (!(slave_info) || strlen(slave_info) == 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_VOL_PARSE_FAIL,
+ "slave_info format is wrong!");
+ ret = -2;
+ goto out;
+ } else {
+ if (strcmp(slave_info, slave_vol->slave_voluuid) == 0) {
+ ret = -1;
+
+            /* get the corresponding slave host for reference */
+ slave_host = value->data;
+ slave_host = strstr(slave_host, "://");
+ if (slave_host) {
+ slave_host += 3;
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_VOL_PARSE_FAIL,
+ "Invalid slave_host format!");
ret = -2;
goto out;
- } else {
- if (strcmp (slave_info, slave_vol->slave_voluuid) == 0) {
- ret = -1;
-
- /* get corresponding slave host for reference*/
- slave_host = value->data;
- slave_host = strstr (slave_host, "://");
- if (slave_host) {
- slave_host += 3;
- } else {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVE_VOL_PARSE_FAIL,
- "Invalid slave_host format!");
- ret = -2;
- goto out;
- }
- /* To go past username in non-root geo-rep session */
- tmp = strchr (slave_host, '@');
- if (tmp) {
- if ((tmp - slave_host) >= LOGIN_NAME_MAX) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVE_VOL_PARSE_FAIL,
- "Invalid slave user length in %s",
- slave_host);
- ret = -2;
- goto out;
- }
- strncpy (slave_vol->old_slvuser, slave_host,
- (tmp - slave_host));
- slave_vol->old_slvuser[(tmp - slave_host) + 1]
- = '\0';
- slave_host = tmp + 1;
- } else
- strcpy (slave_vol->old_slvuser, "root");
-
- tmp = strchr (slave_host, ':');
- if (!tmp) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVE_VOL_PARSE_FAIL,
- "Invalid slave_host!");
- ret = -2;
- goto out;
- }
-
- strncpy (slave_vol->old_slvhost, slave_host,
- (tmp - slave_host));
- slave_vol->old_slvhost[(tmp - slave_host) + 1] = '\0';
+ }
+ /* To go past username in non-root geo-rep session */
+ tmp = strchr(slave_host, '@');
+ if (tmp) {
+ if ((tmp - slave_host) >= LOGIN_NAME_MAX) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SLAVE_VOL_PARSE_FAIL,
+ "Invalid slave user length in %s", slave_host);
+ ret = -2;
+ goto out;
+ }
+ strncpy(slave_vol->old_slvuser, slave_host, (tmp - slave_host));
+                slave_vol->old_slvuser[tmp - slave_host] = '\0';
+ slave_host = tmp + 1;
+ } else
+ strcpy(slave_vol->old_slvuser, "root");
+
+ tmp = strchr(slave_host, ':');
+ if (!tmp) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_VOL_PARSE_FAIL,
+ "Invalid slave_host!");
+ ret = -2;
+ goto out;
+ }
- goto out;
- }
+ strncpy(slave_vol->old_slvhost, slave_host, (tmp - slave_host));
+            slave_vol->old_slvhost[tmp - slave_host] = '\0';
+
+ goto out;
}
+ }
- ret = 0;
+ ret = 0;
out:
- return ret;
+ return ret;
}
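
The field walk above relies on the stored slave entry having the fixed shape
"master_node_uuid:ssh://slave_host::slave_vol:slave_voluuid", so skipping
five ':' separators lands on the volume uuid. A small standalone sketch of
that walk (the entry string is made up for illustration):

#include <stdio.h>
#include <string.h>

static const char *
slave_voluuid_of(const char *entry)
{
    const char *p = entry;
    int i;

    for (i = 0; i < 5 && p; i++) {
        p = strchr(p, ':');
        if (p)
            p++; /* step past each separator */
    }
    return (p && *p) ? p : NULL; /* NULL if the format is wrong */
}

int
main(void)
{
    const char *e =
        "11111111-2222:ssh://geo1.example.com::gvol:aaaa-bbbb-cccc";
    const char *uuid = slave_voluuid_of(e);

    printf("voluuid: %s\n", uuid ? uuid : "(bad format)");
    return 0;
}
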
/* Given slave host and slave volume, check whether slave volume uuid
@@ -3059,447 +3058,443 @@ out:
* If slave volume uuid is present, get corresponding slave host
* for reference */
static int
-glusterd_get_slavehost_from_voluuid (glusterd_volinfo_t *volinfo,
- char *slave_host, char *slave_vol,
- struct slave_vol_config *slave1)
+glusterd_get_slavehost_from_voluuid(glusterd_volinfo_t *volinfo,
+ char *slave_host, char *slave_vol,
+ struct slave_vol_config *slave1)
{
- int ret = -1;
- xlator_t *this = NULL;
+ int ret = -1;
+ xlator_t *this = NULL;
- this = THIS;
+ this = THIS;
- GF_VALIDATE_OR_GOTO (this->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO(this->name, volinfo, out);
- ret = dict_foreach (volinfo->gsync_slaves, get_slavehost_from_voluuid,
- slave1);
+ ret = dict_foreach(volinfo->gsync_slaves, get_slavehost_from_voluuid,
+ slave1);
out:
- return ret;
+ return ret;
}
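
The wrapper above uses the foreach callback's return value as an out-of-band
signal: -1 means "slave volume uuid matched", -2 "parse error", 0 "keep
scanning". A generic sketch of that early-exit convention, with a
hypothetical table in place of the gluster dict:

#include <stddef.h>

typedef int (*visit_fn)(const char *key, const char *val, void *data);

/* Hypothetical stand-in for dict_foreach(): stop early on non-zero. */
static int
foreach_entry(const char **keys, const char **vals, size_t n, visit_fn fn,
              void *data)
{
    size_t i;
    int ret = 0;

    for (i = 0; i < n; i++) {
        ret = fn(keys[i], vals[i], data);
        if (ret != 0) /* -1: match found, -2: parse error */
            break;
    }
    return ret;
}
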
int
-glusterd_op_stage_gsync_create (dict_t *dict, char **op_errstr)
+glusterd_op_stage_gsync_create(dict_t *dict, char **op_errstr)
{
- char *down_peerstr = NULL;
- char *slave = NULL;
- char *volname = NULL;
- char *host_uuid = NULL;
- char *statefile = NULL;
- char *slave_url = NULL;
- char *slave_host = NULL;
- char *slave_vol = NULL;
- char *conf_path = NULL;
- char errmsg[PATH_MAX] = "";
- char common_pem_file[PATH_MAX] = "";
- char hook_script[PATH_MAX] = "";
- char uuid_str [64] = "";
- int ret = -1;
- int is_pem_push = -1;
- int ssh_port = 22;
- gf_boolean_t is_force = -1;
- gf_boolean_t is_no_verify = -1;
- gf_boolean_t is_force_blocker = -1;
- gf_boolean_t exists = _gf_false;
- gf_boolean_t is_template_in_use = _gf_false;
- glusterd_conf_t *conf = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- struct stat stbuf = {0,};
- xlator_t *this = NULL;
- struct slave_vol_config slave1 = {{0},};
- char old_slave_url[SLAVE_URL_INFO_MAX] = {0};
- char old_confpath[PATH_MAX] = {0};
- gf_boolean_t is_running = _gf_false;
- char *statedir = NULL;
- char statefiledir[PATH_MAX] = {0,};
- gf_boolean_t is_different_slavehost = _gf_false;
- gf_boolean_t is_different_username = _gf_false;
- char *slave_user = NULL;
- char *save_ptr = NULL;
- char *slave_url_buf = NULL;
- int32_t len = 0;
-
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
- GF_ASSERT (conf);
-
- ret = glusterd_op_gsync_args_get (dict, op_errstr, &volname,
- &slave, &host_uuid);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_ARG_FETCH_ERROR,
- "Unable to fetch arguments");
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return -1;
+ char *down_peerstr = NULL;
+ char *slave = NULL;
+ char *volname = NULL;
+ char *host_uuid = NULL;
+ char *statefile = NULL;
+ char *slave_url = NULL;
+ char *slave_host = NULL;
+ char *slave_vol = NULL;
+ char *conf_path = NULL;
+ char errmsg[PATH_MAX] = "";
+ char common_pem_file[PATH_MAX] = "";
+ char hook_script[PATH_MAX] = "";
+ char uuid_str[64] = "";
+ int ret = -1;
+ int is_pem_push = -1;
+ int ssh_port = 22;
+ gf_boolean_t is_force = -1;
+ gf_boolean_t is_no_verify = -1;
+ gf_boolean_t is_force_blocker = -1;
+ gf_boolean_t exists = _gf_false;
+ gf_boolean_t is_template_in_use = _gf_false;
+ glusterd_conf_t *conf = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ struct stat stbuf = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ struct slave_vol_config slave1 = {
+ {0},
+ };
+ char old_slave_url[SLAVE_URL_INFO_MAX] = {0};
+ char old_confpath[PATH_MAX] = {0};
+ gf_boolean_t is_running = _gf_false;
+ char *statedir = NULL;
+ char statefiledir[PATH_MAX] = {
+ 0,
+ };
+ gf_boolean_t is_different_slavehost = _gf_false;
+ gf_boolean_t is_different_username = _gf_false;
+ char *slave_user = NULL;
+ char *save_ptr = NULL;
+ char *slave_url_buf = NULL;
+ int32_t len = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ ret = glusterd_op_gsync_args_get(dict, op_errstr, &volname, &slave,
+ &host_uuid);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_ARG_FETCH_ERROR,
+ "Unable to fetch arguments");
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return -1;
+ }
+
+ if (conf->op_version < 2) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNSUPPORTED_VERSION,
+ "Op Version not supported.");
+ snprintf(errmsg, sizeof(errmsg),
+ "One or more nodes do not"
+ " support the required op version.");
+ *op_errstr = gf_strdup(errmsg);
+ ret = -1;
+ goto out;
+ }
+
+ exists = glusterd_check_volume_exists(volname);
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if ((ret) || (!exists)) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_FOUND,
+ "volume name does not exist");
+ snprintf(errmsg, sizeof(errmsg),
+ "Volume name %s does not"
+ " exist",
+ volname);
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return -1;
+ }
+
+ ret = glusterd_get_slave_details_confpath(volinfo, dict, &slave_url,
+ &slave_host, &slave_vol,
+ &conf_path, op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVEINFO_FETCH_ERROR,
+ "Unable to fetch slave or confpath details.");
+ ret = -1;
+ goto out;
+ }
+
+ is_force = dict_get_str_boolean(dict, "force", _gf_false);
+
+ uuid_utoa_r(MY_UUID, uuid_str);
+ if (!strcmp(uuid_str, host_uuid)) {
+ ret = glusterd_are_vol_all_peers_up(volinfo, &conf->peers,
+ &down_peerstr);
+ if ((ret == _gf_false) && !is_force) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Peer %s,"
+ " which is a part of %s volume, is"
+ " down. Please bring up the peer and"
+ " retry.",
+ down_peerstr, volinfo->volname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_DISCONNECTED, "%s",
+ errmsg);
+ *op_errstr = gf_strdup(errmsg);
+ GF_FREE(down_peerstr);
+ down_peerstr = NULL;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return -1;
+ } else if (ret == _gf_false) {
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_PEER_DISCONNECTED,
+ "Peer %s, which is a part of %s volume, is"
+ " down. Force creating geo-rep session."
+ " On bringing up the peer, re-run"
+ " \"gluster system:: execute"
+ " gsec_create\" and \"gluster volume"
+ " geo-replication %s %s create push-pem"
+ " force\"",
+ down_peerstr, volinfo->volname, volinfo->volname, slave);
+ GF_FREE(down_peerstr);
+ down_peerstr = NULL;
+ }
+
+ ret = dict_get_int32(dict, "ssh_port", &ssh_port);
+ if (ret < 0 && ret != -ENOENT) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Fetching ssh_port failed while "
+ "handling " GEOREP " options");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ errmsg);
+ goto out;
+ }
+
+ is_no_verify = dict_get_str_boolean(dict, "no_verify", _gf_false);
+
+ if (!is_no_verify) {
+            /* Check that the slave host is pingable, passwordless ssh
+             * login is set up, the slave volume exists and is empty,
+             * and that the slave has enough memory; with force, these
+             * checks are bypassed unless the error is a force blocker. */
+ ret = glusterd_verify_slave(volname, slave_url, slave_vol, ssh_port,
+ op_errstr, &is_force_blocker);
+ if (ret) {
+ if (is_force && !is_force_blocker) {
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_INVALID_SLAVE,
+ "%s is not a valid slave "
+ "volume. Error: %s. Force "
+ "creating geo-rep"
+ " session.",
+ slave, *op_errstr);
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_SLAVE,
+ "%s is not a valid slave "
+ "volume. Error: %s",
+ slave, *op_errstr);
+ ret = -1;
+
+ goto out;
+ }
+ }
}
- if (conf->op_version < 2) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_UNSUPPORTED_VERSION,
- "Op Version not supported.");
- snprintf (errmsg, sizeof(errmsg), "One or more nodes do not"
- " support the required op version.");
- *op_errstr = gf_strdup (errmsg);
+ ret = dict_get_int32(dict, "push_pem", &is_pem_push);
+ if (!ret && is_pem_push) {
+ ret = snprintf(common_pem_file, sizeof(common_pem_file),
+ "%s" GLUSTERD_COMMON_PEM_PUB_FILE, conf->workdir);
+ if ((ret < 0) || (ret >= sizeof(common_pem_file))) {
ret = -1;
goto out;
- }
-
- exists = glusterd_check_volume_exists (volname);
- ret = glusterd_volinfo_find (volname, &volinfo);
- if ((ret) || (!exists)) {
- gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_FOUND,
- "volume name does not exist");
- snprintf (errmsg, sizeof(errmsg), "Volume name %s does not"
- " exist", volname);
- *op_errstr = gf_strdup (errmsg);
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return -1;
- }
+ }
- ret = glusterd_get_slave_details_confpath (volinfo, dict, &slave_url,
- &slave_host, &slave_vol,
- &conf_path, op_errstr);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVEINFO_FETCH_ERROR,
- "Unable to fetch slave or confpath details.");
+ ret = snprintf(hook_script, sizeof(hook_script),
+ "%s" GLUSTERD_CREATE_HOOK_SCRIPT, conf->workdir);
+ if ((ret < 0) || (ret >= sizeof(hook_script))) {
ret = -1;
goto out;
- }
-
- is_force = dict_get_str_boolean (dict, "force", _gf_false);
-
- uuid_utoa_r (MY_UUID, uuid_str);
- if (!strcmp (uuid_str, host_uuid)) {
- ret = glusterd_are_vol_all_peers_up (volinfo,
- &conf->peers,
- &down_peerstr);
- if ((ret == _gf_false) && !is_force) {
- snprintf (errmsg, sizeof (errmsg), "Peer %s,"
- " which is a part of %s volume, is"
- " down. Please bring up the peer and"
- " retry.", down_peerstr,
- volinfo->volname);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PEER_DISCONNECTED,
- "%s", errmsg);
- *op_errstr = gf_strdup (errmsg);
- GF_FREE (down_peerstr);
- down_peerstr = NULL;
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return -1;
- } else if (ret == _gf_false) {
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_PEER_DISCONNECTED,
- "Peer %s, which is a part of %s volume, is"
- " down. Force creating geo-rep session."
- " On bringing up the peer, re-run"
- " \"gluster system:: execute"
- " gsec_create\" and \"gluster volume"
- " geo-replication %s %s create push-pem"
- " force\"", down_peerstr, volinfo->volname,
- volinfo->volname, slave);
- GF_FREE (down_peerstr);
- down_peerstr = NULL;
- }
-
- ret = dict_get_int32 (dict, "ssh_port", &ssh_port);
- if (ret < 0 && ret != -ENOENT) {
- snprintf (errmsg, sizeof (errmsg),
- "Fetching ssh_port failed while "
- "handling "GEOREP" options");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "%s", errmsg);
- goto out;
- }
+ }
- is_no_verify = dict_get_str_boolean (dict, "no_verify", _gf_false);
-
- if (!is_no_verify) {
- /* Checking if slave host is pingable, has proper passwordless
- * ssh login setup, slave volume is created, slave vol is empty,
- * and if it has enough memory and bypass in case of force if
- * the error is not a force blocker */
- ret = glusterd_verify_slave (volname, slave_url, slave_vol,
- ssh_port, op_errstr,
- &is_force_blocker);
- if (ret) {
- if (is_force && !is_force_blocker) {
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_INVALID_SLAVE,
- "%s is not a valid slave "
- "volume. Error: %s. Force "
- "creating geo-rep"
- " session.", slave,
- *op_errstr);
- } else {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_INVALID_SLAVE,
- "%s is not a valid slave "
- "volume. Error: %s",
- slave, *op_errstr);
- ret = -1;
-
- goto out;
- }
- }
+ ret = sys_lstat(common_pem_file, &stbuf);
+ if (ret) {
+ len = snprintf(errmsg, sizeof(errmsg),
+ "%s"
+ " required for push-pem is"
+ " not present. Please run"
+ " \"gluster system:: execute"
+ " gsec_create\"",
+ common_pem_file);
+ if (len < 0) {
+ strcpy(errmsg, "<error>");
}
+ gf_msg(this->name, GF_LOG_ERROR, ENOENT, GD_MSG_FILE_OP_FAILED,
+ "%s", errmsg);
+ *op_errstr = gf_strdup(errmsg);
+ ret = -1;
+ goto out;
+ }
- ret = dict_get_int32 (dict, "push_pem", &is_pem_push);
- if (!ret && is_pem_push) {
- ret = snprintf (common_pem_file,
- sizeof(common_pem_file),
- "%s"GLUSTERD_COMMON_PEM_PUB_FILE,
- conf->workdir);
- if ((ret < 0) || (ret >= sizeof(common_pem_file))) {
- ret = -1;
- goto out;
- }
-
- ret = snprintf (hook_script, sizeof(hook_script),
- "%s"GLUSTERD_CREATE_HOOK_SCRIPT,
- conf->workdir);
- if ((ret < 0) || (ret >= sizeof(hook_script))) {
- ret = -1;
- goto out;
- }
-
- ret = sys_lstat (common_pem_file, &stbuf);
- if (ret) {
- len = snprintf (errmsg, sizeof (errmsg), "%s"
- " required for push-pem is"
- " not present. Please run"
- " \"gluster system:: execute"
- " gsec_create\"",
- common_pem_file);
- if (len < 0) {
- strcpy(errmsg, "<error>");
- }
- gf_msg (this->name, GF_LOG_ERROR, ENOENT,
- GD_MSG_FILE_OP_FAILED,
- "%s", errmsg);
- *op_errstr = gf_strdup (errmsg);
- ret = -1;
- goto out;
- }
-
- ret = sys_lstat (hook_script, &stbuf);
- if (ret) {
- len = snprintf (errmsg, sizeof (errmsg),
- "The hook-script (%s) "
- "required for push-pem is not "
- "present. Please install the "
- "hook-script and retry",
- hook_script);
- if (len < 0) {
- strcpy(errmsg, "<error>");
- }
- gf_msg (this->name, GF_LOG_ERROR, ENOENT,
- GD_MSG_FILE_OP_FAILED, "%s", errmsg);
- *op_errstr = gf_strdup (errmsg);
- ret = -1;
- goto out;
- }
-
- if (!S_ISREG(stbuf.st_mode)) {
- len = snprintf (errmsg, sizeof (errmsg), "%s"
- " required for push-pem is"
- " not a regular file. Please"
- " run \"gluster system:: "
- "execute gsec_create\"",
- common_pem_file);
- if (len < 0) {
- strcpy(errmsg, "<error>");
- }
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REG_FILE_MISSING, "%s", errmsg);
- ret = -1;
- goto out;
- }
+ ret = sys_lstat(hook_script, &stbuf);
+ if (ret) {
+ len = snprintf(errmsg, sizeof(errmsg),
+ "The hook-script (%s) "
+ "required for push-pem is not "
+ "present. Please install the "
+ "hook-script and retry",
+ hook_script);
+ if (len < 0) {
+ strcpy(errmsg, "<error>");
}
- }
-
- ret = glusterd_get_statefile_name (volinfo, slave,
- conf_path, &statefile,
- &is_template_in_use);
- if (ret) {
- if (!strstr(slave, "::"))
- snprintf (errmsg, sizeof (errmsg),
- "%s is not a valid slave url.", slave);
- else
- snprintf (errmsg, sizeof (errmsg), "Please check gsync "
- "config file. Unable to get statefile's name");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_STATEFILE_NAME_NOT_FOUND,
- "%s", errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, ENOENT, GD_MSG_FILE_OP_FAILED,
+ "%s", errmsg);
+ *op_errstr = gf_strdup(errmsg);
ret = -1;
goto out;
- }
+ }
- ret = dict_set_str (dict, "statefile", statefile);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Unable to store statefile path");
+ if (!S_ISREG(stbuf.st_mode)) {
+ len = snprintf(errmsg, sizeof(errmsg),
+ "%s"
+ " required for push-pem is"
+ " not a regular file. Please"
+ " run \"gluster system:: "
+ "execute gsec_create\"",
+ common_pem_file);
+ if (len < 0) {
+ strcpy(errmsg, "<error>");
+ }
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REG_FILE_MISSING,
+ "%s", errmsg);
+ ret = -1;
goto out;
+ }
}
+ }
- if (snprintf (statefiledir, sizeof (statefiledir), "%s", statefile) >=
- sizeof (statefiledir)) {
- snprintf (errmsg, sizeof (errmsg),
- "Failed copying statefiledir");
- goto out;
- }
- statedir = dirname (statefiledir);
+ ret = glusterd_get_statefile_name(volinfo, slave, conf_path, &statefile,
+ &is_template_in_use);
+ if (ret) {
+ if (!strstr(slave, "::"))
+ snprintf(errmsg, sizeof(errmsg), "%s is not a valid slave url.",
+ slave);
+ else
+ snprintf(errmsg, sizeof(errmsg),
+ "Please check gsync "
+ "config file. Unable to get statefile's name");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATEFILE_NAME_NOT_FOUND,
+ "%s", errmsg);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_str(dict, "statefile", statefile);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to store statefile path");
+ goto out;
+ }
+
+ if (snprintf(statefiledir, sizeof(statefiledir), "%s", statefile) >=
+ sizeof(statefiledir)) {
+ snprintf(errmsg, sizeof(errmsg), "Failed copying statefiledir");
+ goto out;
+ }
+ statedir = dirname(statefiledir);
+
+ ret = sys_lstat(statedir, &stbuf);
+ if (!ret && !is_force) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Session between %s"
+ " and %s is already created.",
+ volinfo->volname, slave);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SESSION_ALREADY_EXIST, "%s",
+ errmsg);
+ ret = -1;
+ goto out;
+ } else if (!ret)
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_FORCE_CREATE_SESSION,
+ "Session between %s and %s is already created. Force"
+ " creating again.",
+ volinfo->volname, slave);
+
+ ret = glusterd_get_slave_voluuid(slave_host, slave_vol,
+ slave1.slave_voluuid);
+ if ((ret) || (strlen(slave1.slave_voluuid) == 0)) {
+ snprintf(errmsg, sizeof(errmsg), "Unable to get remote volume uuid.");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REMOTE_VOL_UUID_FAIL, "%s",
+ errmsg);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_dynstr_with_alloc(dict, "slave_voluuid",
+ slave1.slave_voluuid);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to set slave volume uuid in the dict");
+ goto out;
+ }
+
+ /* Check whether session is already created using slave volume uuid */
+ ret = glusterd_get_slavehost_from_voluuid(volinfo, slave_host, slave_vol,
+ &slave1);
+ if (ret == -1) {
+ if (!is_force) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Session between %s"
+ " and %s:%s is already created! Cannot create "
+ "with new slave:%s again!",
+ volinfo->volname, slave1.old_slvhost, slave_vol,
+ slave_host);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FORCE_CREATE_SESSION,
+ "Session between"
+ " %s and %s:%s is already created! "
+ "Cannot create with new slave:%s again!",
+ volinfo->volname, slave1.old_slvhost, slave_vol, slave_host);
+ goto out;
+ }
+
+        /* Now, check whether the session is already started. If so, warn! */
+ is_different_slavehost = (strcmp(slave_host, slave1.old_slvhost) != 0)
+ ? _gf_true
+ : _gf_false;
- ret = sys_lstat (statedir, &stbuf);
- if (!ret && !is_force) {
- snprintf (errmsg, sizeof (errmsg), "Session between %s"
- " and %s is already created.",
- volinfo->volname, slave);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SESSION_ALREADY_EXIST,
- "%s", errmsg);
+ if (strstr(slave_url, "@")) {
+ slave_url_buf = gf_strdup(slave_url);
+ if (!slave_url_buf) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "Unable to allocate memory");
ret = -1;
goto out;
- } else if (!ret)
- gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_FORCE_CREATE_SESSION,
- "Session between %s and %s is already created. Force"
- " creating again.", volinfo->volname, slave);
-
- ret = glusterd_get_slave_voluuid (slave_host, slave_vol,
- slave1.slave_voluuid);
- if ((ret) || (strlen(slave1.slave_voluuid) == 0)) {
- snprintf (errmsg, sizeof (errmsg),
- "Unable to get remote volume uuid.");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REMOTE_VOL_UUID_FAIL, "%s", errmsg);
+ }
+ slave_user = strtok_r(slave_url_buf, "@", &save_ptr);
+ } else
+ slave_user = "root";
+ is_different_username = (strcmp(slave_user, slave1.old_slvuser) != 0)
+ ? _gf_true
+ : _gf_false;
+
+ /* Do the check, only if different slave host/slave user */
+ if (is_different_slavehost || is_different_username) {
+ len = snprintf(old_confpath, sizeof(old_confpath),
+ "%s/" GEOREP "/%s_%s_%s/gsyncd.conf", conf->workdir,
+ volinfo->volname, slave1.old_slvhost, slave_vol);
+ if ((len < 0) || (len >= sizeof(old_confpath))) {
ret = -1;
goto out;
- }
+ }
- ret = dict_set_dynstr_with_alloc (dict, "slave_voluuid",
- slave1.slave_voluuid);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Unable to set slave volume uuid in the dict");
+ /* construct old slave url with (old) slave host */
+ len = snprintf(old_slave_url, sizeof(old_slave_url), "%s::%s",
+ slave1.old_slvhost, slave_vol);
+ if ((len < 0) || (len >= sizeof(old_slave_url))) {
+ ret = -1;
goto out;
- }
-
- /* Check whether session is already created using slave volume uuid */
- ret = glusterd_get_slavehost_from_voluuid (volinfo, slave_host,
- slave_vol, &slave1);
- if (ret == -1) {
- if (!is_force) {
- snprintf (errmsg, sizeof (errmsg), "Session between %s"
- " and %s:%s is already created! Cannot create "
- "with new slave:%s again!",
- volinfo->volname, slave1.old_slvhost,
- slave_vol, slave_host);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_FORCE_CREATE_SESSION, "Session between"
- " %s and %s:%s is already created! "
- "Cannot create with new slave:%s again!",
- volinfo->volname, slave1.old_slvhost,
- slave_vol, slave_host);
- goto out;
- }
-
- /* Now, check whether session is already started.If so, warn!*/
- is_different_slavehost =
- (strcmp (slave_host, slave1.old_slvhost) != 0)
- ? _gf_true : _gf_false;
-
- if (strstr (slave_url, "@")) {
- slave_url_buf = gf_strdup (slave_url);
- if (!slave_url_buf) {
- gf_msg (this->name, GF_LOG_ERROR, ENOMEM,
- GD_MSG_NO_MEMORY,
- "Unable to allocate memory");
- ret = -1;
- goto out;
- }
- slave_user = strtok_r (slave_url_buf, "@", &save_ptr);
- } else
- slave_user = "root";
- is_different_username =
- (strcmp (slave_user, slave1.old_slvuser) != 0)
- ? _gf_true : _gf_false;
-
- /* Do the check, only if different slave host/slave user */
- if (is_different_slavehost || is_different_username) {
- len = snprintf (old_confpath, sizeof(old_confpath),
- "%s/"GEOREP"/%s_%s_%s/gsyncd.conf",
- conf->workdir, volinfo->volname,
- slave1.old_slvhost, slave_vol);
- if ((len < 0) || (len >= sizeof(old_confpath))) {
- ret = -1;
- goto out;
- }
-
- /* construct old slave url with (old) slave host */
- len = snprintf (old_slave_url, sizeof(old_slave_url),
- "%s::%s", slave1.old_slvhost,
- slave_vol);
- if ((len < 0) || (len >= sizeof(old_slave_url))) {
- ret = -1;
- goto out;
- }
-
- ret = glusterd_check_gsync_running_local (volinfo->volname,
- old_slave_url, old_confpath, &is_running);
- if (_gf_true == is_running) {
- (void) snprintf (errmsg, sizeof(errmsg), "Geo"
- "-replication session between %s and %s"
- " is still active. Please stop the "
- "session and retry.",
- volinfo->volname, old_slave_url);
- ret = -1;
- goto out;
- }
- }
-
- ret = dict_set_dynstr_with_alloc (dict, "old_slavehost",
- slave1.old_slvhost);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Unable to set old_slavehost in the dict");
- goto out;
- }
+ }
- ret = dict_set_int32 (dict, "existing_session", _gf_true);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Unable to set existing_session in the dict");
- goto out;
- }
- } else if (ret == -2) {
- snprintf (errmsg, sizeof (errmsg), "get_slavehost_from_voluuid"
- " failed for %s::%s. Please check the glusterd logs.",
- slave_host, slave_vol);
- gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_FORCE_CREATE_SESSION,
- "get_slavehost_from_voluuid failed %s %s!!",
- slave_host, slave_vol);
+ ret = glusterd_check_gsync_running_local(
+ volinfo->volname, old_slave_url, old_confpath, &is_running);
+ if (_gf_true == is_running) {
+ (void)snprintf(errmsg, sizeof(errmsg),
+ "Geo"
+ "-replication session between %s and %s"
+ " is still active. Please stop the "
+ "session and retry.",
+ volinfo->volname, old_slave_url);
+ ret = -1;
goto out;
+ }
}
- ret = glusterd_verify_gsyncd_spawn (volinfo->volname, slave);
+ ret = dict_set_dynstr_with_alloc(dict, "old_slavehost",
+ slave1.old_slvhost);
if (ret) {
- snprintf (errmsg, sizeof (errmsg), "Unable to spawn gsyncd.");
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_SPAWN_FAILED,
- "%s", errmsg);
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to set old_slavehost in the dict");
+ goto out;
}
- ret = 0;
+ ret = dict_set_int32(dict, "existing_session", _gf_true);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to set existing_session in the dict");
+ goto out;
+ }
+ } else if (ret == -2) {
+ snprintf(errmsg, sizeof(errmsg),
+ "get_slavehost_from_voluuid"
+ " failed for %s::%s. Please check the glusterd logs.",
+ slave_host, slave_vol);
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_FORCE_CREATE_SESSION,
+ "get_slavehost_from_voluuid failed %s %s!!", slave_host,
+ slave_vol);
+ goto out;
+ }
+
+ ret = glusterd_verify_gsyncd_spawn(volinfo->volname, slave);
+ if (ret) {
+ snprintf(errmsg, sizeof(errmsg), "Unable to spawn gsyncd.");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_SPAWN_FAILED, "%s",
+ errmsg);
+ goto out;
+ }
+
+ ret = 0;
out:
- if (ret && errmsg[0] != '\0')
- *op_errstr = gf_strdup (errmsg);
+ if (ret && errmsg[0] != '\0')
+ *op_errstr = gf_strdup(errmsg);
- if (slave_url_buf)
- GF_FREE (slave_url_buf);
+ if (slave_url_buf)
+ GF_FREE(slave_url_buf);
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
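
One recurring probe in the staging code above is "does this session already
exist": take the directory of the state file and lstat() it. A compact
sketch of that check (function name is illustrative); the scratch copy is
needed because dirname() may modify its argument:

#include <libgen.h>
#include <limits.h>
#include <stdio.h>
#include <sys/stat.h>

/* Returns 1 if the session directory exists, 0 if not, -1 on error. */
static int
session_exists(const char *statefile)
{
    char scratch[PATH_MAX];
    struct stat st;

    if (snprintf(scratch, sizeof(scratch), "%s", statefile) >=
        (int)sizeof(scratch))
        return -1; /* path too long */

    return (lstat(dirname(scratch), &st) == 0) ? 1 : 0;
}
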
/* pre-condition check for geo-rep pause/resume.
@@ -3507,633 +3502,644 @@ out:
* -1 on any check failed.
*/
static int
-gd_pause_resume_validation (int type, glusterd_volinfo_t *volinfo,
- char *slave, char *statefile, char **op_errstr)
+gd_pause_resume_validation(int type, glusterd_volinfo_t *volinfo, char *slave,
+ char *statefile, char **op_errstr)
{
- int ret = 0;
- char errmsg[PATH_MAX] = {0,};
- char monitor_status[NAME_MAX] = {0,};
-
- GF_ASSERT (volinfo);
- GF_ASSERT (slave);
- GF_ASSERT (statefile);
- GF_ASSERT (op_errstr);
-
- ret = glusterd_gsync_read_frm_status (statefile, monitor_status,
- sizeof (monitor_status));
- if (ret <= 0) {
- snprintf (errmsg, sizeof(errmsg), "Pause check Failed:"
- " Geo-rep session is not setup");
- ret = -1;
- goto out;
- }
-
- if ( type == GF_GSYNC_OPTION_TYPE_PAUSE &&
- strstr (monitor_status, "Paused")) {
- snprintf (errmsg, sizeof(errmsg), "Geo-replication"
- " session between %s and %s already Paused.",
- volinfo->volname, slave);
- ret = -1;
- goto out;
- }
- if ( type == GF_GSYNC_OPTION_TYPE_RESUME &&
- !strstr (monitor_status, "Paused")) {
- snprintf (errmsg, sizeof(errmsg), "Geo-replication"
- " session between %s and %s is not Paused.",
- volinfo->volname, slave);
- ret = -1;
- goto out;
- }
- ret = 0;
+ int ret = 0;
+ char errmsg[PATH_MAX] = {
+ 0,
+ };
+ char monitor_status[NAME_MAX] = {
+ 0,
+ };
+
+ GF_ASSERT(volinfo);
+ GF_ASSERT(slave);
+ GF_ASSERT(statefile);
+ GF_ASSERT(op_errstr);
+
+ ret = glusterd_gsync_read_frm_status(statefile, monitor_status,
+ sizeof(monitor_status));
+ if (ret <= 0) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Pause check Failed:"
+ " Geo-rep session is not setup");
+ ret = -1;
+ goto out;
+ }
+
+ if (type == GF_GSYNC_OPTION_TYPE_PAUSE &&
+ strstr(monitor_status, "Paused")) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Geo-replication"
+ " session between %s and %s already Paused.",
+ volinfo->volname, slave);
+ ret = -1;
+ goto out;
+ }
+ if (type == GF_GSYNC_OPTION_TYPE_RESUME &&
+ !strstr(monitor_status, "Paused")) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Geo-replication"
+ " session between %s and %s is not Paused.",
+ volinfo->volname, slave);
+ ret = -1;
+ goto out;
+ }
+ ret = 0;
out:
- if (ret && (errmsg[0] != '\0')) {
- *op_errstr = gf_strdup (errmsg);
- }
- return ret;
+ if (ret && (errmsg[0] != '\0')) {
+ *op_errstr = gf_strdup(errmsg);
+ }
+ return ret;
}
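
The precondition above boils down to a two-way check on the monitor status
string: pausing is invalid if it already contains "Paused", resuming is
invalid if it does not. A minimal restatement (enum and names are
illustrative, not the gluster option types):

#include <string.h>

enum { OPT_PAUSE, OPT_RESUME };

static int
pause_resume_ok(int type, const char *monitor_status)
{
    int paused = (strstr(monitor_status, "Paused") != NULL);

    if (type == OPT_PAUSE && paused)
        return -1; /* already paused */
    if (type == OPT_RESUME && !paused)
        return -1; /* nothing to resume */
    return 0;
}
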
int
-glusterd_op_stage_gsync_set (dict_t *dict, char **op_errstr)
+glusterd_op_stage_gsync_set(dict_t *dict, char **op_errstr)
{
- int ret = 0;
- int type = 0;
- char *volname = NULL;
- char *slave = NULL;
- char *slave_url = NULL;
- char *slave_host = NULL;
- char *slave_vol = NULL;
- char *down_peerstr = NULL;
- char *statefile = NULL;
- char statefiledir[PATH_MAX] = {0,};
- char *statedir = NULL;
- char *path_list = NULL;
- char *conf_path = NULL;
- gf_boolean_t exists = _gf_false;
- glusterd_volinfo_t *volinfo = NULL;
- char errmsg[PATH_MAX] = {0,};
- dict_t *ctx = NULL;
- gf_boolean_t is_force = 0;
- gf_boolean_t is_running = _gf_false;
- gf_boolean_t is_template_in_use = _gf_false;
- uuid_t uuid = {0};
- char uuid_str [64] = {0};
- char *host_uuid = NULL;
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- struct stat stbuf = {0,};
-
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
- GF_ASSERT (conf);
-
- ret = dict_get_int32 (dict, "type", &type);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
- "command type not found");
- *op_errstr = gf_strdup ("command unsuccessful");
+ int ret = 0;
+ int type = 0;
+ char *volname = NULL;
+ char *slave = NULL;
+ char *slave_url = NULL;
+ char *slave_host = NULL;
+ char *slave_vol = NULL;
+ char *down_peerstr = NULL;
+ char *statefile = NULL;
+ char statefiledir[PATH_MAX] = {
+ 0,
+ };
+ char *statedir = NULL;
+ char *path_list = NULL;
+ char *conf_path = NULL;
+ gf_boolean_t exists = _gf_false;
+ glusterd_volinfo_t *volinfo = NULL;
+ char errmsg[PATH_MAX] = {
+ 0,
+ };
+ dict_t *ctx = NULL;
+ gf_boolean_t is_force = 0;
+ gf_boolean_t is_running = _gf_false;
+ gf_boolean_t is_template_in_use = _gf_false;
+ uuid_t uuid = {0};
+ char uuid_str[64] = {0};
+ char *host_uuid = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ struct stat stbuf = {
+ 0,
+ };
+
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ ret = dict_get_int32(dict, "type", &type);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
+ "command type not found");
+ *op_errstr = gf_strdup("command unsuccessful");
+ goto out;
+ }
+
+ if (type == GF_GSYNC_OPTION_TYPE_STATUS) {
+ ret = glusterd_verify_gsync_status_opts(dict, op_errstr);
+ goto out;
+ }
+
+ ret = glusterd_op_gsync_args_get(dict, op_errstr, &volname, &slave,
+ &host_uuid);
+ if (ret)
+ goto out;
+
+ uuid_utoa_r(MY_UUID, uuid_str);
+
+ if (conf->op_version < 2) {
+ snprintf(errmsg, sizeof(errmsg),
+ "One or more nodes do not"
+ " support the required op version.");
+ ret = -1;
+ goto out;
+ }
+
+ exists = glusterd_check_volume_exists(volname);
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if ((ret) || (!exists)) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Volume name %s does not"
+ " exist",
+ volname);
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_get_slave_details_confpath(volinfo, dict, &slave_url,
+ &slave_host, &slave_vol,
+ &conf_path, op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVEINFO_FETCH_ERROR,
+ "Unable to fetch slave or confpath details.");
+ ret = -1;
+ goto out;
+ }
+
+ is_force = dict_get_str_boolean(dict, "force", _gf_false);
+
+ ret = glusterd_get_statefile_name(volinfo, slave, conf_path, &statefile,
+ &is_template_in_use);
+ if (ret) {
+ if (!strstr(slave, "::")) {
+ snprintf(errmsg, sizeof(errmsg), "%s is not a valid slave url.",
+ slave);
+ ret = -1;
+ goto out;
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_URL_INVALID,
+ "state_file entry missing in config file (%s)", conf_path);
+
+ if ((type == GF_GSYNC_OPTION_TYPE_STOP) && is_force) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_STOP_FORCE,
+ "Allowing stop "
+ "force to bypass missing statefile "
+ "entry in config file (%s), and "
+ "template file",
+ conf_path);
+ ret = 0;
+ } else
goto out;
}
-
- if (type == GF_GSYNC_OPTION_TYPE_STATUS) {
- ret = glusterd_verify_gsync_status_opts (dict, op_errstr);
- goto out;
+ } else {
+ ret = dict_set_str(dict, "statefile", statefile);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to store statefile path");
+ goto out;
}
+ }
- ret = glusterd_op_gsync_args_get (dict, op_errstr,
- &volname, &slave, &host_uuid);
- if (ret)
- goto out;
-
- uuid_utoa_r (MY_UUID, uuid_str);
-
- if (conf->op_version < 2) {
- snprintf (errmsg, sizeof(errmsg), "One or more nodes do not"
- " support the required op version.");
- ret = -1;
- goto out;
+ /* Allowing stop force to bypass the statefile check
+ * as this command acts as a fail safe method to stop geo-rep
+ * session. */
+ if (!((type == GF_GSYNC_OPTION_TYPE_STOP) && is_force)) {
+        /* check the session directory, as the statefile may not be
+         * present during upgrade */
+ if (snprintf(statefiledir, sizeof(statefiledir), "%s", statefile) >=
+ sizeof(statefiledir)) {
+ snprintf(errmsg, sizeof(errmsg), "Failed copying statefiledir");
+ ret = -1;
+ goto out;
}
+ statedir = dirname(statefiledir);
- exists = glusterd_check_volume_exists (volname);
- ret = glusterd_volinfo_find (volname, &volinfo);
- if ((ret) || (!exists)) {
- snprintf (errmsg, sizeof(errmsg), "Volume name %s does not"
- " exist", volname);
+ ret = sys_lstat(statedir, &stbuf);
+ if (ret) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Geo-replication"
+ " session between %s and %s does not exist.",
+ volinfo->volname, slave);
+ gf_msg(this->name, GF_LOG_ERROR, ENOENT, GD_MSG_FILE_OP_FAILED,
+ "%s. statefile = %s", errmsg, statefile);
+ ret = -1;
+ goto out;
+ }
+ }
+
+ /* Check if all peers that are a part of the volume are up or not */
+ if ((type == GF_GSYNC_OPTION_TYPE_DELETE) ||
+ ((type == GF_GSYNC_OPTION_TYPE_STOP) && !is_force) ||
+ (type == GF_GSYNC_OPTION_TYPE_PAUSE) ||
+ (type == GF_GSYNC_OPTION_TYPE_RESUME)) {
+ if (!strcmp(uuid_str, host_uuid)) {
+ ret = glusterd_are_vol_all_peers_up(volinfo, &conf->peers,
+ &down_peerstr);
+ if (ret == _gf_false) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Peer %s,"
+ " which is a part of %s volume, is"
+ " down. Please bring up the peer and"
+ " retry.",
+ down_peerstr, volinfo->volname);
ret = -1;
+ GF_FREE(down_peerstr);
+ down_peerstr = NULL;
goto out;
+ }
}
+ }
- ret = glusterd_get_slave_details_confpath (volinfo, dict, &slave_url,
- &slave_host, &slave_vol,
- &conf_path, op_errstr);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVEINFO_FETCH_ERROR,
- "Unable to fetch slave or confpath details.");
+ switch (type) {
+ case GF_GSYNC_OPTION_TYPE_START:
+ if (is_template_in_use) {
+ snprintf(errmsg, sizeof(errmsg),
+ "state-file entry "
+ "missing in the config file(%s).",
+ conf_path);
ret = -1;
goto out;
- }
-
- is_force = dict_get_str_boolean (dict, "force", _gf_false);
-
- ret = glusterd_get_statefile_name (volinfo, slave,
- conf_path, &statefile,
- &is_template_in_use);
- if (ret) {
- if (!strstr(slave, "::")) {
- snprintf (errmsg, sizeof(errmsg),
- "%s is not a valid slave url.", slave);
- ret = -1;
- goto out;
- } else {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVE_URL_INVALID,
- "state_file entry missing in config file (%s)",
- conf_path);
-
- if ((type == GF_GSYNC_OPTION_TYPE_STOP) && is_force) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_STOP_FORCE, "Allowing stop "
- "force to bypass missing statefile "
- "entry in config file (%s), and "
- "template file", conf_path);
- ret = 0;
- } else
- goto out;
- }
- } else {
- ret = dict_set_str (dict, "statefile", statefile);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Unable to store statefile path");
- goto out;
- }
- }
-
- /* Allowing stop force to bypass the statefile check
- * as this command acts as a fail safe method to stop geo-rep
- * session. */
- if (!((type == GF_GSYNC_OPTION_TYPE_STOP) && is_force)) {
-
- /* check session directory as statefile may not present
- * during upgrade */
- if (snprintf (statefiledir , sizeof (statefiledir), "%s",
- statefile) >= sizeof (statefiledir)) {
- snprintf (errmsg, sizeof (errmsg),
- "Failed copying statefiledir");
- ret = -1;
- goto out;
- }
- statedir = dirname (statefiledir);
-
- ret = sys_lstat (statedir, &stbuf);
- if (ret) {
- snprintf (errmsg, sizeof(errmsg), "Geo-replication"
- " session between %s and %s does not exist.",
- volinfo->volname, slave);
- gf_msg (this->name, GF_LOG_ERROR, ENOENT,
- GD_MSG_FILE_OP_FAILED,
- "%s. statefile = %s", errmsg, statefile);
- ret = -1;
- goto out;
- }
- }
+ }
- /* Check if all peers that are a part of the volume are up or not */
- if ((type == GF_GSYNC_OPTION_TYPE_DELETE) ||
- ((type == GF_GSYNC_OPTION_TYPE_STOP) && !is_force) ||
- (type == GF_GSYNC_OPTION_TYPE_PAUSE) ||
- (type == GF_GSYNC_OPTION_TYPE_RESUME)) {
- if (!strcmp (uuid_str, host_uuid)) {
- ret = glusterd_are_vol_all_peers_up (volinfo,
- &conf->peers,
- &down_peerstr);
- if (ret == _gf_false) {
- snprintf (errmsg, sizeof (errmsg), "Peer %s,"
- " which is a part of %s volume, is"
- " down. Please bring up the peer and"
- " retry.", down_peerstr,
- volinfo->volname);
- ret = -1;
- GF_FREE (down_peerstr);
- down_peerstr = NULL;
- goto out;
- }
+ ret = glusterd_op_verify_gsync_start_options(
+ volinfo, slave, conf_path, statefile, op_errstr, is_force);
+ if (ret)
+ goto out;
+ ctx = glusterd_op_get_ctx();
+ if (ctx) {
+ /* gsyncd does a fuse mount to start
+ * the geo-rep session */
+ if (!glusterd_is_fuse_available()) {
+ gf_msg("glusterd", GF_LOG_ERROR, errno,
+ GD_MSG_GEO_REP_START_FAILED,
+ "Unable "
+ "to open /dev/fuse (%s), "
+ "geo-replication start failed",
+ strerror(errno));
+ snprintf(errmsg, sizeof(errmsg), "fuse unavailable");
+ ret = -1;
+ goto out;
}
- }
+ }
+ break;
- switch (type) {
- case GF_GSYNC_OPTION_TYPE_START:
+ case GF_GSYNC_OPTION_TYPE_STOP:
+ if (!is_force) {
if (is_template_in_use) {
- snprintf (errmsg, sizeof(errmsg), "state-file entry "
- "missing in the config file(%s).",
- conf_path);
- ret = -1;
- goto out;
+ snprintf(errmsg, sizeof(errmsg),
+ "state-file entry missing in "
+ "the config file(%s).",
+ conf_path);
+ ret = -1;
+ goto out;
}
- ret = glusterd_op_verify_gsync_start_options (volinfo, slave,
- conf_path,
- statefile,
- op_errstr, is_force);
- if (ret)
+ ret = glusterd_op_verify_gsync_running(volinfo, slave,
+ conf_path, op_errstr);
+ if (ret) {
+ ret = glusterd_get_local_brickpaths(volinfo, &path_list);
+ if (!path_list && ret == -1)
goto out;
- ctx = glusterd_op_get_ctx();
- if (ctx) {
- /* gsyncd does a fuse mount to start
- * the geo-rep session */
- if (!glusterd_is_fuse_available ()) {
- gf_msg ("glusterd", GF_LOG_ERROR, errno,
- GD_MSG_GEO_REP_START_FAILED, "Unable "
- "to open /dev/fuse (%s), "
- "geo-replication start failed",
- strerror (errno));
- snprintf (errmsg, sizeof(errmsg),
- "fuse unavailable");
- ret = -1;
- goto out;
- }
}
- break;
- case GF_GSYNC_OPTION_TYPE_STOP:
- if (!is_force) {
- if (is_template_in_use) {
- snprintf (errmsg, sizeof(errmsg),
- "state-file entry missing in "
- "the config file(%s).", conf_path);
- ret = -1;
- goto out;
- }
-
- ret = glusterd_op_verify_gsync_running (volinfo, slave,
- conf_path,
- op_errstr);
- if (ret) {
- ret = glusterd_get_local_brickpaths (volinfo,
- &path_list);
- if (!path_list && ret == -1)
- goto out;
- }
-
- /* Check for geo-rep session is active or not for
- * configured user.*/
- ret = glusterd_gsync_get_uuid (slave, volinfo, uuid);
- if (ret) {
- snprintf (errmsg, sizeof(errmsg),
- "Geo-replication session between %s "
- "and %s does not exist.",
- volinfo->volname, slave);
- ret = -1;
- goto out;
- }
+            /* Check whether the geo-rep session is active for the
+             * configured user. */
+ ret = glusterd_gsync_get_uuid(slave, volinfo, uuid);
+ if (ret) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Geo-replication session between %s "
+ "and %s does not exist.",
+ volinfo->volname, slave);
+ ret = -1;
+ goto out;
}
- break;
+ }
+ break;
case GF_GSYNC_OPTION_TYPE_PAUSE:
case GF_GSYNC_OPTION_TYPE_RESUME:
- if (is_template_in_use) {
- snprintf (errmsg, sizeof(errmsg),
- "state-file entry missing in "
- "the config file(%s).", conf_path);
- ret = -1;
- goto out;
- }
+ if (is_template_in_use) {
+ snprintf(errmsg, sizeof(errmsg),
+ "state-file entry missing in "
+ "the config file(%s).",
+ conf_path);
+ ret = -1;
+ goto out;
+ }
- ret = glusterd_op_verify_gsync_running (volinfo, slave,
- conf_path, op_errstr);
- if (ret) {
- ret = glusterd_get_local_brickpaths (volinfo,
- &path_list);
- if (!path_list && ret == -1)
- goto out;
- }
+ ret = glusterd_op_verify_gsync_running(volinfo, slave, conf_path,
+ op_errstr);
+ if (ret) {
+ ret = glusterd_get_local_brickpaths(volinfo, &path_list);
+ if (!path_list && ret == -1)
+ goto out;
+ }
+
+ /* Check for geo-rep session is active or not
+ * for configured user.*/
+ ret = glusterd_gsync_get_uuid(slave, volinfo, uuid);
+ if (ret) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Geo-replication"
+ " session between %s and %s does not exist.",
+ volinfo->volname, slave);
+ ret = -1;
+ goto out;
+ }
- /* Check for geo-rep session is active or not
- * for configured user.*/
- ret = glusterd_gsync_get_uuid (slave, volinfo, uuid);
+ if (!is_force) {
+ ret = gd_pause_resume_validation(type, volinfo, slave,
+ statefile, op_errstr);
if (ret) {
- snprintf (errmsg, sizeof(errmsg), "Geo-replication"
- " session between %s and %s does not exist.",
- volinfo->volname, slave);
- ret = -1;
+ ret = glusterd_get_local_brickpaths(volinfo, &path_list);
+ if (!path_list && ret == -1)
goto out;
}
-
- if (!is_force) {
- ret = gd_pause_resume_validation (type, volinfo, slave,
- statefile, op_errstr);
- if (ret) {
- ret = glusterd_get_local_brickpaths (volinfo,
- &path_list);
- if (!path_list && ret == -1)
- goto out;
- }
- }
- break;
+ }
+ break;
case GF_GSYNC_OPTION_TYPE_CONFIG:
- if (is_template_in_use) {
- snprintf (errmsg, sizeof(errmsg), "state-file entry "
- "missing in the config file(%s).",
- conf_path);
- ret = -1;
- goto out;
- }
-
- ret = gsync_verify_config_options (dict, op_errstr, volname);
+ if (is_template_in_use) {
+ snprintf(errmsg, sizeof(errmsg),
+ "state-file entry "
+ "missing in the config file(%s).",
+ conf_path);
+ ret = -1;
goto out;
- break;
+ }
+
+ ret = gsync_verify_config_options(dict, op_errstr, volname);
+ goto out;
+ break;
case GF_GSYNC_OPTION_TYPE_DELETE:
- /* Check if the gsync session is still running
- * If so ask the user to stop geo-replication first.*/
- if (is_template_in_use) {
- snprintf (errmsg, sizeof(errmsg), "state-file entry "
- "missing in the config file(%s).",
- conf_path);
- ret = -1;
- goto out;
- }
+        /* Check if the gsync session is still running.
+         * If so, ask the user to stop geo-replication first. */
+ if (is_template_in_use) {
+ snprintf(errmsg, sizeof(errmsg),
+ "state-file entry "
+ "missing in the config file(%s).",
+ conf_path);
+ ret = -1;
+ goto out;
+ }
- ret = glusterd_gsync_get_uuid (slave, volinfo, uuid);
- if (ret) {
- snprintf (errmsg, sizeof(errmsg), "Geo-replication"
- " session between %s and %s does not exist.",
- volinfo->volname, slave);
- ret = -1;
- goto out;
- } else {
- ret = glusterd_check_gsync_running_local (volinfo->volname,
- slave, conf_path,
- &is_running);
- if (_gf_true == is_running) {
- snprintf (errmsg, sizeof (errmsg), GEOREP
- " session between %s & %s is "
- "still active. Please stop the "
- "session and retry.",
- volinfo->volname, slave);
- ret = -1;
- goto out;
- }
+ ret = glusterd_gsync_get_uuid(slave, volinfo, uuid);
+ if (ret) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Geo-replication"
+ " session between %s and %s does not exist.",
+ volinfo->volname, slave);
+ ret = -1;
+ goto out;
+ } else {
+ ret = glusterd_check_gsync_running_local(
+ volinfo->volname, slave, conf_path, &is_running);
+ if (_gf_true == is_running) {
+ snprintf(errmsg, sizeof(errmsg),
+ GEOREP
+ " session between %s & %s is "
+ "still active. Please stop the "
+ "session and retry.",
+ volinfo->volname, slave);
+ ret = -1;
+ goto out;
}
+ }
- ret = glusterd_verify_gsyncd_spawn (volinfo->volname, slave);
- if (ret) {
- snprintf (errmsg, sizeof (errmsg),
- "Unable to spawn gsyncd");
- }
+ ret = glusterd_verify_gsyncd_spawn(volinfo->volname, slave);
+ if (ret) {
+ snprintf(errmsg, sizeof(errmsg), "Unable to spawn gsyncd");
+ }
- break;
- }
+ break;
+ }
out:
- if (path_list)
- GF_FREE (path_list);
+ if (path_list)
+ GF_FREE(path_list);
- if (ret && errmsg[0] != '\0') {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR,
- "%s", errmsg);
- *op_errstr = gf_strdup (errmsg);
- }
+ if (ret && errmsg[0] != '\0') {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR, "%s", errmsg);
+ *op_errstr = gf_strdup(errmsg);
+ }
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
static int
-gd_pause_or_resume_gsync (dict_t *dict, char *master, char *slave,
- char *slave_host, char *slave_vol, char *conf_path,
- char **op_errstr, gf_boolean_t is_pause)
+gd_pause_or_resume_gsync(dict_t *dict, char *master, char *slave,
+ char *slave_host, char *slave_vol, char *conf_path,
+ char **op_errstr, gf_boolean_t is_pause)
{
- int32_t ret = 0;
- int pfd = -1;
- pid_t pid = 0;
- char pidfile[PATH_MAX] = {0,};
- char errmsg[PATH_MAX] = "";
- char buf [4096] = {0,};
- gf_boolean_t is_template_in_use = _gf_false;
- char monitor_status[NAME_MAX] = {0,};
- char *statefile = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (dict);
- GF_ASSERT (master);
- GF_ASSERT (slave);
- GF_ASSERT (slave_host);
- GF_ASSERT (slave_vol);
- GF_ASSERT (conf_path);
-
- pfd = gsyncd_getpidfile (master, slave, pidfile,
- conf_path, &is_template_in_use);
- if (pfd == -2) {
- snprintf (errmsg, sizeof(errmsg),
- "pid-file entry mising in config file and "
- "template config file.");
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_PIDFILE_NOT_FOUND,
- "%s", errmsg);
- *op_errstr = gf_strdup (errmsg);
- ret = -1;
+ int32_t ret = 0;
+ int pfd = -1;
+ pid_t pid = 0;
+ char pidfile[PATH_MAX] = {
+ 0,
+ };
+ char errmsg[PATH_MAX] = "";
+ char buf[4096] = {
+ 0,
+ };
+ gf_boolean_t is_template_in_use = _gf_false;
+ char monitor_status[NAME_MAX] = {
+ 0,
+ };
+ char *statefile = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(dict);
+ GF_ASSERT(master);
+ GF_ASSERT(slave);
+ GF_ASSERT(slave_host);
+ GF_ASSERT(slave_vol);
+ GF_ASSERT(conf_path);
+
+ pfd = gsyncd_getpidfile(master, slave, pidfile, conf_path,
+ &is_template_in_use);
+ if (pfd == -2) {
+ snprintf(errmsg, sizeof(errmsg),
+ "pid-file entry mising in config file and "
+ "template config file.");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PIDFILE_NOT_FOUND, "%s",
+ errmsg);
+ *op_errstr = gf_strdup(errmsg);
+ ret = -1;
+ goto out;
+ }
+
+ if (gsync_status_byfd(pfd) == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR,
+ "gsyncd b/w %s & %s is not running", master, slave);
+ /* monitor gsyncd already dead */
+ goto out;
+ }
+
+ if (pfd < 0)
+ goto out;
+
+ /* Prepare to update status file*/
+ ret = dict_get_str(dict, "statefile", &statefile);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Pause/Resume Failed: Unable to fetch statefile path");
+ goto out;
+ }
+ ret = glusterd_gsync_read_frm_status(statefile, monitor_status,
+ sizeof(monitor_status));
+ if (ret <= 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STAT_FILE_READ_FAILED,
+ "Pause/Resume Failed: "
+ "Unable to read status file for %s(master)"
+ " %s(slave)",
+ master, slave);
+ goto out;
+ }
+
+ ret = sys_read(pfd, buf, sizeof(buf));
+ if (ret > 0) {
+ pid = strtol(buf, NULL, 10);
+ if (is_pause) {
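+            /* A negative pid sends the signal to the entire gsyncd
+             * process group, so the monitor and its workers are
+             * stopped together. */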
+ ret = kill(-pid, SIGSTOP);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_PID_KILL_FAIL,
+ "Failed"
+ " to pause gsyncd. Error: %s",
+ strerror(errno));
goto out;
- }
-
- if (gsync_status_byfd (pfd) == -1) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR,
- "gsyncd b/w %s & %s is not running", master, slave);
- /* monitor gsyncd already dead */
- goto out;
- }
-
- if (pfd < 0)
- goto out;
-
- /* Prepare to update status file*/
- ret = dict_get_str (dict, "statefile", &statefile);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Pause/Resume Failed: Unable to fetch statefile path");
+ }
+            /* On "pause force", if the status is already Paused,
+             * do not update it again. */
+ if (strstr(monitor_status, "Paused"))
+ goto out;
+
+ ret = glusterd_create_status_file(master, slave, slave_host,
+ slave_vol, "Paused");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_UPDATE_STATEFILE_FAILED,
+ "Unable to update state_file."
+ " Error : %s",
+ strerror(errno));
+ /* If status cannot be updated resume back */
+ if (kill(-pid, SIGCONT)) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Pause successful but could "
+ "not update status file. "
+ "Please use 'resume force' to"
+ " resume back and retry pause"
+ " to reflect in status");
+ gf_msg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_PID_KILL_FAIL,
+ "Resume back Failed. Error:"
+ "%s",
+ strerror(errno));
+ *op_errstr = gf_strdup(errmsg);
+ }
goto out;
- }
- ret = glusterd_gsync_read_frm_status (statefile, monitor_status,
- sizeof (monitor_status));
- if (ret <= 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_STAT_FILE_READ_FAILED, "Pause/Resume Failed: "
- "Unable to read status file for %s(master)"
- " %s(slave)", master, slave);
+ }
+ } else {
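+        /* Resume: mark the session "Started" before sending SIGCONT;
+         * if SIGCONT fails, roll the state file back below. */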
+ ret = glusterd_create_status_file(master, slave, slave_host,
+ slave_vol, "Started");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_UPDATE_STATEFILE_FAILED,
+ "Resume Failed: Unable to update "
+ "state_file. Error : %s",
+ strerror(errno));
goto out;
- }
-
- ret = sys_read (pfd, buf, sizeof (buf));
- if (ret > 0) {
- pid = strtol (buf, NULL, 10);
- if (is_pause) {
- ret = kill (-pid, SIGSTOP);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_PID_KILL_FAIL, "Failed"
- " to pause gsyncd. Error: %s",
- strerror (errno));
- goto out;
- }
- /*On pause force, if status is already paused
- do not update status again*/
- if (strstr (monitor_status, "Paused"))
- goto out;
-
- ret = glusterd_create_status_file ( master, slave,
- slave_host, slave_vol,
- "Paused");
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_UPDATE_STATEFILE_FAILED,
- "Unable to update state_file."
- " Error : %s", strerror (errno));
- /* If status cannot be updated resume back */
- if (kill (-pid, SIGCONT)) {
- snprintf (errmsg, sizeof(errmsg),
- "Pause successful but could "
- "not update status file. "
- "Please use 'resume force' to"
- " resume back and retry pause"
- " to reflect in status");
- gf_msg (this->name, GF_LOG_ERROR,
- errno,
- GD_MSG_PID_KILL_FAIL,
- "Resume back Failed. Error:"
- "%s", strerror (errno));
- *op_errstr = gf_strdup (errmsg);
- }
- goto out;
- }
- } else {
- ret = glusterd_create_status_file (master, slave,
- slave_host,
- slave_vol,
- "Started");
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_UPDATE_STATEFILE_FAILED,
- "Resume Failed: Unable to update "
- "state_file. Error : %s",
- strerror (errno));
- goto out;
- }
- ret = kill (-pid, SIGCONT);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_PID_KILL_FAIL,
- "Resumed Failed: Unable to send"
- " SIGCONT. Error: %s",
- strerror (errno));
- /* Process can't be resumed, update status
- * back to paused. */
- ret = glusterd_create_status_file (master,
- slave,
- slave_host,
- slave_vol,
- monitor_status);
- if (ret) {
- snprintf (errmsg, sizeof(errmsg),
- "Resume failed!!! Status "
- "inconsistent. Please use "
- "'resume force' to resume and"
- " reach consistent state");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_STATUS_UPDATE_FAILED,
- "Updating status back to paused"
- " Failed. Error: %s",
- strerror (errno));
- *op_errstr = gf_strdup (errmsg);
- }
- goto out;
- }
+ }
+ ret = kill(-pid, SIGCONT);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_PID_KILL_FAIL,
+ "Resumed Failed: Unable to send"
+ " SIGCONT. Error: %s",
+ strerror(errno));
+ /* Process can't be resumed, update status
+ * back to paused. */
+ ret = glusterd_create_status_file(master, slave, slave_host,
+ slave_vol, monitor_status);
+ if (ret) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Resume failed!!! Status "
+ "inconsistent. Please use "
+ "'resume force' to resume and"
+ " reach consistent state");
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_STATUS_UPDATE_FAILED,
+ "Updating status back to paused"
+ " Failed. Error: %s",
+ strerror(errno));
+ *op_errstr = gf_strdup(errmsg);
}
+ goto out;
+ }
}
- ret = 0;
+ }
+ ret = 0;
out:
- sys_close (pfd);
- return ret;
+ sys_close(pfd);
+ return ret;
}
static int
-stop_gsync (char *master, char *slave, char **msg,
- char *conf_path, char **op_errstr,
- gf_boolean_t is_force)
+stop_gsync(char *master, char *slave, char **msg, char *conf_path,
+ char **op_errstr, gf_boolean_t is_force)
{
- int32_t ret = 0;
- int pfd = -1;
- pid_t pid = 0;
- char pidfile[PATH_MAX] = {0,};
- char errmsg[PATH_MAX] = "";
- char buf[4096] = {0,};
- int i = 0;
- gf_boolean_t is_template_in_use = _gf_false;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (this->private);
-
- pfd = gsyncd_getpidfile (master, slave, pidfile,
- conf_path, &is_template_in_use);
- if (pfd == -2) {
- snprintf (errmsg, sizeof(errmsg) - 1,
- "pid-file entry mising in config file and "
- "template config file.");
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_PIDFILE_NOT_FOUND,
- "%s", errmsg);
- *op_errstr = gf_strdup (errmsg);
- ret = -1;
- goto out;
- }
- if (gsync_status_byfd (pfd) == -1 && !is_force) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR,
- "gsyncd b/w %s & %s is not running", master,
- slave);
- /* monitor gsyncd already dead */
- goto out;
- }
-
- if (pfd < 0)
- goto out;
-
- ret = sys_read (pfd, buf, sizeof (buf));
- if (ret > 0) {
- pid = strtol (buf, NULL, 10);
- ret = kill (-pid, SIGTERM);
- if (ret && !is_force) {
- gf_msg (this->name, GF_LOG_WARNING, errno,
- GD_MSG_PID_KILL_FAIL,
- "failed to kill gsyncd");
- goto out;
- }
- for (i = 0; i < 20; i++) {
- if (gsync_status_byfd (pfd) == -1) {
- /* monitor gsyncd is dead but worker may
- * still be alive, give some more time
- * before SIGKILL (hack)
- */
- usleep (50000);
- break;
- }
- usleep (50000);
- }
- kill (-pid, SIGKILL);
- sys_unlink (pidfile);
+ int32_t ret = 0;
+ int pfd = -1;
+ pid_t pid = 0;
+ char pidfile[PATH_MAX] = {
+ 0,
+ };
+ char errmsg[PATH_MAX] = "";
+ char buf[4096] = {
+ 0,
+ };
+ int i = 0;
+ gf_boolean_t is_template_in_use = _gf_false;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(this->private);
+
+ pfd = gsyncd_getpidfile(master, slave, pidfile, conf_path,
+ &is_template_in_use);
+ if (pfd == -2) {
+ snprintf(errmsg, sizeof(errmsg) - 1,
+ "pid-file entry mising in config file and "
+ "template config file.");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PIDFILE_NOT_FOUND, "%s",
+ errmsg);
+ *op_errstr = gf_strdup(errmsg);
+ ret = -1;
+ goto out;
+ }
+ if (gsync_status_byfd(pfd) == -1 && !is_force) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR,
+ "gsyncd b/w %s & %s is not running", master, slave);
+ /* monitor gsyncd already dead */
+ goto out;
+ }
+
+ if (pfd < 0)
+ goto out;
+
+ ret = sys_read(pfd, buf, sizeof(buf));
+ if (ret > 0) {
+ pid = strtol(buf, NULL, 10);
+ ret = kill(-pid, SIGTERM);
+ if (ret && !is_force) {
+ gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_PID_KILL_FAIL,
+ "failed to kill gsyncd");
+ goto out;
+ }
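+        /* Poll for up to ~1 second (20 x 50 ms) for the monitor to
+         * exit after SIGTERM; the SIGKILL below is sent regardless to
+         * reap any survivors. */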
+ for (i = 0; i < 20; i++) {
+ if (gsync_status_byfd(pfd) == -1) {
+ /* monitor gsyncd is dead but worker may
+ * still be alive, give some more time
+ * before SIGKILL (hack)
+ */
+ usleep(50000);
+ break;
+ }
+ usleep(50000);
}
- ret = 0;
+ kill(-pid, SIGKILL);
+ sys_unlink(pidfile);
+ }
+ ret = 0;
out:
- sys_close (pfd);
+ sys_close(pfd);
- return ret;
+ return ret;
}
/*
@@ -4149,2565 +4155,2539 @@ out:
*/
int
-glusterd_gsync_op_already_set (char* master, char* slave, char* conf_path,
- char* op_name, char* op_value)
+glusterd_gsync_op_already_set(char *master, char *slave, char *conf_path,
+ char *op_name, char *op_value)
{
- dict_t *confd = NULL;
- char *op_val_buf = NULL;
- int32_t op_val_conf = 0;
- int32_t op_val_cli = 0;
- int32_t ret = -1;
- gf_boolean_t is_bool = _gf_true;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- confd = dict_new ();
- if (!confd) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
- "Not able to create dict.");
- return -1;
- }
-
- ret = glusterd_gsync_get_config (master, slave, conf_path,
- confd);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_GET_CONFIG_INFO_FAILED,
- "Unable to get configuration data for %s(master), "
- "%s(slave)", master, slave);
- goto out;
- }
-
- ret = dict_get_param (confd, op_name, &op_val_buf);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to get op_value for %s(master), %s(slave). "
- "Please check gsync config file.", master, slave);
- ret = 1;
- goto out;
- }
-
- gf_msg_debug (this->name, 0, "val_cli:%s val_conf:%s", op_value,
- op_val_buf);
-
- if (!strcmp(op_val_buf,"true") || !strcmp(op_val_buf,"1")
- || !strcmp(op_val_buf,"yes")) {
- op_val_conf = 1;
- } else if(!strcmp(op_val_buf,"false") || !strcmp(op_val_buf,"0")
- || !strcmp(op_val_buf,"no")) {
- op_val_conf = 0;
+ dict_t *confd = NULL;
+ char *op_val_buf = NULL;
+ int32_t op_val_conf = 0;
+ int32_t op_val_cli = 0;
+ int32_t ret = -1;
+ gf_boolean_t is_bool = _gf_true;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ confd = dict_new();
+ if (!confd) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
+ "Not able to create dict.");
+ return -1;
+ }
+
+ ret = glusterd_gsync_get_config(master, slave, conf_path, confd);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GET_CONFIG_INFO_FAILED,
+ "Unable to get configuration data for %s(master), "
+ "%s(slave)",
+ master, slave);
+ goto out;
+ }
+
+ ret = dict_get_param(confd, op_name, &op_val_buf);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get op_value for %s(master), %s(slave). "
+ "Please check gsync config file.",
+ master, slave);
+ ret = 1;
+ goto out;
+ }
+
+ gf_msg_debug(this->name, 0, "val_cli:%s val_conf:%s", op_value,
+ op_val_buf);
+
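+    /* Normalize boolean-style option values so that "true", "1" and
+     * "yes" (and their negative counterparts) compare as equal. */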
+ if (!strcmp(op_val_buf, "true") || !strcmp(op_val_buf, "1") ||
+ !strcmp(op_val_buf, "yes")) {
+ op_val_conf = 1;
+ } else if (!strcmp(op_val_buf, "false") || !strcmp(op_val_buf, "0") ||
+ !strcmp(op_val_buf, "no")) {
+ op_val_conf = 0;
+ } else {
+ is_bool = _gf_false;
+ }
+
+ if (is_bool) {
+ if (!strcmp(op_value, "true") || !strcmp(op_value, "1") ||
+ !strcmp(op_value, "yes")) {
+ op_val_cli = 1;
} else {
- is_bool = _gf_false;
+ op_val_cli = 0;
}
- if (is_bool) {
- if (!strcmp(op_value,"true") || !strcmp(op_value,"1")
- || !strcmp(op_value,"yes")) {
- op_val_cli = 1;
- } else {
- op_val_cli = 0;
- }
-
- if ( op_val_cli == op_val_conf ) {
- ret = 0;
- goto out;
- }
- } else {
- if (!strcmp(op_val_buf,op_value)) {
- ret = 0;
- goto out;
- }
+ if (op_val_cli == op_val_conf) {
+ ret = 0;
+ goto out;
+ }
+ } else {
+ if (!strcmp(op_val_buf, op_value)) {
+ ret = 0;
+ goto out;
}
+ }
- ret = 1;
+ ret = 1;
out:
- dict_unref(confd);
- return ret;
+ dict_unref(confd);
+ return ret;
}
static int
-glusterd_gsync_configure (glusterd_volinfo_t *volinfo, char *slave,
- char *path_list, dict_t *dict,
- dict_t *resp_dict, char **op_errstr)
+glusterd_gsync_configure(glusterd_volinfo_t *volinfo, char *slave,
+ char *path_list, dict_t *dict, dict_t *resp_dict,
+ char **op_errstr)
{
- int32_t ret = -1;
- char *op_name = NULL;
- char *op_value = NULL;
- runner_t runner = {0,};
- glusterd_conf_t *priv = NULL;
- char *subop = NULL;
- char *master = NULL;
- char *conf_path = NULL;
- char *slave_host = NULL;
- char *slave_vol = NULL;
- struct stat stbuf = {0, };
- gf_boolean_t restart_required = _gf_true;
- char **resopt = NULL;
- gf_boolean_t op_already_set = _gf_false;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (slave);
- GF_ASSERT (op_errstr);
- GF_ASSERT (dict);
- GF_ASSERT (resp_dict);
-
- ret = dict_get_str (dict, "subop", &subop);
- if (ret != 0)
- goto out;
+ int32_t ret = -1;
+ char *op_name = NULL;
+ char *op_value = NULL;
+ runner_t runner = {
+ 0,
+ };
+ glusterd_conf_t *priv = NULL;
+ char *subop = NULL;
+ char *master = NULL;
+ char *conf_path = NULL;
+ char *slave_host = NULL;
+ char *slave_vol = NULL;
+ struct stat stbuf = {
+ 0,
+ };
+ gf_boolean_t restart_required = _gf_true;
+ char **resopt = NULL;
+ gf_boolean_t op_already_set = _gf_false;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(slave);
+ GF_ASSERT(op_errstr);
+ GF_ASSERT(dict);
+ GF_ASSERT(resp_dict);
+
+ ret = dict_get_str(dict, "subop", &subop);
+ if (ret != 0)
+ goto out;
+
+ if (strcmp(subop, "get") == 0 || strcmp(subop, "get-all") == 0) {
+ /* deferred to cli */
+ gf_msg_debug(this->name, 0, "Returning 0");
+ return 0;
+ }
- if (strcmp (subop, "get") == 0 || strcmp (subop, "get-all") == 0) {
- /* deferred to cli */
- gf_msg_debug (this->name, 0, "Returning 0");
- return 0;
- }
+ ret = dict_get_str(dict, "op_name", &op_name);
+ if (ret != 0)
+ goto out;
- ret = dict_get_str (dict, "op_name", &op_name);
+ if (strtail(subop, "set")) {
+ ret = dict_get_str(dict, "op_value", &op_value);
if (ret != 0)
- goto out;
-
- if (strtail (subop, "set")) {
- ret = dict_get_str (dict, "op_value", &op_value);
- if (ret != 0)
- goto out;
- }
-
- priv = THIS->private;
- if (priv == NULL) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_GLUSTERD_PRIV_NOT_FOUND,
- "priv of glusterd not present");
- *op_errstr = gf_strdup ("glusterd defunct");
- goto out;
+ goto out;
+ }
+
+ priv = THIS->private;
+ if (priv == NULL) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_PRIV_NOT_FOUND,
+ "priv of glusterd not present");
+ *op_errstr = gf_strdup("glusterd defunct");
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "conf_path", &conf_path);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to fetch conf file path.");
+ goto out;
+ }
+
+ master = "";
+ runinit(&runner);
+ runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "-c", NULL);
+ runner_argprintf(&runner, "%s", conf_path);
+ runner_argprintf(&runner, "--iprefix=%s", DATADIR);
+ if (volinfo) {
+ master = volinfo->volname;
+ runner_argprintf(&runner, ":%s", master);
+ }
+ runner_add_arg(&runner, slave);
+ runner_argprintf(&runner, "--config-%s", subop);
+ runner_add_arg(&runner, op_name);
+ if (op_value)
+ runner_add_arg(&runner, op_value);
+
+ if (strcmp(op_name, "checkpoint") != 0 && strtail(subop, "set")) {
+ ret = glusterd_gsync_op_already_set(master, slave, conf_path, op_name,
+ op_value);
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_GSYNCD_OP_SET_FAILED,
+ "glusterd_gsync_op_already_set failed.");
+ gf_asprintf(op_errstr,
+ GEOREP
+ " config-%s failed for "
+ "%s %s",
+ subop, master, slave);
+ goto out;
}
-
- ret = dict_get_str (dict, "conf_path", &conf_path);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to fetch conf file path.");
- goto out;
+ if (ret == 0) {
+ gf_msg_debug(this->name, 0, "op_value is already set");
+ op_already_set = _gf_true;
+ goto out;
}
+ }
- master = "";
- runinit (&runner);
- runner_add_args (&runner, GSYNCD_PREFIX"/gsyncd", "-c", NULL);
- runner_argprintf (&runner, "%s", conf_path);
- runner_argprintf (&runner, "--iprefix=%s", DATADIR);
- if (volinfo) {
- master = volinfo->volname;
- runner_argprintf (&runner, ":%s", master);
- }
- runner_add_arg (&runner, slave);
- runner_argprintf (&runner, "--config-%s", subop);
- runner_add_arg (&runner, op_name);
- if (op_value)
- runner_add_arg (&runner, op_value);
-
- if ( strcmp(op_name,"checkpoint") != 0 && strtail (subop, "set")) {
- ret = glusterd_gsync_op_already_set(master,slave,conf_path,
- op_name,op_value);
- if (ret == -1) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_GSYNCD_OP_SET_FAILED,
- "glusterd_gsync_op_already_set failed.");
- gf_asprintf (op_errstr, GEOREP" config-%s failed for "
- "%s %s", subop, master, slave);
- goto out;
- }
- if (ret == 0) {
- gf_msg_debug (this->name, 0, "op_value is already set");
- op_already_set = _gf_true;
- goto out;
- }
- }
+ synclock_unlock(&priv->big_lock);
+ ret = runner_run(&runner);
+ synclock_lock(&priv->big_lock);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_GSYNCD_ERROR,
+ "gsyncd failed to %s %s option for "
+ "%s %s peers",
+ subop, op_name, master, slave);
- synclock_unlock (&priv->big_lock);
- ret = runner_run (&runner);
- synclock_lock (&priv->big_lock);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_GSYNCD_ERROR,
- "gsyncd failed to %s %s option for "
- "%s %s peers", subop, op_name, master,
- slave);
+ gf_asprintf(op_errstr, GEOREP " config-%s failed for %s %s", subop,
+ master, slave);
- gf_asprintf (op_errstr, GEOREP" config-%s failed for %s %s",
- subop, master, slave);
+ goto out;
+ }
+ if ((!strcmp(op_name, "state_file")) && (op_value)) {
+ ret = sys_lstat(op_value, &stbuf);
+ if (ret) {
+ ret = dict_get_str(dict, "slave_host", &slave_host);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to fetch slave host.");
goto out;
- }
+ }
- if ((!strcmp (op_name, "state_file")) && (op_value)) {
+ ret = dict_get_str(dict, "slave_vol", &slave_vol);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to fetch slave volume name.");
+ goto out;
+ }
- ret = sys_lstat (op_value, &stbuf);
- if (ret) {
- ret = dict_get_str (dict, "slave_host", &slave_host);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Unable to fetch slave host.");
- goto out;
- }
-
- ret = dict_get_str (dict, "slave_vol", &slave_vol);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Unable to fetch slave volume name.");
- goto out;
- }
-
- ret = glusterd_create_status_file (volinfo->volname,
- slave, slave_host,
- slave_vol,
- "Switching Status "
- "File");
- if (ret || sys_lstat (op_value, &stbuf)) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_FILE_OP_FAILED, "Unable to "
- "create %s. Error : %s", op_value,
- strerror (errno));
- ret = -1;
- goto out;
- }
- }
+ ret = glusterd_create_status_file(volinfo->volname, slave,
+ slave_host, slave_vol,
+ "Switching Status "
+ "File");
+ if (ret || sys_lstat(op_value, &stbuf)) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
+ "Unable to "
+ "create %s. Error : %s",
+ op_value, strerror(errno));
+ ret = -1;
+ goto out;
+ }
}
+ }
- ret = 0;
- gf_asprintf (op_errstr, "config-%s successful", subop);
+ ret = 0;
+ gf_asprintf(op_errstr, "config-%s successful", subop);
out:
- if (!ret && volinfo && !op_already_set) {
- for (resopt = gsync_no_restart_opts; *resopt; resopt++) {
- restart_required = _gf_true;
- if (!strcmp ((*resopt), op_name)){
- restart_required = _gf_false;
- break;
- }
+ if (!ret && volinfo && !op_already_set) {
+ for (resopt = gsync_no_restart_opts; *resopt; resopt++) {
+ restart_required = _gf_true;
+ if (!strcmp((*resopt), op_name)) {
+ restart_required = _gf_false;
+ break;
}
+ }
- if (restart_required) {
- ret = glusterd_check_restart_gsync_session (volinfo, slave,
- resp_dict, path_list,
- conf_path, 0);
- if (ret)
- *op_errstr = gf_strdup ("internal error");
- }
+ if (restart_required) {
+ ret = glusterd_check_restart_gsync_session(
+ volinfo, slave, resp_dict, path_list, conf_path, 0);
+ if (ret)
+ *op_errstr = gf_strdup("internal error");
}
+ }
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
int
-glusterd_gsync_read_frm_status (char *path, char *buf, size_t blen)
+glusterd_gsync_read_frm_status(char *path, char *buf, size_t blen)
{
- int ret = 0;
- int status_fd = -1;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (path);
- GF_ASSERT (buf);
- status_fd = open (path, O_RDONLY);
- if (status_fd == -1) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED,
- "Unable to read gsyncd status file %s", path);
- return -1;
- }
- ret = sys_read (status_fd, buf, blen - 1);
- if (ret > 0) {
- size_t len = strnlen (buf, ret);
- /* Ensure there is a NUL byte and that it's not the first. */
- if (len == 0 || len == blen - 1) {
- ret = -1;
- } else {
- char *p = buf + len - 1;
- while (isspace (*p))
- *p-- = '\0';
- }
- } else if (ret == 0)
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR,
- "Status file of gsyncd is empty");
- else /* ret < 0 */
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR,
- "Status file of gsyncd is corrupt");
-
- sys_close (status_fd);
- return ret;
+ int ret = 0;
+ int status_fd = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(path);
+ GF_ASSERT(buf);
+ status_fd = open(path, O_RDONLY);
+ if (status_fd == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED,
+ "Unable to read gsyncd status file %s", path);
+ return -1;
+ }
+ ret = sys_read(status_fd, buf, blen - 1);
+ if (ret > 0) {
+ size_t len = strnlen(buf, ret);
+ /* Ensure there is a NUL byte and that it's not the first. */
+ if (len == 0 || len == blen - 1) {
+ ret = -1;
+ } else {
+ char *p = buf + len - 1;
+ while (isspace(*p))
+ *p-- = '\0';
+ }
+ } else if (ret == 0)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR,
+ "Status file of gsyncd is empty");
+ else /* ret < 0 */
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR,
+ "Status file of gsyncd is corrupt");
+
+ sys_close(status_fd);
+ return ret;
}
static int
-dict_get_param (dict_t *dict, char *key, char **param)
+dict_get_param(dict_t *dict, char *key, char **param)
{
- char *dk = NULL;
- char *s = NULL;
- char x = '\0';
- int ret = 0;
+ char *dk = NULL;
+ char *s = NULL;
+ char x = '\0';
+ int ret = 0;
- if (dict_get_str (dict, key, param) == 0)
- return 0;
+ if (dict_get_str(dict, key, param) == 0)
+ return 0;
- dk = gf_strdup (key);
- if (!dk)
- return -1;
+ dk = gf_strdup(key);
+ if (!dk)
+ return -1;
- s = strpbrk (dk, "-_");
- if (!s) {
- ret = -1;
- goto out;
- }
- x = (*s == '-') ? '_' : '-';
+ s = strpbrk(dk, "-_");
+ if (!s) {
+ ret = -1;
+ goto out;
+ }
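+    /* The key was not found verbatim: retry with the '-'/'_' word
+     * separators flipped (e.g. "state-file" vs. "state_file"). */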
+ x = (*s == '-') ? '_' : '-';
+ *s++ = x;
+ while ((s = strpbrk(s, "-_")))
*s++ = x;
- while ((s = strpbrk (s, "-_")))
- *s++ = x;
- ret = dict_get_str (dict, dk, param);
+ ret = dict_get_str(dict, dk, param);
out:
- GF_FREE (dk);
- return ret;
+ GF_FREE(dk);
+ return ret;
}
int
-glusterd_fetch_values_from_config (char *master, char *slave,
- char *confpath, dict_t *confd,
- char **statefile,
- char **georep_session_wrkng_dir,
- char **socketfile)
+glusterd_fetch_values_from_config(char *master, char *slave, char *confpath,
+ dict_t *confd, char **statefile,
+ char **georep_session_wrkng_dir,
+ char **socketfile)
{
- int ret = 0;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- ret = glusterd_gsync_get_config (master, slave, confpath,
- confd);
+ int ret = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = glusterd_gsync_get_config(master, slave, confpath, confd);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GET_CONFIG_INFO_FAILED,
+ "Unable to get configuration data for %s(master), "
+ "%s(slave)",
+ master, slave);
+ goto out;
+ }
+
+ if (statefile) {
+ ret = dict_get_param(confd, "state_file", statefile);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_GET_CONFIG_INFO_FAILED,
- "Unable to get configuration data for %s(master), "
- "%s(slave)", master, slave);
- goto out;
- }
-
- if (statefile) {
- ret = dict_get_param (confd, "state_file", statefile);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Unable to get state_file's name "
- "for %s(master), %s(slave). "
- "Please check gsync config file.",
- master, slave);
- goto out;
- }
- }
-
- if (georep_session_wrkng_dir) {
- ret = dict_get_param (confd, "georep_session_working_dir",
- georep_session_wrkng_dir);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Unable to get geo-rep session's "
- "working directory name for %s(master), "
- "%s(slave). Please check gsync config file.",
- master, slave);
- goto out;
- }
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get state_file's name "
+ "for %s(master), %s(slave). "
+ "Please check gsync config file.",
+ master, slave);
+ goto out;
+ }
+ }
+
+ if (georep_session_wrkng_dir) {
+ ret = dict_get_param(confd, "georep_session_working_dir",
+ georep_session_wrkng_dir);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get geo-rep session's "
+ "working directory name for %s(master), "
+ "%s(slave). Please check gsync config file.",
+ master, slave);
+ goto out;
}
+ }
- if (socketfile) {
- ret = dict_get_param (confd, "state_socket_unencoded",
- socketfile);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Unable to get socket file's name "
- "for %s(master), %s(slave). "
- "Please check gsync config file.",
- master, slave);
- goto out;
- }
+ if (socketfile) {
+ ret = dict_get_param(confd, "state_socket_unencoded", socketfile);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get socket file's name "
+ "for %s(master), %s(slave). "
+ "Please check gsync config file.",
+ master, slave);
+ goto out;
}
+ }
- ret = 0;
+ ret = 0;
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
int
-glusterd_read_status_file (glusterd_volinfo_t *volinfo, char *slave,
- char *conf_path, dict_t *dict, char *node)
+glusterd_read_status_file(glusterd_volinfo_t *volinfo, char *slave,
+ char *conf_path, dict_t *dict, char *node)
{
- char temp_conf_path[PATH_MAX] = "";
- char *working_conf_path = NULL;
- char *georep_session_wrkng_dir = NULL;
- char *master = NULL;
- char sts_val_name[1024] = "";
- char monitor_status[NAME_MAX] = "";
- char *statefile = NULL;
- char *socketfile = NULL;
- dict_t *confd = NULL;
- char *slavekey = NULL;
- char *slaveentry = NULL;
- char *slaveuser = NULL;
- char *saveptr = NULL;
- char *temp = NULL;
- char *temp_inp = NULL;
- char *brick_host_uuid = NULL;
- int brick_host_uuid_length = 0;
- int gsync_count = 0;
- int ret = 0;
- glusterd_brickinfo_t *brickinfo = NULL;
- gf_gsync_status_t *sts_val = NULL;
- gf_boolean_t is_template_in_use = _gf_false;
- glusterd_conf_t *priv = NULL;
- struct stat stbuf = {0,};
- xlator_t *this = NULL;
- int32_t len = 0;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (this->private);
- GF_ASSERT (volinfo);
- GF_ASSERT (conf_path);
-
- master = volinfo->volname;
-
- confd = dict_new ();
- if (!confd) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
- "Not able to create dict.");
- return -1;
- }
-
- priv = THIS->private;
-
- len = snprintf (temp_conf_path, sizeof(temp_conf_path),
- "%s/"GSYNC_CONF_TEMPLATE, priv->workdir);
- if ((len < 0) || (len >= sizeof(temp_conf_path))) {
- return -1;
- }
+ char temp_conf_path[PATH_MAX] = "";
+ char *working_conf_path = NULL;
+ char *georep_session_wrkng_dir = NULL;
+ char *master = NULL;
+ char sts_val_name[1024] = "";
+ char monitor_status[NAME_MAX] = "";
+ char *statefile = NULL;
+ char *socketfile = NULL;
+ dict_t *confd = NULL;
+ char *slavekey = NULL;
+ char *slaveentry = NULL;
+ char *slaveuser = NULL;
+ char *saveptr = NULL;
+ char *temp = NULL;
+ char *temp_inp = NULL;
+ char *brick_host_uuid = NULL;
+ int brick_host_uuid_length = 0;
+ int gsync_count = 0;
+ int ret = 0;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ gf_gsync_status_t *sts_val = NULL;
+ gf_boolean_t is_template_in_use = _gf_false;
+ glusterd_conf_t *priv = NULL;
+ struct stat stbuf = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ int32_t len = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(this->private);
+ GF_ASSERT(volinfo);
+ GF_ASSERT(conf_path);
+
+ master = volinfo->volname;
+
+ confd = dict_new();
+ if (!confd) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
+ "Not able to create dict.");
+ return -1;
+ }
- ret = sys_lstat (conf_path, &stbuf);
- if (!ret) {
- gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_CONFIG_INFO,
- "Using passed config template(%s).",
- conf_path);
- working_conf_path = conf_path;
- } else {
- gf_msg (this->name, GF_LOG_WARNING, ENOENT,
- GD_MSG_FILE_OP_FAILED,
- "Config file (%s) missing. Looking for template "
- "config file (%s)", conf_path, temp_conf_path);
- ret = sys_lstat (temp_conf_path, &stbuf);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, ENOENT,
- GD_MSG_FILE_OP_FAILED, "Template "
- "config file (%s) missing.", temp_conf_path);
- goto out;
- }
- gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_DEFAULT_TEMP_CONFIG,
- "Using default config template(%s).", temp_conf_path);
- working_conf_path = temp_conf_path;
- is_template_in_use = _gf_true;
- }
+ priv = THIS->private;
-fetch_data:
- ret = glusterd_fetch_values_from_config (master, slave,
- working_conf_path,
- confd,
- &statefile,
- &georep_session_wrkng_dir,
- &socketfile);
+ len = snprintf(temp_conf_path, sizeof(temp_conf_path),
+ "%s/" GSYNC_CONF_TEMPLATE, priv->workdir);
+ if ((len < 0) || (len >= sizeof(temp_conf_path))) {
+ return -1;
+ }
+
+ ret = sys_lstat(conf_path, &stbuf);
+ if (!ret) {
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_CONFIG_INFO,
+ "Using passed config template(%s).", conf_path);
+ working_conf_path = conf_path;
+ } else {
+ gf_msg(this->name, GF_LOG_WARNING, ENOENT, GD_MSG_FILE_OP_FAILED,
+ "Config file (%s) missing. Looking for template "
+ "config file (%s)",
+ conf_path, temp_conf_path);
+ ret = sys_lstat(temp_conf_path, &stbuf);
if (ret) {
- if (is_template_in_use == _gf_false) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_FETCH_CONFIG_VAL_FAILED,
- "Unable to fetch config values "
- "for %s(master), %s(slave). "
- "Trying default config template",
- master, slave);
- working_conf_path = temp_conf_path;
- is_template_in_use = _gf_true;
- goto fetch_data;
- } else {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_FETCH_CONFIG_VAL_FAILED, "Unable to "
- "fetch config values for %s(master), "
- "%s(slave)", master, slave);
- goto out;
- }
- }
+ gf_msg(this->name, GF_LOG_ERROR, ENOENT, GD_MSG_FILE_OP_FAILED,
+ "Template "
+ "config file (%s) missing.",
+ temp_conf_path);
+ goto out;
+ }
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DEFAULT_TEMP_CONFIG,
+ "Using default config template(%s).", temp_conf_path);
+ working_conf_path = temp_conf_path;
+ is_template_in_use = _gf_true;
+ }
- ret = glusterd_gsync_read_frm_status (statefile, monitor_status,
- sizeof (monitor_status));
- if (ret <= 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_STAT_FILE_READ_FAILED,
- "Unable to read the status file for %s(master), "
- "%s(slave) statefile: %s", master, slave,
- statefile);
- snprintf (monitor_status, sizeof (monitor_status), "defunct");
+fetch_data:
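+    /* Fetch the statefile, working-dir and socket paths from the
+     * working config; on failure fall back once to the default
+     * template (via the goto below). */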
+ ret = glusterd_fetch_values_from_config(
+ master, slave, working_conf_path, confd, &statefile,
+ &georep_session_wrkng_dir, &socketfile);
+ if (ret) {
+ if (is_template_in_use == _gf_false) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FETCH_CONFIG_VAL_FAILED,
+ "Unable to fetch config values "
+ "for %s(master), %s(slave). "
+ "Trying default config template",
+ master, slave);
+ working_conf_path = temp_conf_path;
+ is_template_in_use = _gf_true;
+ goto fetch_data;
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FETCH_CONFIG_VAL_FAILED,
+ "Unable to "
+ "fetch config values for %s(master), "
+ "%s(slave)",
+ master, slave);
+ goto out;
+ }
+ }
+
+ ret = glusterd_gsync_read_frm_status(statefile, monitor_status,
+ sizeof(monitor_status));
+ if (ret <= 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STAT_FILE_READ_FAILED,
+ "Unable to read the status file for %s(master), "
+ "%s(slave) statefile: %s",
+ master, slave, statefile);
+ snprintf(monitor_status, sizeof(monitor_status), "defunct");
+ }
+
+ ret = dict_get_int32(dict, "gsync-count", &gsync_count);
+ if (ret)
+ gsync_count = 0;
+
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ if (gf_uuid_compare(brickinfo->uuid, MY_UUID))
+ continue;
+
+ sts_val = GF_CALLOC(1, sizeof(gf_gsync_status_t),
+ gf_common_mt_gsync_status_t);
+ if (!sts_val) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "Out Of Memory");
+ goto out;
+ }
+
+ /* Slave Key */
+ ret = glusterd_get_slave(volinfo, slave, &slavekey);
+ if (ret < 0) {
+ GF_FREE(sts_val);
+ goto out;
}
+ memcpy(sts_val->slavekey, slavekey, strlen(slavekey));
+ sts_val->slavekey[strlen(slavekey)] = '\0';
- ret = dict_get_int32 (dict, "gsync-count", &gsync_count);
- if (ret)
- gsync_count = 0;
-
- cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- if (gf_uuid_compare (brickinfo->uuid, MY_UUID))
- continue;
-
- sts_val = GF_CALLOC (1, sizeof(gf_gsync_status_t),
- gf_common_mt_gsync_status_t);
- if (!sts_val) {
- gf_msg (this->name, GF_LOG_ERROR, ENOMEM,
- GD_MSG_NO_MEMORY,
- "Out Of Memory");
- goto out;
- }
-
- /* Slave Key */
- ret = glusterd_get_slave (volinfo, slave, &slavekey);
- if (ret < 0) {
- GF_FREE (sts_val);
- goto out;
- }
- memcpy (sts_val->slavekey, slavekey, strlen(slavekey));
- sts_val->slavekey[strlen(slavekey)] = '\0';
-
- /* Master Volume */
- memcpy (sts_val->master, master, strlen(master));
- sts_val->master[strlen(master)] = '\0';
+ /* Master Volume */
+ memcpy(sts_val->master, master, strlen(master));
+ sts_val->master[strlen(master)] = '\0';
- /* Master Brick Node */
- memcpy (sts_val->node, brickinfo->hostname,
- strlen(brickinfo->hostname));
- sts_val->node[strlen(brickinfo->hostname)] = '\0';
+ /* Master Brick Node */
+ memcpy(sts_val->node, brickinfo->hostname, strlen(brickinfo->hostname));
+ sts_val->node[strlen(brickinfo->hostname)] = '\0';
- /* Master Brick Path */
- memcpy (sts_val->brick, brickinfo->path,
- strlen(brickinfo->path));
- sts_val->brick[strlen(brickinfo->path)] = '\0';
+ /* Master Brick Path */
+ memcpy(sts_val->brick, brickinfo->path, strlen(brickinfo->path));
+ sts_val->brick[strlen(brickinfo->path)] = '\0';
- /* Brick Host UUID */
- brick_host_uuid = uuid_utoa(brickinfo->uuid);
- brick_host_uuid_length = strlen (brick_host_uuid);
- memcpy (sts_val->brick_host_uuid, brick_host_uuid,
- brick_host_uuid_length);
- sts_val->brick_host_uuid[brick_host_uuid_length] = '\0';
+ /* Brick Host UUID */
+ brick_host_uuid = uuid_utoa(brickinfo->uuid);
+ brick_host_uuid_length = strlen(brick_host_uuid);
+ memcpy(sts_val->brick_host_uuid, brick_host_uuid,
+ brick_host_uuid_length);
+ sts_val->brick_host_uuid[brick_host_uuid_length] = '\0';
- /* Slave */
- memcpy (sts_val->slave, slave, strlen(slave));
- sts_val->slave[strlen(slave)] = '\0';
+ /* Slave */
+ memcpy(sts_val->slave, slave, strlen(slave));
+ sts_val->slave[strlen(slave)] = '\0';
- snprintf (sts_val->slave_node,
- sizeof(sts_val->slave_node), "N/A");
+ snprintf(sts_val->slave_node, sizeof(sts_val->slave_node), "N/A");
- snprintf (sts_val->worker_status,
- sizeof(sts_val->worker_status), "N/A");
+ snprintf(sts_val->worker_status, sizeof(sts_val->worker_status), "N/A");
- snprintf (sts_val->crawl_status,
- sizeof(sts_val->crawl_status), "N/A");
+ snprintf(sts_val->crawl_status, sizeof(sts_val->crawl_status), "N/A");
- snprintf (sts_val->last_synced,
- sizeof(sts_val->last_synced), "N/A");
+ snprintf(sts_val->last_synced, sizeof(sts_val->last_synced), "N/A");
- snprintf (sts_val->last_synced_utc,
- sizeof(sts_val->last_synced_utc), "N/A");
+ snprintf(sts_val->last_synced_utc, sizeof(sts_val->last_synced_utc),
+ "N/A");
- snprintf (sts_val->entry, sizeof(sts_val->entry), "N/A");
+ snprintf(sts_val->entry, sizeof(sts_val->entry), "N/A");
- snprintf (sts_val->data, sizeof(sts_val->data), "N/A");
+ snprintf(sts_val->data, sizeof(sts_val->data), "N/A");
- snprintf (sts_val->meta, sizeof(sts_val->meta), "N/A");
+ snprintf(sts_val->meta, sizeof(sts_val->meta), "N/A");
- snprintf (sts_val->failures, sizeof(sts_val->failures), "N/A");
+ snprintf(sts_val->failures, sizeof(sts_val->failures), "N/A");
- snprintf (sts_val->checkpoint_time,
- sizeof(sts_val->checkpoint_time), "N/A");
+ snprintf(sts_val->checkpoint_time, sizeof(sts_val->checkpoint_time),
+ "N/A");
- snprintf (sts_val->checkpoint_time_utc,
- sizeof(sts_val->checkpoint_time_utc), "N/A");
+ snprintf(sts_val->checkpoint_time_utc,
+ sizeof(sts_val->checkpoint_time_utc), "N/A");
- snprintf (sts_val->checkpoint_completed,
- sizeof(sts_val->checkpoint_completed), "N/A");
+ snprintf(sts_val->checkpoint_completed,
+ sizeof(sts_val->checkpoint_completed), "N/A");
- snprintf (sts_val->checkpoint_completion_time,
- sizeof(sts_val->checkpoint_completion_time),
- "N/A");
+ snprintf(sts_val->checkpoint_completion_time,
+ sizeof(sts_val->checkpoint_completion_time), "N/A");
- snprintf (sts_val->checkpoint_completion_time_utc,
- sizeof(sts_val->checkpoint_completion_time_utc),
- "N/A");
+ snprintf(sts_val->checkpoint_completion_time_utc,
+ sizeof(sts_val->checkpoint_completion_time_utc), "N/A");
- /* Get all the other values from Gsyncd */
- ret = glusterd_gsync_get_status (master, slave, conf_path,
- brickinfo->path, sts_val);
+ /* Get all the other values from Gsyncd */
+ ret = glusterd_gsync_get_status(master, slave, conf_path,
+ brickinfo->path, sts_val);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_GET_STATUS_DATA_FAIL,
- "Unable to get status data "
- "for %s(master), %s(slave), %s(brick)",
- master, slave, brickinfo->path);
- ret = -1;
- goto out;
- }
-
- if (is_template_in_use) {
- snprintf (sts_val->worker_status,
- sizeof(sts_val->worker_status),
- "Config Corrupted");
- }
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GET_STATUS_DATA_FAIL,
+ "Unable to get status data "
+ "for %s(master), %s(slave), %s(brick)",
+ master, slave, brickinfo->path);
+ ret = -1;
+ goto out;
+ }
- ret = dict_get_str (volinfo->gsync_slaves, slavekey,
- &slaveentry);
- if (ret < 0) {
- GF_FREE (sts_val);
- goto out;
- }
+ if (is_template_in_use) {
+ snprintf(sts_val->worker_status, sizeof(sts_val->worker_status),
+ "Config Corrupted");
+ }
+ ret = dict_get_str(volinfo->gsync_slaves, slavekey, &slaveentry);
+ if (ret < 0) {
+ GF_FREE(sts_val);
+ goto out;
+ }
- memcpy (sts_val->session_slave, slaveentry,
- strlen(slaveentry));
- sts_val->session_slave[strlen(slaveentry)] = '\0';
+ memcpy(sts_val->session_slave, slaveentry, strlen(slaveentry));
+ sts_val->session_slave[strlen(slaveentry)] = '\0';
- temp_inp = gf_strdup(slaveentry);
- if (!temp_inp)
- goto out;
+ temp_inp = gf_strdup(slaveentry);
+ if (!temp_inp)
+ goto out;
- if (strstr(temp_inp, "@") == NULL) {
- slaveuser = "root";
- } else {
- temp = strtok_r(temp_inp, "//", &saveptr);
- temp = strtok_r(NULL, "/", &saveptr);
- slaveuser = strtok_r(temp, "@", &saveptr);
- }
- memcpy (sts_val->slave_user, slaveuser,
- strlen(slaveuser));
- sts_val->slave_user[strlen(slaveuser)] = '\0';
-
- snprintf (sts_val_name, sizeof (sts_val_name),
- "status_value%d", gsync_count);
- ret = dict_set_bin (dict, sts_val_name, sts_val,
- sizeof(gf_gsync_status_t));
- if (ret) {
- GF_FREE (sts_val);
- goto out;
- }
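+        /* Derive the slave user from the session's slave URL: default
+         * to root when no user is embedded, otherwise take the part
+         * before '@'. */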
+ if (strstr(temp_inp, "@") == NULL) {
+ slaveuser = "root";
+ } else {
+ temp = strtok_r(temp_inp, "//", &saveptr);
+ temp = strtok_r(NULL, "/", &saveptr);
+ slaveuser = strtok_r(temp, "@", &saveptr);
+ }
+ memcpy(sts_val->slave_user, slaveuser, strlen(slaveuser));
+ sts_val->slave_user[strlen(slaveuser)] = '\0';
- gsync_count++;
- sts_val = NULL;
+ snprintf(sts_val_name, sizeof(sts_val_name), "status_value%d",
+ gsync_count);
+ ret = dict_set_bin(dict, sts_val_name, sts_val,
+ sizeof(gf_gsync_status_t));
+ if (ret) {
+ GF_FREE(sts_val);
+ goto out;
}
- ret = dict_set_int32 (dict, "gsync-count", gsync_count);
- if (ret)
- goto out;
+ gsync_count++;
+ sts_val = NULL;
+ }
+
+ ret = dict_set_int32(dict, "gsync-count", gsync_count);
+ if (ret)
+ goto out;
out:
- GF_FREE (temp_inp);
- dict_unref (confd);
+ GF_FREE(temp_inp);
+ dict_unref(confd);
- return 0;
+ return 0;
}
int
-glusterd_check_restart_gsync_session (glusterd_volinfo_t *volinfo, char *slave,
- dict_t *resp_dict, char *path_list,
- char *conf_path, gf_boolean_t is_force)
+glusterd_check_restart_gsync_session(glusterd_volinfo_t *volinfo, char *slave,
+ dict_t *resp_dict, char *path_list,
+ char *conf_path, gf_boolean_t is_force)
{
-
- int ret = 0;
- glusterd_conf_t *priv = NULL;
- char *status_msg = NULL;
- gf_boolean_t is_running = _gf_false;
- char *op_errstr = NULL;
- char *key = NULL;
- xlator_t *this = NULL;
-
- GF_ASSERT (volinfo);
- GF_ASSERT (slave);
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- key = slave;
-
- ret = glusterd_check_gsync_running_local (volinfo->volname,
- slave, conf_path,
- &is_running);
- if (!ret && (_gf_true != is_running))
- /* gsynd not running, nothing to do */
+ int ret = 0;
+ glusterd_conf_t *priv = NULL;
+ char *status_msg = NULL;
+ gf_boolean_t is_running = _gf_false;
+ char *op_errstr = NULL;
+ char *key = NULL;
+ xlator_t *this = NULL;
+
+ GF_ASSERT(volinfo);
+ GF_ASSERT(slave);
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ key = slave;
+
+ ret = glusterd_check_gsync_running_local(volinfo->volname, slave, conf_path,
+ &is_running);
+ if (!ret && (_gf_true != is_running))
+        /* gsyncd not running, nothing to do */
+ goto out;
+
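+    /* A restart is stop followed by start: kill the running gsyncd
+     * and respawn it with the same path list and config. */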
+ ret = stop_gsync(volinfo->volname, slave, &status_msg, conf_path,
+ &op_errstr, is_force);
+ if (ret == 0 && status_msg)
+ ret = dict_set_str(resp_dict, "gsync-status", status_msg);
+ if (ret == 0) {
+ dict_del(volinfo->gsync_active_slaves, key);
+ ret = glusterd_start_gsync(volinfo, slave, path_list, conf_path,
+ uuid_utoa(MY_UUID), NULL, _gf_false);
+ if (!ret) {
+ /* Add slave to the dict indicating geo-rep session is
+             * running. */
+ ret = dict_set_dynstr_with_alloc(volinfo->gsync_active_slaves, key,
+ "running");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to set"
+ " key:%s value:running in dict. But "
+ "the config succeeded.",
+ key);
goto out;
-
- ret = stop_gsync (volinfo->volname, slave, &status_msg,
- conf_path, &op_errstr,
- is_force);
- if (ret == 0 && status_msg)
- ret = dict_set_str (resp_dict, "gsync-status",
- status_msg);
- if (ret == 0) {
- dict_del (volinfo->gsync_active_slaves, key);
- ret = glusterd_start_gsync (volinfo, slave, path_list,
- conf_path, uuid_utoa(MY_UUID),
- NULL, _gf_false);
- if (!ret) {
- /* Add slave to the dict indicating geo-rep session is
- * running.*/
- ret = dict_set_dynstr_with_alloc (
- volinfo->gsync_active_slaves,
- key, "running");
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "Unable to set"
- " key:%s value:running in dict. But "
- "the config succeeded.", key);
- goto out;
- }
- }
+ }
}
+ }
- out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- if (op_errstr)
- GF_FREE (op_errstr);
- return ret;
+out:
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ if (op_errstr)
+ GF_FREE(op_errstr);
+ return ret;
}
static int32_t
-glusterd_marker_changelog_create_volfile (glusterd_volinfo_t *volinfo)
+glusterd_marker_changelog_create_volfile(glusterd_volinfo_t *volinfo)
{
- int32_t ret = 0;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- ret = glusterd_create_volfiles_and_notify_services (volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
- "Unable to create volfile for setting of marker "
- "while '"GEOREP" start'");
- ret = -1;
- goto out;
- }
-
- ret = glusterd_store_volinfo (volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret)
- goto out;
-
- if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_svcs_manager (volinfo);
- goto out;
- }
- ret = 0;
+ int32_t ret = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
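+    /* Regenerate the volfiles so the marker/changelog settings take
+     * effect, persist the bumped volinfo version, and restart the
+     * services only if the volume is already started. */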
+ ret = glusterd_create_volfiles_and_notify_services(volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
+ "Unable to create volfile for setting of marker "
+ "while '" GEOREP " start'");
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret)
+ goto out;
+
+ if (GLUSTERD_STATUS_STARTED == volinfo->status) {
+ ret = glusterd_svcs_manager(volinfo);
+ goto out;
+ }
+ ret = 0;
out:
- return ret;
+ return ret;
}
static int
-glusterd_set_gsync_knob (glusterd_volinfo_t *volinfo, char *key, int *vc)
+glusterd_set_gsync_knob(glusterd_volinfo_t *volinfo, char *key, int *vc)
{
- int ret = -1;
- int conf_enabled = _gf_false;
- xlator_t *this = NULL;
+ int ret = -1;
+ int conf_enabled = _gf_false;
+ xlator_t *this = NULL;
- this = THIS;
- GF_ASSERT (this);
+ this = THIS;
+ GF_ASSERT(this);
- GF_ASSERT (this->private);
+ GF_ASSERT(this->private);
- conf_enabled = glusterd_volinfo_get_boolean (volinfo, key);
- if (conf_enabled == -1) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_GET_KEY_FAILED,
- "failed to get key %s from volinfo", key);
- goto out;
- }
+ conf_enabled = glusterd_volinfo_get_boolean(volinfo, key);
+ if (conf_enabled == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GET_KEY_FAILED,
+ "failed to get key %s from volinfo", key);
+ goto out;
+ }
- ret = 0;
- if (conf_enabled == _gf_false) {
- *vc = 1;
- ret = glusterd_gsync_volinfo_dict_set (volinfo,
- key, "on");
- }
+ ret = 0;
+ if (conf_enabled == _gf_false) {
+ *vc = 1;
+ ret = glusterd_gsync_volinfo_dict_set(volinfo, key, "on");
+ }
- out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+out:
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
static int
-glusterd_set_gsync_confs (glusterd_volinfo_t *volinfo)
+glusterd_set_gsync_confs(glusterd_volinfo_t *volinfo)
{
- int ret = -1;
- int volfile_changed = 0;
+ int ret = -1;
+ int volfile_changed = 0;
- ret = glusterd_set_gsync_knob (volinfo,
- VKEY_MARKER_XTIME, &volfile_changed);
- if (ret)
- goto out;
+ ret = glusterd_set_gsync_knob(volinfo, VKEY_MARKER_XTIME, &volfile_changed);
+ if (ret)
+ goto out;
- /**
- * enable ignore-pid-check blindly as it could be needed for
- * cascading setups.
- */
- ret = glusterd_set_gsync_knob (volinfo, VKEY_MARKER_XTIME_FORCE,
- &volfile_changed);
- if (ret)
- goto out;
+ /**
+ * enable ignore-pid-check blindly as it could be needed for
+ * cascading setups.
+ */
+ ret = glusterd_set_gsync_knob(volinfo, VKEY_MARKER_XTIME_FORCE,
+ &volfile_changed);
+ if (ret)
+ goto out;
- ret = glusterd_set_gsync_knob (volinfo,
- VKEY_CHANGELOG, &volfile_changed);
- if (ret)
- goto out;
+ ret = glusterd_set_gsync_knob(volinfo, VKEY_CHANGELOG, &volfile_changed);
+ if (ret)
+ goto out;
- if (volfile_changed)
- ret = glusterd_marker_changelog_create_volfile (volinfo);
+ if (volfile_changed)
+ ret = glusterd_marker_changelog_create_volfile(volinfo);
- out:
- return ret;
+out:
+ return ret;
}
static int
-glusterd_get_gsync_status_mst_slv (glusterd_volinfo_t *volinfo,
- char *slave, char *conf_path,
- dict_t *rsp_dict, char *node)
+glusterd_get_gsync_status_mst_slv(glusterd_volinfo_t *volinfo, char *slave,
+ char *conf_path, dict_t *rsp_dict, char *node)
{
- char *statefile = NULL;
- uuid_t uuid = {0, };
- int ret = 0;
- gf_boolean_t is_template_in_use = _gf_false;
- struct stat stbuf = {0, };
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (volinfo);
- GF_ASSERT (slave);
- GF_ASSERT (this->private);
-
- ret = glusterd_gsync_get_uuid (slave, volinfo, uuid);
+ char *statefile = NULL;
+ uuid_t uuid = {
+ 0,
+ };
+ int ret = 0;
+ gf_boolean_t is_template_in_use = _gf_false;
+ struct stat stbuf = {
+ 0,
+ };
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(volinfo);
+ GF_ASSERT(slave);
+ GF_ASSERT(this->private);
+
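+    /* No uuid entry for the slave means the session is inactive; in
+     * that case status is reported only if a statefile exists. */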
+ ret = glusterd_gsync_get_uuid(slave, volinfo, uuid);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SESSION_INACTIVE,
+ "geo-replication status %s %s : session is not "
+ "active",
+ volinfo->volname, slave);
+
+ ret = glusterd_get_statefile_name(volinfo, slave, conf_path, &statefile,
+ &is_template_in_use);
if (ret) {
- gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_SESSION_INACTIVE,
- "geo-replication status %s %s : session is not "
- "active", volinfo->volname, slave);
-
- ret = glusterd_get_statefile_name (volinfo, slave,
- conf_path, &statefile,
- &is_template_in_use);
- if (ret) {
- if (!strstr(slave, "::"))
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_SLAVE_URL_INVALID,
- "%s is not a valid slave url.", slave);
- else
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_GET_STATEFILE_NAME_FAILED,
- "Unable to get statefile's name");
- ret = 0;
- goto out;
- }
-
- ret = sys_lstat (statefile, &stbuf);
- if (ret) {
- gf_msg (this->name, GF_LOG_INFO, ENOENT,
- GD_MSG_FILE_OP_FAILED,
- "%s statefile not present.", statefile);
- ret = 0;
- goto out;
- }
+ if (!strstr(slave, "::"))
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SLAVE_URL_INVALID,
+ "%s is not a valid slave url.", slave);
+ else
+ gf_msg(this->name, GF_LOG_INFO, 0,
+ GD_MSG_GET_STATEFILE_NAME_FAILED,
+ "Unable to get statefile's name");
+ ret = 0;
+ goto out;
+ }
+
+ ret = sys_lstat(statefile, &stbuf);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_INFO, ENOENT, GD_MSG_FILE_OP_FAILED,
+ "%s statefile not present.", statefile);
+ ret = 0;
+ goto out;
}
+ }
- ret = glusterd_read_status_file (volinfo, slave, conf_path,
- rsp_dict, node);
+ ret = glusterd_read_status_file(volinfo, slave, conf_path, rsp_dict, node);
out:
- if (statefile)
- GF_FREE (statefile);
+ if (statefile)
+ GF_FREE(statefile);
- gf_msg_debug (this->name, 0, "Returning with %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning with %d", ret);
+ return ret;
}
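
Note the fast path above: when glusterd_gsync_get_uuid() fails, the session is reported inactive and the function bails out with ret = 0 unless the session's statefile exists on disk. A minimal sketch of that existence probe, using plain lstat() in place of the sys_lstat() wrapper:

    #include <stdio.h>
    #include <sys/stat.h>

    /* A missing monitor.status file means the session was never fully
     * set up, so status reporting is skipped without raising an error. */
    static int
    session_active(const char *statefile)
    {
        struct stat st;

        return lstat(statefile, &st) == 0;
    }

    int
    main(void)
    {
        if (!session_active("/nonexistent/monitor.status"))
            puts("session is not active");
        return 0;
    }
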
int
-glusterd_get_gsync_status_mst (glusterd_volinfo_t *volinfo, dict_t *rsp_dict,
- char *node)
+glusterd_get_gsync_status_mst(glusterd_volinfo_t *volinfo, dict_t *rsp_dict,
+ char *node)
{
- glusterd_gsync_status_temp_t param = {0, };
+ glusterd_gsync_status_temp_t param = {
+ 0,
+ };
- GF_ASSERT (volinfo);
+ GF_ASSERT(volinfo);
- param.rsp_dict = rsp_dict;
- param.volinfo = volinfo;
- param.node = node;
- dict_foreach (volinfo->gsync_slaves, _get_status_mst_slv, &param);
+ param.rsp_dict = rsp_dict;
+ param.volinfo = volinfo;
+ param.node = node;
+ dict_foreach(volinfo->gsync_slaves, _get_status_mst_slv, &param);
- return 0;
+ return 0;
}
static int
-glusterd_get_gsync_status_all (dict_t *rsp_dict, char *node)
+glusterd_get_gsync_status_all(dict_t *rsp_dict, char *node)
{
+ int32_t ret = 0;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
- int32_t ret = 0;
- glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
+ this = THIS;
+ GF_ASSERT(this);
- priv = this->private;
+ priv = this->private;
- GF_ASSERT (priv);
+ GF_ASSERT(priv);
- cds_list_for_each_entry (volinfo, &priv->volumes, vol_list) {
- ret = glusterd_get_gsync_status_mst (volinfo, rsp_dict, node);
- if (ret)
- goto out;
- }
+ cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
+ {
+ ret = glusterd_get_gsync_status_mst(volinfo, rsp_dict, node);
+ if (ret)
+ goto out;
+ }
out:
- gf_msg_debug (this->name, 0, "Returning with %d", ret);
- return ret;
-
+ gf_msg_debug(this->name, 0, "Returning with %d", ret);
+ return ret;
}
static int
-glusterd_get_gsync_status (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
+glusterd_get_gsync_status(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
- char *slave = NULL;
- char *volname = NULL;
- char *conf_path = NULL;
- char errmsg[PATH_MAX] = {0, };
- gf_boolean_t exists = _gf_false;
- glusterd_volinfo_t *volinfo = NULL;
- int ret = 0;
- char my_hostname[256] = {0,};
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- ret = gethostname(my_hostname, 256);
- if (ret) {
- /* stick to N/A */
- (void) strcpy (my_hostname, "N/A");
- }
-
- ret = dict_get_str (dict, "master", &volname);
- if (ret < 0){
- ret = glusterd_get_gsync_status_all (rsp_dict, my_hostname);
- goto out;
- }
-
- exists = glusterd_check_volume_exists (volname);
- ret = glusterd_volinfo_find (volname, &volinfo);
- if ((ret) || (!exists)) {
- gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_FOUND,
- "volume name does not exist");
- snprintf (errmsg, sizeof(errmsg), "Volume name %s does not"
- " exist", volname);
- *op_errstr = gf_strdup (errmsg);
- ret = -1;
- goto out;
- }
-
-
- ret = dict_get_str (dict, "slave", &slave);
- if (ret < 0) {
- ret = glusterd_get_gsync_status_mst (volinfo,
- rsp_dict, my_hostname);
- goto out;
- }
+ char *slave = NULL;
+ char *volname = NULL;
+ char *conf_path = NULL;
+ char errmsg[PATH_MAX] = {
+ 0,
+ };
+ gf_boolean_t exists = _gf_false;
+ glusterd_volinfo_t *volinfo = NULL;
+ int ret = 0;
+ char my_hostname[256] = {
+ 0,
+ };
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = gethostname(my_hostname, 256);
+ if (ret) {
+ /* stick to N/A */
+ (void)strcpy(my_hostname, "N/A");
+ }
+
+ ret = dict_get_str(dict, "master", &volname);
+ if (ret < 0) {
+ ret = glusterd_get_gsync_status_all(rsp_dict, my_hostname);
+ goto out;
+ }
+
+ exists = glusterd_check_volume_exists(volname);
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if ((ret) || (!exists)) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_FOUND,
+ "volume name does not exist");
+ snprintf(errmsg, sizeof(errmsg),
+ "Volume name %s does not"
+ " exist",
+ volname);
+ *op_errstr = gf_strdup(errmsg);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "slave", &slave);
+ if (ret < 0) {
+ ret = glusterd_get_gsync_status_mst(volinfo, rsp_dict, my_hostname);
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "conf_path", &conf_path);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to fetch conf file path.");
+ goto out;
+ }
+
+ ret = glusterd_get_gsync_status_mst_slv(volinfo, slave, conf_path, rsp_dict,
+ my_hostname);
- ret = dict_get_str (dict, "conf_path", &conf_path);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to fetch conf file path.");
- goto out;
- }
-
- ret = glusterd_get_gsync_status_mst_slv (volinfo, slave, conf_path,
- rsp_dict, my_hostname);
-
- out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+out:
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
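
glusterd_get_gsync_status() is a three-way dispatch on the request dict: no "master" key means status for every volume, a master without a "slave" key means every session of that master, and both keys select one session. A self-contained sketch of the same fall-through; the stub helpers are hypothetical stand-ins for the three glusterd_get_gsync_status_* functions above:

    #include <stddef.h>
    #include <stdio.h>

    static int status_all(void)
    {
        puts("all volumes");
        return 0;
    }

    static int status_master(const char *m)
    {
        printf("all sessions of %s\n", m);
        return 0;
    }

    static int status_session(const char *m, const char *s)
    {
        printf("session %s -> %s\n", m, s);
        return 0;
    }

    static int
    dispatch(const char *master, const char *slave)
    {
        if (!master) /* dict_get_str("master") failed */
            return status_all();
        if (!slave) /* dict_get_str("slave") failed */
            return status_master(master);
        return status_session(master, slave);
    }

    int
    main(void)
    {
        return dispatch("gv0", NULL);
    }
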
static int
-glusterd_gsync_delete (glusterd_volinfo_t *volinfo, char *slave,
- char *slave_host, char *slave_vol, char *path_list,
- dict_t *dict, dict_t *resp_dict, char **op_errstr)
+glusterd_gsync_delete(glusterd_volinfo_t *volinfo, char *slave,
+ char *slave_host, char *slave_vol, char *path_list,
+ dict_t *dict, dict_t *resp_dict, char **op_errstr)
{
- int32_t ret = -1;
- runner_t runner = {0,};
- glusterd_conf_t *priv = NULL;
- char *master = NULL;
- char *gl_workdir = NULL;
- char geo_rep_dir[PATH_MAX] = "";
- char *conf_path = NULL;
- xlator_t *this = NULL;
- uint32_t reset_sync_time = _gf_false;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (slave);
- GF_ASSERT (slave_host);
- GF_ASSERT (slave_vol);
- GF_ASSERT (op_errstr);
- GF_ASSERT (dict);
- GF_ASSERT (resp_dict);
-
- if (THIS)
- priv = THIS->private;
- if (priv == NULL) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_GLUSTERD_PRIV_NOT_FOUND,
- "priv of glusterd not present");
- *op_errstr = gf_strdup ("glusterd defunct");
- goto out;
- }
-
- ret = dict_get_str (dict, "conf_path", &conf_path);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to fetch conf file path.");
- goto out;
- }
-
- gl_workdir = priv->workdir;
- master = "";
- runinit (&runner);
- runner_add_args (&runner, GSYNCD_PREFIX"/gsyncd",
- "--delete", "-c", NULL);
- runner_argprintf (&runner, "%s", conf_path);
- runner_argprintf (&runner, "--iprefix=%s", DATADIR);
-
- runner_argprintf (&runner, "--path-list=%s", path_list);
-
- ret = dict_get_uint32 (dict, "reset-sync-time", &reset_sync_time);
- if (!ret && reset_sync_time) {
- runner_add_args (&runner, "--reset-sync-time", NULL);
- }
-
- if (volinfo) {
- master = volinfo->volname;
- runner_argprintf (&runner, ":%s", master);
- }
- runner_add_arg (&runner, slave);
- runner_redir (&runner, STDOUT_FILENO, RUN_PIPE);
- synclock_unlock (&priv->big_lock);
- ret = runner_run (&runner);
- synclock_lock (&priv->big_lock);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_SESSION_DEL_FAILED,
- "gsyncd failed to delete session info for %s and "
- "%s peers", master, slave);
-
- gf_asprintf (op_errstr, "gsyncd failed to "
- "delete session info for %s and %s peers",
- master, slave);
-
- goto out;
- }
-
- ret = snprintf (geo_rep_dir, sizeof(geo_rep_dir) - 1,
- "%s/"GEOREP"/%s_%s_%s", gl_workdir,
- volinfo->volname, slave_host, slave_vol);
- geo_rep_dir[ret] = '\0';
-
- ret = sys_rmdir (geo_rep_dir);
- if (ret) {
- if (errno == ENOENT)
- gf_msg_debug (this->name, 0, "Geo Rep Dir(%s) Not Present.",
- geo_rep_dir);
- else {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DIR_OP_FAILED,
- "Unable to delete Geo Rep Dir(%s). Error: %s",
- geo_rep_dir, strerror (errno));
- goto out;
- }
- }
-
- ret = 0;
-
- gf_asprintf (op_errstr, "delete successful");
+ int32_t ret = -1;
+ runner_t runner = {
+ 0,
+ };
+ glusterd_conf_t *priv = NULL;
+ char *master = NULL;
+ char *gl_workdir = NULL;
+ char geo_rep_dir[PATH_MAX] = "";
+ char *conf_path = NULL;
+ xlator_t *this = NULL;
+ uint32_t reset_sync_time = _gf_false;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(slave);
+ GF_ASSERT(slave_host);
+ GF_ASSERT(slave_vol);
+ GF_ASSERT(op_errstr);
+ GF_ASSERT(dict);
+ GF_ASSERT(resp_dict);
+
+ if (THIS)
+ priv = THIS->private;
+ if (priv == NULL) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_PRIV_NOT_FOUND,
+ "priv of glusterd not present");
+ *op_errstr = gf_strdup("glusterd defunct");
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "conf_path", &conf_path);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to fetch conf file path.");
+ goto out;
+ }
+
+ gl_workdir = priv->workdir;
+ master = "";
+ runinit(&runner);
+ runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "--delete", "-c", NULL);
+ runner_argprintf(&runner, "%s", conf_path);
+ runner_argprintf(&runner, "--iprefix=%s", DATADIR);
+
+ runner_argprintf(&runner, "--path-list=%s", path_list);
+
+ ret = dict_get_uint32(dict, "reset-sync-time", &reset_sync_time);
+ if (!ret && reset_sync_time) {
+ runner_add_args(&runner, "--reset-sync-time", NULL);
+ }
+
+ if (volinfo) {
+ master = volinfo->volname;
+ runner_argprintf(&runner, ":%s", master);
+ }
+ runner_add_arg(&runner, slave);
+ runner_redir(&runner, STDOUT_FILENO, RUN_PIPE);
+ synclock_unlock(&priv->big_lock);
+ ret = runner_run(&runner);
+ synclock_lock(&priv->big_lock);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SESSION_DEL_FAILED,
+ "gsyncd failed to delete session info for %s and "
+ "%s peers",
+ master, slave);
+
+ gf_asprintf(op_errstr,
+ "gsyncd failed to "
+ "delete session info for %s and %s peers",
+ master, slave);
+
+ goto out;
+ }
+
+ ret = snprintf(geo_rep_dir, sizeof(geo_rep_dir) - 1,
+ "%s/" GEOREP "/%s_%s_%s", gl_workdir, volinfo->volname,
+ slave_host, slave_vol);
+ geo_rep_dir[ret] = '\0';
+
+ ret = sys_rmdir(geo_rep_dir);
+ if (ret) {
+ if (errno == ENOENT)
+ gf_msg_debug(this->name, 0, "Geo Rep Dir(%s) Not Present.",
+ geo_rep_dir);
+ else {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DIR_OP_FAILED,
+ "Unable to delete Geo Rep Dir(%s). Error: %s", geo_rep_dir,
+ strerror(errno));
+ goto out;
+ }
+ }
+
+ ret = 0;
+
+ gf_asprintf(op_errstr, "delete successful");
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
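
glusterd_gsync_delete() drives the gsyncd helper through libglusterfs's runner API, and the same init / add-args / run sequence recurs throughout this file. A stripped-down sketch of that invocation, assuming run.h and the build-time GSYNCD_PREFIX define that appear in the diff; logging and the big_lock juggling are omitted:

    #include "run.h" /* libglusterfs runner API (assumed header name) */

    static int
    invoke_gsyncd_delete(char *conf_path, char *slave)
    {
        runner_t runner = {
            0,
        };

        runinit(&runner);
        runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "--delete", "-c",
                        NULL);
        runner_argprintf(&runner, "%s", conf_path); /* session config file */
        runner_add_arg(&runner, slave);             /* session to delete */

        return runner_run(&runner); /* fork, exec, wait; 0 on success */
    }

The real function also releases priv->big_lock around runner_run() so the blocking child cannot stall other glusterd operations, and appends --path-list and, optionally, --reset-sync-time.
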
int
-glusterd_op_sys_exec (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
+glusterd_op_sys_exec(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
- char buf[PATH_MAX] = "";
- char cmd_arg_name[PATH_MAX] = "";
- char output_name[PATH_MAX] = "";
- char errmsg[PATH_MAX] = "";
- char *ptr = NULL;
- char *bufp = NULL;
- char *command = NULL;
- char **cmd_args = NULL;
- int ret = -1;
- int i = -1;
- int cmd_args_count = 0;
- int output_count = 0;
- glusterd_conf_t *priv = NULL;
- runner_t runner = {0,};
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (dict);
- GF_ASSERT (op_errstr);
- GF_ASSERT (rsp_dict);
-
- if (THIS)
- priv = THIS->private;
- if (priv == NULL) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_GLUSTERD_PRIV_NOT_FOUND,
- "priv of glusterd not present");
- *op_errstr = gf_strdup ("glusterd defunct");
+ char buf[PATH_MAX] = "";
+ char cmd_arg_name[PATH_MAX] = "";
+ char output_name[PATH_MAX] = "";
+ char errmsg[PATH_MAX] = "";
+ char *ptr = NULL;
+ char *bufp = NULL;
+ char *command = NULL;
+ char **cmd_args = NULL;
+ int ret = -1;
+ int i = -1;
+ int cmd_args_count = 0;
+ int output_count = 0;
+ glusterd_conf_t *priv = NULL;
+ runner_t runner = {
+ 0,
+ };
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(dict);
+ GF_ASSERT(op_errstr);
+ GF_ASSERT(rsp_dict);
+
+ if (THIS)
+ priv = THIS->private;
+ if (priv == NULL) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_PRIV_NOT_FOUND,
+ "priv of glusterd not present");
+ *op_errstr = gf_strdup("glusterd defunct");
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "command", &command);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get command from dict");
+ goto out;
+ }
+
+ ret = dict_get_int32(dict, "cmd_args_count", &cmd_args_count);
+ if (ret)
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
+ "No cmd_args_count");
+
+ if (cmd_args_count) {
+ cmd_args = GF_CALLOC(cmd_args_count, sizeof(char *), gf_common_mt_char);
+ if (!cmd_args) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "Unable to calloc. Errno = %s", strerror(errno));
+ goto out;
+ }
+
+ for (i = 1; i <= cmd_args_count; i++) {
+ snprintf(cmd_arg_name, sizeof(cmd_arg_name), "cmd_arg_%d", i);
+ ret = dict_get_str(dict, cmd_arg_name, &cmd_args[i - 1]);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get"
+ " %s in dict",
+ cmd_arg_name);
goto out;
+ }
}
+ }
+
+ runinit(&runner);
+ runner_argprintf(&runner, GSYNCD_PREFIX "/peer_%s", command);
+ for (i = 0; i < cmd_args_count; i++)
+ runner_add_arg(&runner, cmd_args[i]);
+ runner_redir(&runner, STDOUT_FILENO, RUN_PIPE);
+ synclock_unlock(&priv->big_lock);
+ ret = runner_start(&runner);
+ if (ret == -1) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Unable to "
+ "execute command. Error : %s",
+ strerror(errno));
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CMD_EXEC_FAIL, "%s", errmsg);
+ ret = -1;
+ synclock_lock(&priv->big_lock);
+ goto out;
+ }
+
+ do {
+ ptr = fgets(buf, sizeof(buf), runner_chio(&runner, STDOUT_FILENO));
+ if (ptr) {
+ ret = dict_get_int32(rsp_dict, "output_count", &output_count);
+ if (ret)
+ output_count = 1;
+ else
+ output_count++;
+ snprintf(output_name, sizeof(output_name), "output_%d",
+ output_count);
+ if (buf[strlen(buf) - 1] == '\n')
+ buf[strlen(buf) - 1] = '\0';
+ bufp = gf_strdup(buf);
+ if (!bufp)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STRDUP_FAILED,
+ "gf_strdup failed.");
+ ret = dict_set_dynstr(rsp_dict, output_name, bufp);
+ if (ret) {
+ GF_FREE(bufp);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "output set "
+ "failed.");
+ }
+ ret = dict_set_int32(rsp_dict, "output_count", output_count);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "output_count "
+ "set failed.");
+ }
+ } while (ptr);
+
+ ret = runner_end(&runner);
+ if (ret) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Unable to "
+ "end. Error : %s",
+ strerror(errno));
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNABLE_TO_END, "%s", errmsg);
+ ret = -1;
+ synclock_lock(&priv->big_lock);
+ goto out;
+ }
+ synclock_lock(&priv->big_lock);
+
+ ret = 0;
+out:
+ if (cmd_args) {
+ GF_FREE(cmd_args);
+ cmd_args = NULL;
+ }
+
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
+}
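
The fgets loop above is the standard way this file captures a child's stdout: redirect the stream to a pipe before starting, then read the pipe through runner_chio(). A compact sketch of that capture, assuming the same run.h API; the diff additionally strips the trailing newline and stores each line as output_N in rsp_dict:

    #include <stdio.h>
    #include <unistd.h>

    #include "run.h" /* assumed: libglusterfs runner API used in the diff */

    /* Count the child's stdout lines; the command must already have been
     * added to the runner with runner_add_args()/runner_argprintf(). */
    static int
    capture_lines(runner_t *runner)
    {
        char buf[4096];
        int lines = 0;

        runner_redir(runner, STDOUT_FILENO, RUN_PIPE); /* before start */
        if (runner_start(runner) == -1)
            return -1;

        while (fgets(buf, sizeof(buf), runner_chio(runner, STDOUT_FILENO)))
            lines++;

        return runner_end(runner) == 0 ? lines : -1;
    }
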
- ret = dict_get_str (dict, "command", &command);
+int
+glusterd_op_copy_file(dict_t *dict, char **op_errstr)
+{
+ char abs_filename[PATH_MAX] = "";
+ char errmsg[PATH_MAX] = "";
+ char *filename = NULL;
+ char *host_uuid = NULL;
+ char uuid_str[64] = {0};
+ char *contents = NULL;
+ char buf[4096] = "";
+ int ret = -1;
+ int fd = -1;
+ int bytes_writen = 0;
+ int bytes_read = 0;
+ int contents_size = -1;
+ int file_mode = -1;
+ glusterd_conf_t *priv = NULL;
+ struct stat stbuf = {
+ 0,
+ };
+ gf_boolean_t free_contents = _gf_true;
+ xlator_t *this = NULL;
+ int32_t len = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ if (THIS)
+ priv = THIS->private;
+ if (priv == NULL) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_PRIV_NOT_FOUND,
+ "priv of glusterd not present");
+ *op_errstr = gf_strdup("glusterd defunct");
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "host-uuid", &host_uuid);
+ if (ret < 0)
+ goto out;
+
+ ret = dict_get_str(dict, "source", &filename);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to fetch filename from dict.");
+ *op_errstr = gf_strdup("command unsuccessful");
+ goto out;
+ }
+ len = snprintf(abs_filename, sizeof(abs_filename), "%s/%s", priv->workdir,
+ filename);
+ if ((len < 0) || (len >= sizeof(abs_filename))) {
+ ret = -1;
+ goto out;
+ }
+
+ uuid_utoa_r(MY_UUID, uuid_str);
+ if (!strcmp(uuid_str, host_uuid)) {
+ ret = sys_lstat(abs_filename, &stbuf);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to get command from dict");
- goto out;
+ len = snprintf(errmsg, sizeof(errmsg),
+ "Source file "
+ "does not exist in %s",
+ priv->workdir);
+ if (len < 0) {
+ strcpy(errmsg, "<error>");
+ }
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, ENOENT, GD_MSG_FILE_OP_FAILED,
+ "%s", errmsg);
+ goto out;
+ }
+
+ contents = GF_CALLOC(1, stbuf.st_size + 1, gf_common_mt_char);
+ if (!contents) {
+ snprintf(errmsg, sizeof(errmsg), "Unable to allocate memory");
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY, "%s",
+ errmsg);
+ ret = -1;
+ goto out;
+ }
+
+ fd = open(abs_filename, O_RDONLY);
+ if (fd < 0) {
+ len = snprintf(errmsg, sizeof(errmsg), "Unable to open %s",
+ abs_filename);
+ if (len < 0) {
+ strcpy(errmsg, "<error>");
+ }
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED, "%s",
+ errmsg);
+ ret = -1;
+ goto out;
}
- ret = dict_get_int32 (dict, "cmd_args_count", &cmd_args_count);
- if (ret)
- gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
- "No cmd_args_count");
-
- if (cmd_args_count) {
- cmd_args = GF_CALLOC (cmd_args_count, sizeof (char*),
- gf_common_mt_char);
- if (!cmd_args) {
- gf_msg (this->name, GF_LOG_ERROR, ENOMEM,
- GD_MSG_NO_MEMORY,
- "Unable to calloc. Errno = %s",
- strerror(errno));
- goto out;
- }
+ do {
+ ret = sys_read(fd, buf, sizeof(buf));
+ if (ret > 0) {
+ memcpy(contents + bytes_read, buf, ret);
+ bytes_read += ret;
+ }
+ } while (ret > 0);
- for (i=1; i <= cmd_args_count; i++) {
- snprintf (cmd_arg_name, sizeof(cmd_arg_name),
- "cmd_arg_%d", i);
- ret = dict_get_str (dict, cmd_arg_name, &cmd_args[i-1]);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to get"
- " %s in dict", cmd_arg_name);
- goto out;
- }
- }
+ if (bytes_read != stbuf.st_size) {
+ len = snprintf(errmsg, sizeof(errmsg),
+ "Unable to read all the data from %s", abs_filename);
+ if (len < 0) {
+ strcpy(errmsg, "<error>");
+ }
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_READ_ERROR, "%s",
+ errmsg);
+ ret = -1;
+ goto out;
}
- runinit (&runner);
- runner_argprintf (&runner, GSYNCD_PREFIX"/peer_%s", command);
- for (i=0; i < cmd_args_count; i++)
- runner_add_arg (&runner, cmd_args[i]);
- runner_redir (&runner, STDOUT_FILENO, RUN_PIPE);
- synclock_unlock (&priv->big_lock);
- ret = runner_start (&runner);
- if (ret == -1) {
- snprintf (errmsg, sizeof (errmsg), "Unable to "
- "execute command. Error : %s",
- strerror (errno));
- *op_errstr = gf_strdup (errmsg);
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_CMD_EXEC_FAIL, "%s",
- errmsg);
- ret = -1;
- synclock_lock (&priv->big_lock);
- goto out;
+ ret = dict_set_int32(dict, "contents_size", stbuf.st_size);
+ if (ret) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Unable to set"
+ " contents size in dict.");
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "%s",
+ errmsg);
+ goto out;
}
- do {
- ptr = fgets(buf, sizeof(buf), runner_chio (&runner, STDOUT_FILENO));
- if (ptr) {
- ret = dict_get_int32 (rsp_dict, "output_count", &output_count);
- if (ret)
- output_count = 1;
- else
- output_count++;
- snprintf (output_name, sizeof (output_name),
- "output_%d", output_count);
- if (buf[strlen(buf) - 1] == '\n')
- buf[strlen(buf) - 1] = '\0';
- bufp = gf_strdup (buf);
- if (!bufp)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_STRDUP_FAILED,
- "gf_strdup failed.");
- ret = dict_set_dynstr (rsp_dict, output_name, bufp);
- if (ret) {
- GF_FREE (bufp);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "output set "
- "failed.");
- }
- ret = dict_set_int32 (rsp_dict, "output_count", output_count);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "output_count "
- "set failed.");
- }
- } while (ptr);
-
- ret = runner_end (&runner);
+ ret = dict_set_int32(dict, "file_mode", (int32_t)stbuf.st_mode);
if (ret) {
- snprintf (errmsg, sizeof (errmsg), "Unable to "
- "end. Error : %s",
- strerror (errno));
- *op_errstr = gf_strdup (errmsg);
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_UNABLE_TO_END, "%s",
- errmsg);
- ret = -1;
- synclock_lock (&priv->big_lock);
- goto out;
+ snprintf(errmsg, sizeof(errmsg),
+ "Unable to set"
+ " file mode in dict.");
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "%s",
+ errmsg);
+ goto out;
}
- synclock_lock (&priv->big_lock);
- ret = 0;
-out:
- if (cmd_args) {
- GF_FREE (cmd_args);
- cmd_args = NULL;
+ ret = dict_set_bin(dict, "common_pem_contents", contents,
+ stbuf.st_size);
+ if (ret) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Unable to set"
+ " pem contents in dict.");
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "%s",
+ errmsg);
+ goto out;
+ }
+ free_contents = _gf_false;
+ } else {
+ free_contents = _gf_false;
+ ret = dict_get_bin(dict, "common_pem_contents", (void **)&contents);
+ if (ret) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Unable to get"
+ " pem contents in dict.");
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ errmsg);
+ goto out;
+ }
+ ret = dict_get_int32(dict, "contents_size", &contents_size);
+ if (ret) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Unable to set"
+ " contents size in dict.");
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ errmsg);
+ goto out;
}
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
-}
-
-int
-glusterd_op_copy_file (dict_t *dict, char **op_errstr)
-{
- char abs_filename[PATH_MAX] = "";
- char errmsg[PATH_MAX] = "";
- char *filename = NULL;
- char *host_uuid = NULL;
- char uuid_str [64] = {0};
- char *contents = NULL;
- char buf[4096] = "";
- int ret = -1;
- int fd = -1;
- int bytes_writen = 0;
- int bytes_read = 0;
- int contents_size = -1;
- int file_mode = -1;
- glusterd_conf_t *priv = NULL;
- struct stat stbuf = {0,};
- gf_boolean_t free_contents = _gf_true;
- xlator_t *this = NULL;
- int32_t len = 0;
-
- this = THIS;
- GF_ASSERT (this);
-
- if (THIS)
- priv = THIS->private;
- if (priv == NULL) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_GLUSTERD_PRIV_NOT_FOUND,
- "priv of glusterd not present");
- *op_errstr = gf_strdup ("glusterd defunct");
- goto out;
+ ret = dict_get_int32(dict, "file_mode", &file_mode);
+ if (ret) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Unable to get"
+ " file mode in dict.");
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ errmsg);
+ goto out;
+ }
+
+ fd = open(abs_filename, O_WRONLY | O_TRUNC | O_CREAT, 0600);
+ if (fd < 0) {
+ len = snprintf(errmsg, sizeof(errmsg), "Unable to open %s",
+ abs_filename);
+ if (len < 0) {
+ strcpy(errmsg, "<error>");
+ }
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED, "%s",
+ errmsg);
+ ret = -1;
+ goto out;
}
- ret = dict_get_str (dict, "host-uuid", &host_uuid);
- if (ret < 0)
- goto out;
+ bytes_writen = sys_write(fd, contents, contents_size);
- ret = dict_get_str (dict, "source", &filename);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to fetch filename from dict.");
- *op_errstr = gf_strdup ("command unsuccessful");
- goto out;
- }
- len = snprintf (abs_filename, sizeof(abs_filename),
- "%s/%s", priv->workdir, filename);
- if ((len < 0) || (len >= sizeof(abs_filename))) {
- ret = -1;
- goto out;
+ if (bytes_writen != contents_size) {
+ len = snprintf(errmsg, sizeof(errmsg), "Failed to write to %s",
+ abs_filename);
+ if (len < 0) {
+ strcpy(errmsg, "<error>");
+ }
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED, "%s",
+ errmsg);
+ ret = -1;
+ goto out;
}
- uuid_utoa_r (MY_UUID, uuid_str);
- if (!strcmp (uuid_str, host_uuid)) {
- ret = sys_lstat (abs_filename, &stbuf);
- if (ret) {
- len = snprintf (errmsg, sizeof (errmsg), "Source file "
- "does not exist in %s", priv->workdir);
- if (len < 0) {
- strcpy(errmsg, "<error>");
- }
- *op_errstr = gf_strdup (errmsg);
- gf_msg (this->name, GF_LOG_ERROR, ENOENT,
- GD_MSG_FILE_OP_FAILED, "%s", errmsg);
- goto out;
- }
+ sys_fchmod(fd, file_mode);
+ }
- contents = GF_CALLOC(1, stbuf.st_size+1, gf_common_mt_char);
- if (!contents) {
- snprintf (errmsg, sizeof (errmsg),
- "Unable to allocate memory");
- *op_errstr = gf_strdup (errmsg);
- gf_msg (this->name, GF_LOG_ERROR, ENOMEM,
- GD_MSG_NO_MEMORY,
- "%s", errmsg);
- ret = -1;
- goto out;
- }
-
- fd = open (abs_filename, O_RDONLY);
- if (fd < 0) {
- len = snprintf (errmsg, sizeof (errmsg),
- "Unable to open %s", abs_filename);
- if (len < 0) {
- strcpy(errmsg, "<error>");
- }
- *op_errstr = gf_strdup (errmsg);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_FILE_OP_FAILED,
- "%s", errmsg);
- ret = -1;
- goto out;
- }
-
- do {
- ret = sys_read (fd, buf, sizeof(buf));
- if (ret > 0) {
- memcpy (contents+bytes_read, buf, ret);
- bytes_read += ret;
- }
- } while (ret > 0);
-
- if (bytes_read != stbuf.st_size) {
- len = snprintf (errmsg, sizeof (errmsg),
- "Unable to read all the data from %s",
- abs_filename);
- if (len < 0) {
- strcpy(errmsg, "<error>");
- }
- *op_errstr = gf_strdup (errmsg);
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_READ_ERROR,
- "%s", errmsg);
- ret = -1;
- goto out;
- }
-
- ret = dict_set_int32 (dict, "contents_size", stbuf.st_size);
- if (ret) {
- snprintf (errmsg, sizeof (errmsg), "Unable to set"
- " contents size in dict.");
- *op_errstr = gf_strdup (errmsg);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "%s", errmsg);
- goto out;
- }
-
- ret = dict_set_int32 (dict, "file_mode",
- (int32_t)stbuf.st_mode);
- if (ret) {
- snprintf (errmsg, sizeof (errmsg), "Unable to set"
- " file mode in dict.");
- *op_errstr = gf_strdup (errmsg);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "%s", errmsg);
- goto out;
- }
-
- ret = dict_set_bin (dict, "common_pem_contents",
- contents, stbuf.st_size);
- if (ret) {
- snprintf (errmsg, sizeof (errmsg), "Unable to set"
- " pem contents in dict.");
- *op_errstr = gf_strdup (errmsg);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "%s", errmsg);
- goto out;
- }
- free_contents = _gf_false;
- } else {
- free_contents = _gf_false;
- ret = dict_get_bin (dict, "common_pem_contents",
- (void **) &contents);
- if (ret) {
- snprintf (errmsg, sizeof (errmsg), "Unable to get"
- " pem contents in dict.");
- *op_errstr = gf_strdup (errmsg);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "%s", errmsg);
- goto out;
- }
- ret = dict_get_int32 (dict, "contents_size", &contents_size);
- if (ret) {
- snprintf (errmsg, sizeof (errmsg), "Unable to set"
- " contents size in dict.");
- *op_errstr = gf_strdup (errmsg);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "%s", errmsg);
- goto out;
- }
-
- ret = dict_get_int32 (dict, "file_mode", &file_mode);
- if (ret) {
- snprintf (errmsg, sizeof (errmsg), "Unable to get"
- " file mode in dict.");
- *op_errstr = gf_strdup (errmsg);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "%s", errmsg);
- goto out;
- }
-
- fd = open (abs_filename, O_WRONLY | O_TRUNC | O_CREAT, 0600);
- if (fd < 0) {
- len = snprintf (errmsg, sizeof (errmsg),
- "Unable to open %s", abs_filename);
- if (len < 0) {
- strcpy(errmsg, "<error>");
- }
- *op_errstr = gf_strdup (errmsg);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_FILE_OP_FAILED, "%s", errmsg);
- ret = -1;
- goto out;
- }
-
- bytes_writen = sys_write (fd, contents, contents_size);
-
- if (bytes_writen != contents_size) {
- len = snprintf (errmsg, sizeof (errmsg),
- "Failed to write to %s", abs_filename);
- if (len < 0) {
- strcpy(errmsg, "<error>");
- }
- *op_errstr = gf_strdup (errmsg);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_FILE_OP_FAILED, "%s", errmsg);
- ret = -1;
- goto out;
- }
-
- sys_fchmod (fd, file_mode);
- }
-
- ret = 0;
+ ret = 0;
out:
- if (fd != -1)
- sys_close (fd);
+ if (fd != -1)
+ sys_close(fd);
- if (free_contents)
- GF_FREE(contents);
+ if (free_contents)
+ GF_FREE(contents);
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
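
On the source node, glusterd_op_copy_file() is a classic slurp: lstat() for the size, a read loop that accumulates into a calloc'd buffer, and a hard failure if fewer bytes arrive than stat promised; the buffer is then shipped to peers via dict_set_bin(). A self-contained equivalent on the raw syscalls (the diff reads through a 4096-byte bounce buffer and the sys_* wrappers; this sketch reads straight into the destination):

    #include <fcntl.h>
    #include <stdlib.h>
    #include <sys/stat.h>
    #include <unistd.h>

    static char *
    slurp(const char *path, off_t *size)
    {
        struct stat st;
        char *buf = NULL;
        ssize_t n, total = 0;
        int fd;

        if (lstat(path, &st) != 0)
            return NULL;
        buf = calloc(1, st.st_size + 1); /* +1 keeps it NUL-terminated */
        if (!buf)
            return NULL;
        fd = open(path, O_RDONLY);
        if (fd < 0) {
            free(buf);
            return NULL;
        }
        while ((n = read(fd, buf + total, st.st_size - total)) > 0)
            total += n;
        close(fd);
        if (total != st.st_size) { /* short read: fail, as the diff does */
            free(buf);
            return NULL;
        }
        *size = st.st_size;
        return buf;
    }

    int
    main(void)
    {
        off_t size = 0;
        char *data = slurp("/etc/hostname", &size);

        if (data)
            free(data);
        return data ? 0 : 1;
    }
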
int
-glusterd_op_gsync_set (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
+glusterd_op_gsync_set(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
- int32_t ret = -1;
- int32_t type = -1;
- char *host_uuid = NULL;
- char *slave = NULL;
- char *slave_url = NULL;
- char *slave_vol = NULL;
- char *slave_host = NULL;
- char *volname = NULL;
- char *path_list = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_conf_t *priv = NULL;
- gf_boolean_t is_force = _gf_false;
- char *status_msg = NULL;
- gf_boolean_t is_running = _gf_false;
- char *conf_path = NULL;
- char *key = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
- GF_ASSERT (dict);
- GF_ASSERT (op_errstr);
- GF_ASSERT (rsp_dict);
-
- ret = dict_get_int32 (dict, "type", &type);
- if (ret < 0)
- goto out;
+ int32_t ret = -1;
+ int32_t type = -1;
+ char *host_uuid = NULL;
+ char *slave = NULL;
+ char *slave_url = NULL;
+ char *slave_vol = NULL;
+ char *slave_host = NULL;
+ char *volname = NULL;
+ char *path_list = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ gf_boolean_t is_force = _gf_false;
+ char *status_msg = NULL;
+ gf_boolean_t is_running = _gf_false;
+ char *conf_path = NULL;
+ char *key = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+ GF_ASSERT(dict);
+ GF_ASSERT(op_errstr);
+ GF_ASSERT(rsp_dict);
+
+ ret = dict_get_int32(dict, "type", &type);
+ if (ret < 0)
+ goto out;
+
+ ret = dict_get_str(dict, "host-uuid", &host_uuid);
+ if (ret < 0)
+ goto out;
+
+ if (type == GF_GSYNC_OPTION_TYPE_STATUS) {
+ ret = glusterd_get_gsync_status(dict, op_errstr, rsp_dict);
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "slave", &slave);
+ if (ret < 0)
+ goto out;
+
+ key = slave;
+
+ ret = dict_get_str(dict, "slave_url", &slave_url);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to fetch slave url.");
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "slave_host", &slave_host);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to fetch slave hostname.");
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "slave_vol", &slave_vol);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to fetch slave volume name.");
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "conf_path", &conf_path);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to fetch conf file path.");
+ goto out;
+ }
+
+ if (dict_get_str(dict, "master", &volname) == 0) {
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
+ "Volinfo for"
+ " %s (master) not found",
+ volname);
+ goto out;
+ }
- ret = dict_get_str (dict, "host-uuid", &host_uuid);
- if (ret < 0)
- goto out;
+ ret = glusterd_get_local_brickpaths(volinfo, &path_list);
+ if (!path_list && ret == -1)
+ goto out;
+ }
- if (type == GF_GSYNC_OPTION_TYPE_STATUS) {
- ret = glusterd_get_gsync_status (dict, op_errstr, rsp_dict);
+ if (type == GF_GSYNC_OPTION_TYPE_CONFIG) {
+ ret = glusterd_gsync_configure(volinfo, slave, path_list, dict,
+ rsp_dict, op_errstr);
+ if (!ret) {
+ ret = dict_set_str(rsp_dict, "conf_path", conf_path);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to store conf_file_path.");
goto out;
+ }
}
+ goto out;
+ }
- ret = dict_get_str (dict, "slave", &slave);
- if (ret < 0)
- goto out;
+ if (type == GF_GSYNC_OPTION_TYPE_DELETE) {
+ ret = glusterd_remove_slave_in_info(volinfo, slave, op_errstr);
+ if (ret && !is_force && path_list)
+ goto out;
- key = slave;
+ ret = glusterd_gsync_delete(volinfo, slave, slave_host, slave_vol,
+ path_list, dict, rsp_dict, op_errstr);
+ goto out;
+ }
- ret = dict_get_str (dict, "slave_url", &slave_url);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to fetch slave url.");
- goto out;
- }
+ if (!volinfo) {
+ ret = -1;
+ goto out;
+ }
- ret = dict_get_str (dict, "slave_host", &slave_host);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to fetch slave hostname.");
- goto out;
- }
+ is_force = dict_get_str_boolean(dict, "force", _gf_false);
- ret = dict_get_str (dict, "slave_vol", &slave_vol);
+ if (type == GF_GSYNC_OPTION_TYPE_START) {
+ /* Add slave to the dict indicating geo-rep session is running*/
+ ret = dict_set_dynstr_with_alloc(volinfo->gsync_active_slaves, key,
+ "running");
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to fetch slave volume name.");
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to set key:%s"
+ " value:running in the dict",
+ key);
+ goto out;
}
- ret = dict_get_str (dict, "conf_path", &conf_path);
+ /* If slave volume uuid is not present in gsync_slaves
+ * update it*/
+ ret = glusterd_update_slave_voluuid_slaveinfo(volinfo);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to fetch conf file path.");
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REMOTE_VOL_UUID_FAIL,
+ "Error in updating"
+ " slave volume uuid for old slave info");
+ goto out;
}
- if (dict_get_str (dict, "master", &volname) == 0) {
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_DICT_GET_FAILED, "Volinfo for"
- " %s (master) not found", volname);
- goto out;
- }
-
- ret = glusterd_get_local_brickpaths (volinfo, &path_list);
- if (!path_list && ret == -1)
- goto out;
- }
+ ret = glusterd_start_gsync(volinfo, slave, path_list, conf_path,
+ host_uuid, op_errstr, _gf_false);
- if (type == GF_GSYNC_OPTION_TYPE_CONFIG) {
- ret = glusterd_gsync_configure (volinfo, slave, path_list,
- dict, rsp_dict, op_errstr);
- if (!ret) {
- ret = dict_set_str (rsp_dict, "conf_path", conf_path);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Unable to store conf_file_path.");
- goto out;
- }
- }
- goto out;
- }
-
- if (type == GF_GSYNC_OPTION_TYPE_DELETE) {
- ret = glusterd_remove_slave_in_info(volinfo, slave, op_errstr);
- if (ret && !is_force && path_list)
- goto out;
-
- ret = glusterd_gsync_delete (volinfo, slave, slave_host,
- slave_vol, path_list, dict,
- rsp_dict, op_errstr);
- goto out;
- }
-
- if (!volinfo) {
- ret = -1;
+ /* Delete added slave in the dict if start fails*/
+ if (ret)
+ dict_del(volinfo->gsync_active_slaves, key);
+ }
+
+ if (type == GF_GSYNC_OPTION_TYPE_STOP ||
+ type == GF_GSYNC_OPTION_TYPE_PAUSE ||
+ type == GF_GSYNC_OPTION_TYPE_RESUME) {
+ ret = glusterd_check_gsync_running_local(volinfo->volname, slave,
+ conf_path, &is_running);
+ if (!ret && !is_force && path_list && (_gf_true != is_running)) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_GSYNCD_OP_SET_FAILED,
+ GEOREP
+ " is not "
+ "set up for %s(master) and %s(slave)",
+ volname, slave);
+ *op_errstr = gf_strdup(GEOREP " is not set up");
+ goto out;
+ }
+
+ if (type == GF_GSYNC_OPTION_TYPE_PAUSE) {
+ ret = gd_pause_or_resume_gsync(dict, volname, slave, slave_host,
+ slave_vol, conf_path, op_errstr,
+ _gf_true);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PAUSE_FAILED,
+ GEOREP " Pause Failed");
+ else
+ dict_del(volinfo->gsync_active_slaves, key);
+
+ } else if (type == GF_GSYNC_OPTION_TYPE_RESUME) {
+ /* Add slave to the dict indicating geo-rep session is
+ * running*/
+ ret = dict_set_dynstr_with_alloc(volinfo->gsync_active_slaves, key,
+ "running");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to set "
+ "key:%s value:running in dict",
+ key);
goto out;
- }
-
- is_force = dict_get_str_boolean (dict, "force", _gf_false);
-
- if (type == GF_GSYNC_OPTION_TYPE_START) {
- /* Add slave to the dict indicating geo-rep session is running*/
- ret = dict_set_dynstr_with_alloc (volinfo->gsync_active_slaves,
- key, "running");
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "Unable to set key:%s"
- " value:running in the dict", key);
- goto out;
- }
+ }
- /* If slave volume uuid is not present in gsync_slaves
- * update it*/
- ret = glusterd_update_slave_voluuid_slaveinfo (volinfo);
+ ret = gd_pause_or_resume_gsync(dict, volname, slave, slave_host,
+ slave_vol, conf_path, op_errstr,
+ _gf_false);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RESUME_FAILED,
+ GEOREP " Resume Failed");
+ dict_del(volinfo->gsync_active_slaves, key);
+ }
+ } else {
+ ret = stop_gsync(volname, slave, &status_msg, conf_path, op_errstr,
+ is_force);
+
+ if (ret == 0 && status_msg)
+ ret = dict_set_str(rsp_dict, "gsync-status", status_msg);
+ if (!ret) {
+ ret = glusterd_create_status_file(
+ volinfo->volname, slave, slave_host, slave_vol, "Stopped");
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REMOTE_VOL_UUID_FAIL, "Error in updating"
- " slave volume uuid for old slave info");
- goto out;
- }
-
- ret = glusterd_start_gsync (volinfo, slave, path_list,
- conf_path, host_uuid, op_errstr,
- _gf_false);
-
- /* Delete added slave in the dict if start fails*/
- if (ret)
- dict_del (volinfo->gsync_active_slaves, key);
- }
-
- if (type == GF_GSYNC_OPTION_TYPE_STOP ||
- type == GF_GSYNC_OPTION_TYPE_PAUSE ||
- type == GF_GSYNC_OPTION_TYPE_RESUME) {
- ret = glusterd_check_gsync_running_local (volinfo->volname,
- slave, conf_path,
- &is_running);
- if (!ret && !is_force && path_list &&
- (_gf_true != is_running)) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_GSYNCD_OP_SET_FAILED, GEOREP" is not "
- "set up for %s(master) and %s(slave)",
- volname, slave);
- *op_errstr = gf_strdup (GEOREP" is not set up");
- goto out;
- }
-
- if (type == GF_GSYNC_OPTION_TYPE_PAUSE) {
- ret = gd_pause_or_resume_gsync (dict, volname, slave,
- slave_host, slave_vol,
- conf_path, op_errstr,
- _gf_true);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PAUSE_FAILED,
- GEOREP" Pause Failed");
- else
- dict_del (volinfo->gsync_active_slaves, key);
-
- } else if (type == GF_GSYNC_OPTION_TYPE_RESUME) {
-
- /* Add slave to the dict indicating geo-rep session is
- * running*/
- ret = dict_set_dynstr_with_alloc (
- volinfo->gsync_active_slaves,
- key, "running");
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "Unable to set "
- "key:%s value:running in dict", key);
- goto out;
- }
-
- ret = gd_pause_or_resume_gsync (dict, volname, slave,
- slave_host, slave_vol,
- conf_path, op_errstr,
- _gf_false);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_RESUME_FAILED,
- GEOREP" Resume Failed");
- dict_del (volinfo->gsync_active_slaves, key);
- }
- } else {
-
- ret = stop_gsync (volname, slave, &status_msg,
- conf_path, op_errstr, is_force);
-
- if (ret == 0 && status_msg)
- ret = dict_set_str (rsp_dict, "gsync-status",
- status_msg);
- if (!ret) {
- ret = glusterd_create_status_file (
- volinfo->volname,
- slave, slave_host,
- slave_vol,"Stopped");
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_UPDATE_STATEFILE_FAILED,
- "Unable to update state_file. "
- "Error : %s", strerror (errno));
- }
- dict_del (volinfo->gsync_active_slaves, key);
- }
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_UPDATE_STATEFILE_FAILED,
+ "Unable to update state_file. "
+ "Error : %s",
+ strerror(errno));
}
+ dict_del(volinfo->gsync_active_slaves, key);
+ }
}
+ }
out:
- if (path_list) {
- GF_FREE (path_list);
- path_list = NULL;
- }
+ if (path_list) {
+ GF_FREE(path_list);
+ path_list = NULL;
+ }
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
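
A pattern worth noting across the start/resume/stop branches above: volinfo->gsync_active_slaves is updated optimistically. The slave is marked "running" before the operation, and the entry is rolled back with dict_del() if the start or resume fails (pause and stop delete it on success). A toy, self-contained sketch of that rollback discipline; the table and helpers are hypothetical stand-ins for the dict calls:

    #include <stdio.h>
    #include <string.h>

    /* Toy session table standing in for volinfo->gsync_active_slaves. */
    static char active[8][64];

    static int
    mark_running(const char *key)
    {
        for (int i = 0; i < 8; i++)
            if (!active[i][0]) {
                snprintf(active[i], sizeof(active[i]), "%s", key);
                return 0;
            }
        return -1;
    }

    static void
    unmark(const char *key)
    {
        for (int i = 0; i < 8; i++)
            if (!strcmp(active[i], key))
                active[i][0] = '\0';
    }

    static int
    start_session(const char *key)
    {
        (void)key;
        return -1; /* pretend gsyncd failed to start */
    }

    int
    main(void)
    {
        const char *key = "root@slave1::gvol";

        /* Mark first, roll back on failure, as the START branch does. */
        if (mark_running(key) == 0 && start_session(key) != 0)
            unmark(key);
        return 0;
    }
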
int
-glusterd_get_slave_details_confpath (glusterd_volinfo_t *volinfo,
- dict_t *dict, char **slave_url,
- char **slave_host, char **slave_vol,
- char **conf_path, char **op_errstr)
+glusterd_get_slave_details_confpath(glusterd_volinfo_t *volinfo, dict_t *dict,
+ char **slave_url, char **slave_host,
+ char **slave_vol, char **conf_path,
+ char **op_errstr)
{
- int ret = -1;
- char confpath[PATH_MAX] = "";
- glusterd_conf_t *priv = NULL;
- char *slave = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- priv = this->private;
- GF_ASSERT (priv);
-
- ret = dict_get_str (dict, "slave", &slave);
- if (ret || !slave) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Unable to fetch slave from dict");
- ret = -1;
- goto out;
- }
-
- ret = glusterd_get_slave_info (slave, slave_url,
- slave_host, slave_vol, op_errstr);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVEINFO_FETCH_ERROR,
- "Unable to fetch slave details.");
- ret = -1;
- goto out;
- }
-
- ret = dict_set_str (dict, "slave_url", *slave_url);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Unable to store slave IP.");
- goto out;
- }
-
- ret = dict_set_str (dict, "slave_host", *slave_host);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Unable to store slave hostname");
- goto out;
- }
-
- ret = dict_set_str (dict, "slave_vol", *slave_vol);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Unable to store slave volume name.");
- goto out;
- }
-
- ret = snprintf (confpath, sizeof(confpath) - 1,
- "%s/"GEOREP"/%s_%s_%s/gsyncd.conf",
- priv->workdir, volinfo->volname,
- *slave_host, *slave_vol);
- confpath[ret] = '\0';
- *conf_path = gf_strdup (confpath);
- if (!(*conf_path)) {
- gf_msg (this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
- "Unable to gf_strdup. Error: %s", strerror (errno));
- ret = -1;
- goto out;
- }
-
- ret = dict_set_str (dict, "conf_path", *conf_path);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Unable to store conf_path");
- goto out;
- }
+ int ret = -1;
+ char confpath[PATH_MAX] = "";
+ glusterd_conf_t *priv = NULL;
+ char *slave = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = dict_get_str(dict, "slave", &slave);
+ if (ret || !slave) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to fetch slave from dict");
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_get_slave_info(slave, slave_url, slave_host, slave_vol,
+ op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVEINFO_FETCH_ERROR,
+ "Unable to fetch slave details.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_str(dict, "slave_url", *slave_url);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to store slave IP.");
+ goto out;
+ }
+
+ ret = dict_set_str(dict, "slave_host", *slave_host);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to store slave hostname");
+ goto out;
+ }
+
+ ret = dict_set_str(dict, "slave_vol", *slave_vol);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to store slave volume name.");
+ goto out;
+ }
+
+ ret = snprintf(confpath, sizeof(confpath) - 1,
+ "%s/" GEOREP "/%s_%s_%s/gsyncd.conf", priv->workdir,
+ volinfo->volname, *slave_host, *slave_vol);
+ confpath[ret] = '\0';
+ *conf_path = gf_strdup(confpath);
+ if (!(*conf_path)) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
+ "Unable to gf_strdup. Error: %s", strerror(errno));
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_str(dict, "conf_path", *conf_path);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to store conf_path");
+ goto out;
+ }
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
-
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
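
The conf path built above follows a fixed per-session layout, <workdir>/<GEOREP>/<master>_<slavehost>_<slavevol>/gsyncd.conf. A self-contained replay of that snprintf, assuming GEOREP expands to "geo-replication"; note that snprintf already NUL-terminates, so the explicit terminator in the diff only matters because of the sizeof - 1 bound it passes:

    #include <limits.h>
    #include <stdio.h>

    int
    main(void)
    {
        char confpath[PATH_MAX];
        int n;

        n = snprintf(confpath, sizeof(confpath) - 1,
                     "%s/geo-replication/%s_%s_%s/gsyncd.conf",
                     "/var/lib/glusterd", "gvol", "slave1", "gvol-slave");
        confpath[n] = '\0'; /* mirrors the diff; safe here, inputs are short */
        puts(confpath);
        return 0;
    }
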
int
-glusterd_get_slave_info (char *slave,
- char **slave_url, char **hostname,
- char **slave_vol, char **op_errstr)
+glusterd_get_slave_info(char *slave, char **slave_url, char **hostname,
+ char **slave_vol, char **op_errstr)
{
- char *tmp = NULL;
- char *save_ptr = NULL;
- char **linearr = NULL;
- int32_t ret = -1;
- char errmsg[PATH_MAX] = "";
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- ret = glusterd_urltransform_single (slave, "normalize",
- &linearr);
- if (ret == -1) {
- ret = snprintf (errmsg, sizeof(errmsg) - 1,
- "Invalid Url: %s", slave);
- errmsg[ret] = '\0';
- *op_errstr = gf_strdup (errmsg);
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_NORMALIZE_URL_FAIL,
- "Failed to normalize url");
- goto out;
- }
-
- tmp = strtok_r (linearr[0], "/", &save_ptr);
- tmp = strtok_r (NULL, "/", &save_ptr);
- slave = strtok_r (tmp, ":", &save_ptr);
- if (slave) {
- ret = glusterd_geo_rep_parse_slave (slave, hostname, op_errstr);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVE_URL_INVALID,
- "Invalid slave url: %s", *op_errstr);
- goto out;
- }
- gf_msg_debug (this->name, 0, "Hostname : %s", *hostname);
-
- *slave_url = gf_strdup (slave);
- if (!*slave_url) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_STRDUP_FAILED,
- "Failed to gf_strdup");
- ret = -1;
- goto out;
- }
- gf_msg_debug (this->name, 0, "Slave URL : %s", *slave_url);
- ret = 0;
- } else {
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "Invalid slave name");
- goto out;
+ char *tmp = NULL;
+ char *save_ptr = NULL;
+ char **linearr = NULL;
+ int32_t ret = -1;
+ char errmsg[PATH_MAX] = "";
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = glusterd_urltransform_single(slave, "normalize", &linearr);
+ if (ret == -1) {
+ ret = snprintf(errmsg, sizeof(errmsg) - 1, "Invalid Url: %s", slave);
+ errmsg[ret] = '\0';
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NORMALIZE_URL_FAIL,
+ "Failed to normalize url");
+ goto out;
+ }
+
+ tmp = strtok_r(linearr[0], "/", &save_ptr);
+ tmp = strtok_r(NULL, "/", &save_ptr);
+ slave = strtok_r(tmp, ":", &save_ptr);
+ if (slave) {
+ ret = glusterd_geo_rep_parse_slave(slave, hostname, op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_URL_INVALID,
+ "Invalid slave url: %s", *op_errstr);
+ goto out;
}
+ gf_msg_debug(this->name, 0, "Hostname : %s", *hostname);
- slave = strtok_r (NULL, ":", &save_ptr);
- if (slave) {
- *slave_vol = gf_strdup (slave);
- if (!*slave_vol) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_STRDUP_FAILED,
- "Failed to gf_strdup");
- ret = -1;
- GF_FREE (*slave_url);
- goto out;
- }
- gf_msg_debug (this->name, 0, "Slave Vol : %s", *slave_vol);
- ret = 0;
- } else {
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "Invalid slave name");
- goto out;
+ *slave_url = gf_strdup(slave);
+ if (!*slave_url) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STRDUP_FAILED,
+ "Failed to gf_strdup");
+ ret = -1;
+ goto out;
}
+ gf_msg_debug(this->name, 0, "Slave URL : %s", *slave_url);
+ ret = 0;
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "Invalid slave name");
+ goto out;
+ }
+
+ slave = strtok_r(NULL, ":", &save_ptr);
+ if (slave) {
+ *slave_vol = gf_strdup(slave);
+ if (!*slave_vol) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STRDUP_FAILED,
+ "Failed to gf_strdup");
+ ret = -1;
+ GF_FREE(*slave_url);
+ goto out;
+ }
+ gf_msg_debug(this->name, 0, "Slave Vol : %s", *slave_vol);
+ ret = 0;
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "Invalid slave name");
+ goto out;
+ }
out:
- if (linearr)
- glusterd_urltransform_free (linearr, 1);
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ if (linearr)
+ glusterd_urltransform_free(linearr, 1);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
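
glusterd_get_slave_info() expects the normalized slave URL to look like <scheme>://<user@host>:<slavevol> and peels it apart with strtok_r(): two '/' passes discard the scheme, then ':' splits host from volume. A self-contained replay of that tokenization; the sample URL is illustrative:

    #define _POSIX_C_SOURCE 200809L
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        char url[] = "ssh://root@slave1:gvol-slave";
        char *save = NULL, *tmp, *host, *vol;

        tmp = strtok_r(url, "/", &save);  /* "ssh:" */
        tmp = strtok_r(NULL, "/", &save); /* "root@slave1:gvol-slave" */
        host = strtok_r(tmp, ":", &save); /* "root@slave1" */
        vol = strtok_r(NULL, ":", &save); /* "gvol-slave" */

        if (host && vol)
            printf("host=%s vol=%s\n", host, vol);
        return 0;
    }
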
static void
-runinit_gsyncd_setrx (runner_t *runner, char *conf_path)
+runinit_gsyncd_setrx(runner_t *runner, char *conf_path)
{
- runinit (runner);
- runner_add_args (runner, GSYNCD_PREFIX"/gsyncd", "-c", NULL);
- runner_argprintf (runner, "%s", conf_path);
- runner_add_arg (runner, "--config-set-rx");
+ runinit(runner);
+ runner_add_args(runner, GSYNCD_PREFIX "/gsyncd", "-c", NULL);
+ runner_argprintf(runner, "%s", conf_path);
+ runner_add_arg(runner, "--config-set-rx");
}
static int
-glusterd_check_gsync_present (int *valid_state)
+glusterd_check_gsync_present(int *valid_state)
{
- char buff[PATH_MAX] = {0, };
- runner_t runner = {0,};
- char *ptr = NULL;
- int ret = 0;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- runinit (&runner);
- runner_add_args (&runner, GSYNCD_PREFIX"/gsyncd", "--version", NULL);
- runner_redir (&runner, STDOUT_FILENO, RUN_PIPE);
- ret = runner_start (&runner);
- if (ret == -1) {
- if (errno == ENOENT) {
- gf_msg ("glusterd", GF_LOG_INFO, ENOENT,
- GD_MSG_MODULE_NOT_INSTALLED, GEOREP" module "
- "not installed in the system");
- *valid_state = 0;
- }
- else {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_MODULE_ERROR,
- GEOREP" module not working as desired");
- *valid_state = -1;
- }
- goto out;
- }
-
- ptr = fgets(buff, sizeof(buff), runner_chio (&runner, STDOUT_FILENO));
- if (ptr) {
- if (!strstr (buff, "gsyncd")) {
- ret = -1;
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_MODULE_ERROR,
- GEOREP" module not working as desired");
- *valid_state = -1;
- goto out;
- }
+ char buff[PATH_MAX] = {
+ 0,
+ };
+ runner_t runner = {
+ 0,
+ };
+ char *ptr = NULL;
+ int ret = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ runinit(&runner);
+ runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "--version", NULL);
+ runner_redir(&runner, STDOUT_FILENO, RUN_PIPE);
+ ret = runner_start(&runner);
+ if (ret == -1) {
+ if (errno == ENOENT) {
+ gf_msg("glusterd", GF_LOG_INFO, ENOENT, GD_MSG_MODULE_NOT_INSTALLED,
+ GEOREP
+ " module "
+ "not installed in the system");
+ *valid_state = 0;
} else {
- ret = -1;
- gf_msg ("glusterd", GF_LOG_ERROR, 0, GD_MSG_MODULE_ERROR,
- GEOREP" module not working as desired");
- *valid_state = -1;
- goto out;
- }
-
- ret = 0;
- out:
-
- runner_end (&runner);
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_MODULE_ERROR,
+ GEOREP " module not working as desired");
+ *valid_state = -1;
+ }
+ goto out;
+ }
+
+ ptr = fgets(buff, sizeof(buff), runner_chio(&runner, STDOUT_FILENO));
+ if (ptr) {
+ if (!strstr(buff, "gsyncd")) {
+ ret = -1;
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_MODULE_ERROR,
+ GEOREP " module not working as desired");
+ *valid_state = -1;
+ goto out;
+ }
+ } else {
+ ret = -1;
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_MODULE_ERROR,
+ GEOREP " module not working as desired");
+ *valid_state = -1;
+ goto out;
+ }
+
+ ret = 0;
+out:
- gf_msg_debug ("glusterd", 0, "Returning %d", ret);
- return ret;
+ runner_end(&runner);
+ gf_msg_debug("glusterd", 0, "Returning %d", ret);
+ return ret;
}
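
The create_conf_file() hunk below also reformats the RUN_GSYNCD_CMD helper, a multi-statement macro wrapped in do { ... } while (0) so that it expands to exactly one statement and stays safe under an unbraced if/else. A self-contained illustration of the idiom (this variant returns where the diff's macro does goto out):

    #include <stdio.h>

    #define LOG_AND_BAIL(msg)                                              \
        do {                                                               \
            fprintf(stderr, "%s\n", (msg));                                \
            return -1;                                                     \
        } while (0)

    static int
    check(int ok)
    {
        if (!ok)
            LOG_AND_BAIL("check failed"); /* one statement, no dangling else */
        return 0;
    }

    int
    main(void)
    {
        return check(0) ? 1 : 0;
    }
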
static int
-create_conf_file (glusterd_conf_t *conf, char *conf_path)
-#define RUN_GSYNCD_CMD do { \
- ret = runner_run_reuse (&runner); \
- if (ret == -1) { \
- runner_log (&runner, "glusterd", GF_LOG_ERROR, "command failed"); \
- runner_end (&runner); \
- goto out; \
- } \
- runner_end (&runner); \
-} while (0)
+create_conf_file(glusterd_conf_t *conf, char *conf_path)
+#define RUN_GSYNCD_CMD \
+ do { \
+ ret = runner_run_reuse(&runner); \
+ if (ret == -1) { \
+ runner_log(&runner, "glusterd", GF_LOG_ERROR, "command failed"); \
+ runner_end(&runner); \
+ goto out; \
+ } \
+ runner_end(&runner); \
+ } while (0)
{
- int ret = 0;
- runner_t runner = {0,};
- char georepdir[PATH_MAX] = {0,};
- int valid_state = 0;
-
- valid_state = -1;
- ret = glusterd_check_gsync_present (&valid_state);
- if (-1 == ret) {
- ret = valid_state;
- goto out;
- }
+ int ret = 0;
+ runner_t runner = {
+ 0,
+ };
+ char georepdir[PATH_MAX] = {
+ 0,
+ };
+ int valid_state = 0;
+
+ valid_state = -1;
+ ret = glusterd_check_gsync_present(&valid_state);
+ if (-1 == ret) {
+ ret = valid_state;
+ goto out;
+ }
+
+ ret = snprintf(georepdir, sizeof(georepdir) - 1, "%s/" GEOREP,
+ conf->workdir);
+ georepdir[ret] = '\0';
+
+ /************
+ * master pre-configuration
+ ************/
+
+ /* remote-gsyncd */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_args(&runner, "remote-gsyncd", GSYNCD_PREFIX "/gsyncd", ".", ".",
+ NULL);
+ RUN_GSYNCD_CMD;
+
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_args(&runner, "remote-gsyncd", "/nonexistent/gsyncd", ".",
+ "^ssh:", NULL);
+ RUN_GSYNCD_CMD;
+
+ /* gluster-command-dir */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_args(&runner, "gluster-command-dir", SBIN_DIR "/", ".", ".",
+ NULL);
+ RUN_GSYNCD_CMD;
+
+ /* gluster-params */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_args(&runner, "gluster-params", "aux-gfid-mount acl", ".", ".",
+ NULL);
+ RUN_GSYNCD_CMD;
+
+ /* ssh-command */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_arg(&runner, "ssh-command");
+ runner_argprintf(&runner,
+ "ssh -oPasswordAuthentication=no "
+ "-oStrictHostKeyChecking=no "
+ "-i %s/secret.pem",
+ georepdir);
+ runner_add_args(&runner, ".", ".", NULL);
+ RUN_GSYNCD_CMD;
+
+ /* ssh-command tar */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_arg(&runner, "ssh-command-tar");
+ runner_argprintf(&runner,
+ "ssh -oPasswordAuthentication=no "
+ "-oStrictHostKeyChecking=no "
+ "-i %s/tar_ssh.pem",
+ georepdir);
+ runner_add_args(&runner, ".", ".", NULL);
+ RUN_GSYNCD_CMD;
+
+ /* pid-file */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_arg(&runner, "pid-file");
+ runner_argprintf(&runner,
+ "%s/${mastervol}_${remotehost}_${slavevol}/monitor.pid",
+ georepdir);
+ runner_add_args(&runner, ".", ".", NULL);
+ RUN_GSYNCD_CMD;
+
+ /* geo-rep-working-dir */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_arg(&runner, "georep-session-working-dir");
+ runner_argprintf(&runner, "%s/${mastervol}_${remotehost}_${slavevol}/",
+ georepdir);
+ runner_add_args(&runner, ".", ".", NULL);
+ RUN_GSYNCD_CMD;
+
+ /* state-file */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_arg(&runner, "state-file");
+ runner_argprintf(&runner,
+ "%s/${mastervol}_${remotehost}_${slavevol}/monitor.status",
+ georepdir);
+ runner_add_args(&runner, ".", ".", NULL);
+ RUN_GSYNCD_CMD;
+
+ /* state-detail-file */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_arg(&runner, "state-detail-file");
+ runner_argprintf(
+ &runner,
+ "%s/${mastervol}_${remotehost}_${slavevol}/${eSlave}-detail.status",
+ georepdir);
+ runner_add_args(&runner, ".", ".", NULL);
+ RUN_GSYNCD_CMD;
+
+ /* state-socket */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_arg(&runner, "state-socket-unencoded");
+ runner_argprintf(
+ &runner, "%s/${mastervol}_${remotehost}_${slavevol}/${eSlave}.socket",
+ georepdir);
+ runner_add_args(&runner, ".", ".", NULL);
+ RUN_GSYNCD_CMD;
+
+ /* socketdir */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_args(&runner, "socketdir", GLUSTERD_SOCK_DIR, ".", ".", NULL);
+ RUN_GSYNCD_CMD;
+
+ /* log-file */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_args(&runner, "log-file",
+ DEFAULT_LOG_FILE_DIRECTORY "/" GEOREP
+ "/${mastervol}/${eSlave}.log",
+ ".", ".", NULL);
+ RUN_GSYNCD_CMD;
+
+ /* changelog-log-file */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_args(&runner, "changelog-log-file",
+ DEFAULT_LOG_FILE_DIRECTORY
+ "/" GEOREP "/${mastervol}/${eSlave}${local_id}-changes.log",
+ ".", ".", NULL);
+ RUN_GSYNCD_CMD;
+
+ /* gluster-log-file */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_args(&runner, "gluster-log-file",
+ DEFAULT_LOG_FILE_DIRECTORY
+ "/" GEOREP "/${mastervol}/${eSlave}${local_id}.gluster.log",
+ ".", ".", NULL);
+ RUN_GSYNCD_CMD;
+
+ /* ignore-deletes */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_args(&runner, "ignore-deletes", "false", ".", ".", NULL);
+ RUN_GSYNCD_CMD;
+
+ /* special-sync-mode */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_args(&runner, "special-sync-mode", "partial", ".", ".", NULL);
+ RUN_GSYNCD_CMD;
+
+ /* change-detector == changelog */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_args(&runner, "change-detector", "changelog", ".", ".", NULL);
+ RUN_GSYNCD_CMD;
+
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_arg(&runner, "working-dir");
+ runner_argprintf(&runner, "%s/${mastervol}/${eSlave}",
+ DEFAULT_GLUSTERFSD_MISC_DIRETORY);
+ runner_add_args(&runner, ".", ".", NULL);
+ RUN_GSYNCD_CMD;
+
+ /************
+ * slave pre-configuration
+ ************/
+
+ /* slave-gluster-command-dir */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_args(&runner, "slave-gluster-command-dir", SBIN_DIR "/", ".",
+ NULL);
+ RUN_GSYNCD_CMD;
+
+ /* gluster-params */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_args(&runner, "gluster-params", "aux-gfid-mount acl", ".", NULL);
+ RUN_GSYNCD_CMD;
+
+ /* log-file */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_args(
+ &runner, "log-file",
+ DEFAULT_LOG_FILE_DIRECTORY
+ "/" GEOREP
+ "-slaves/${session_owner}:${local_node}${local_id}.${slavevol}.log",
+ ".", NULL);
+ RUN_GSYNCD_CMD;
+
+ /* MountBroker log-file */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_args(
+ &runner, "log-file-mbr",
+ DEFAULT_LOG_FILE_DIRECTORY
+ "/" GEOREP
+ "-slaves/mbr/${session_owner}:${local_node}${local_id}.${slavevol}.log",
+ ".", NULL);
+ RUN_GSYNCD_CMD;
+
+ /* gluster-log-file */
+ runinit_gsyncd_setrx(&runner, conf_path);
+ runner_add_args(
+ &runner, "gluster-log-file",
+ DEFAULT_LOG_FILE_DIRECTORY
+ "/" GEOREP
+ "-slaves/"
+ "${session_owner}:${local_node}${local_id}.${slavevol}.gluster.log",
+ ".", NULL);
+ RUN_GSYNCD_CMD;
- ret = snprintf (georepdir, sizeof(georepdir) - 1, "%s/"GEOREP,
- conf->workdir);
- georepdir[ret] = '\0';
-
- /************
- * master pre-configuration
- ************/
-
- /* remote-gsyncd */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_args (&runner, "remote-gsyncd", GSYNCD_PREFIX"/gsyncd", ".", ".", NULL);
- RUN_GSYNCD_CMD;
-
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_args (&runner, "remote-gsyncd", "/nonexistent/gsyncd",
- ".", "^ssh:", NULL);
- RUN_GSYNCD_CMD;
-
- /* gluster-command-dir */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_args (&runner, "gluster-command-dir", SBIN_DIR"/",
- ".", ".", NULL);
- RUN_GSYNCD_CMD;
-
- /* gluster-params */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_args (&runner, "gluster-params",
- "aux-gfid-mount acl",
- ".", ".", NULL);
- RUN_GSYNCD_CMD;
-
- /* ssh-command */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_arg (&runner, "ssh-command");
- runner_argprintf (&runner,
- "ssh -oPasswordAuthentication=no "
- "-oStrictHostKeyChecking=no "
- "-i %s/secret.pem", georepdir);
- runner_add_args (&runner, ".", ".", NULL);
- RUN_GSYNCD_CMD;
-
- /* ssh-command tar */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_arg (&runner, "ssh-command-tar");
- runner_argprintf (&runner,
- "ssh -oPasswordAuthentication=no "
- "-oStrictHostKeyChecking=no "
- "-i %s/tar_ssh.pem", georepdir);
- runner_add_args (&runner, ".", ".", NULL);
- RUN_GSYNCD_CMD;
-
- /* pid-file */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_arg (&runner, "pid-file");
- runner_argprintf (&runner, "%s/${mastervol}_${remotehost}_${slavevol}/monitor.pid", georepdir);
- runner_add_args (&runner, ".", ".", NULL);
- RUN_GSYNCD_CMD;
-
- /* geo-rep-working-dir */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_arg (&runner, "georep-session-working-dir");
- runner_argprintf (&runner, "%s/${mastervol}_${remotehost}_${slavevol}/", georepdir);
- runner_add_args (&runner, ".", ".", NULL);
- RUN_GSYNCD_CMD;
-
- /* state-file */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_arg (&runner, "state-file");
- runner_argprintf (&runner, "%s/${mastervol}_${remotehost}_${slavevol}/monitor.status", georepdir);
- runner_add_args (&runner, ".", ".", NULL);
- RUN_GSYNCD_CMD;
-
- /* state-detail-file */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_arg (&runner, "state-detail-file");
- runner_argprintf (&runner, "%s/${mastervol}_${remotehost}_${slavevol}/${eSlave}-detail.status", georepdir);
- runner_add_args (&runner, ".", ".", NULL);
- RUN_GSYNCD_CMD;
-
- /* state-socket */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_arg (&runner, "state-socket-unencoded");
- runner_argprintf (&runner, "%s/${mastervol}_${remotehost}_${slavevol}/${eSlave}.socket", georepdir);
- runner_add_args (&runner, ".", ".", NULL);
- RUN_GSYNCD_CMD;
-
- /* socketdir */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_args (&runner, "socketdir", GLUSTERD_SOCK_DIR, ".", ".", NULL);
- RUN_GSYNCD_CMD;
-
- /* log-file */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_args (&runner,
- "log-file",
- DEFAULT_LOG_FILE_DIRECTORY"/"GEOREP"/${mastervol}/${eSlave}.log",
- ".", ".", NULL);
- RUN_GSYNCD_CMD;
-
- /* changelog-log-file */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_args (&runner,
- "changelog-log-file",
- DEFAULT_LOG_FILE_DIRECTORY"/"GEOREP"/${mastervol}/${eSlave}${local_id}-changes.log",
- ".", ".", NULL);
- RUN_GSYNCD_CMD;
-
- /* gluster-log-file */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_args (&runner,
- "gluster-log-file",
- DEFAULT_LOG_FILE_DIRECTORY"/"GEOREP"/${mastervol}/${eSlave}${local_id}.gluster.log",
- ".", ".", NULL);
- RUN_GSYNCD_CMD;
-
- /* ignore-deletes */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_args (&runner, "ignore-deletes", "false", ".", ".", NULL);
- RUN_GSYNCD_CMD;
-
- /* special-sync-mode */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_args (&runner, "special-sync-mode", "partial", ".", ".", NULL);
- RUN_GSYNCD_CMD;
-
- /* change-detector == changelog */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_args(&runner, "change-detector", "changelog", ".", ".", NULL);
- RUN_GSYNCD_CMD;
-
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_arg(&runner, "working-dir");
- runner_argprintf(&runner, "%s/${mastervol}/${eSlave}",
- DEFAULT_GLUSTERFSD_MISC_DIRETORY);
- runner_add_args (&runner, ".", ".", NULL);
- RUN_GSYNCD_CMD;
-
- /************
- * slave pre-configuration
- ************/
-
- /* slave-gluster-command-dir */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_args (&runner, "slave-gluster-command-dir", SBIN_DIR"/",
- ".", NULL);
- RUN_GSYNCD_CMD;
-
- /* gluster-params */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_args (&runner, "gluster-params",
- "aux-gfid-mount acl",
- ".", NULL);
- RUN_GSYNCD_CMD;
-
- /* log-file */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_args (&runner,
- "log-file",
- DEFAULT_LOG_FILE_DIRECTORY"/"GEOREP"-slaves/${session_owner}:${local_node}${local_id}.${slavevol}.log",
- ".", NULL);
- RUN_GSYNCD_CMD;
-
- /* MountBroker log-file */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_args (&runner,
- "log-file-mbr",
- DEFAULT_LOG_FILE_DIRECTORY"/"GEOREP"-slaves/mbr/${session_owner}:${local_node}${local_id}.${slavevol}.log",
- ".", NULL);
- RUN_GSYNCD_CMD;
-
- /* gluster-log-file */
- runinit_gsyncd_setrx (&runner, conf_path);
- runner_add_args (&runner,
- "gluster-log-file",
- DEFAULT_LOG_FILE_DIRECTORY"/"GEOREP"-slaves/${session_owner}:${local_node}${local_id}.${slavevol}.gluster.log",
- ".", NULL);
- RUN_GSYNCD_CMD;
-
- out:
- return ret ? -1 : 0;
+out:
+ return ret ? -1 : 0;
}
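
Every stanza in the function above follows one runner pattern: runinit_gsyncd_setrx() primes a gsyncd invocation that records a single configuration default, runner_add_args()/runner_argprintf() append argv entries (the key, its value, and the master/slave selectors), and RUN_GSYNCD_CMD executes the assembled command. A stand-alone sketch of that argv-builder idea; the toy_* names and the echo/gsyncd arguments are invented for illustration, and the real runner API does considerably more (printf-style args, logging, no-wait mode):

    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    typedef struct { char *argv[32]; int argc; } toy_runner;

    static void toy_add(toy_runner *r, char *arg)
    {
        if (r->argc < 31)
            r->argv[r->argc++] = arg;
        r->argv[r->argc] = NULL;       /* keep argv NULL-terminated */
    }

    static int toy_run(toy_runner *r)
    {
        int status = 0;
        pid_t pid = fork();

        if (pid < 0)
            return -1;
        if (pid == 0) {
            execvp(r->argv[0], r->argv);
            _exit(127);                /* exec failed */
        }
        if (waitpid(pid, &status, 0) < 0)
            return -1;
        return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
    }

    int main(void)
    {
        toy_runner r = { { 0 }, 0 };

        toy_add(&r, "echo");                          /* stand-in binary */
        toy_add(&r, "remote-gsyncd");                 /* a key set above */
        toy_add(&r, "/usr/libexec/glusterfs/gsyncd"); /* hypothetical value */
        return toy_run(&r);
    }
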
static int
-glusterd_create_essential_dir_files (glusterd_volinfo_t *volinfo, dict_t *dict,
- char *slave, char *slave_host,
- char *slave_vol, char **op_errstr)
+glusterd_create_essential_dir_files(glusterd_volinfo_t *volinfo, dict_t *dict,
+ char *slave, char *slave_host,
+ char *slave_vol, char **op_errstr)
{
- int ret = -1;
- char *conf_path = NULL;
- char *statefile = NULL;
- char buf[PATH_MAX] = "";
- char errmsg[PATH_MAX] = "";
- glusterd_conf_t *conf = NULL;
- struct stat stbuf = {0,};
- xlator_t *this = NULL;
- int32_t len = 0;
-
- this = THIS;
- GF_ASSERT (this);
-
- conf = this->private;
-
- ret = dict_get_str (dict, "conf_path", &conf_path);
- if (ret) {
- snprintf (errmsg, sizeof (errmsg),
- "Unable to fetch conf file path.");
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "%s", errmsg);
- goto out;
- }
-
- ret = dict_get_str (dict, "statefile", &statefile);
- if (ret) {
- snprintf (errmsg, sizeof (errmsg),
- "Unable to fetch statefile path.");
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "%s", errmsg);
- goto out;
- }
-
- ret = snprintf (buf, sizeof(buf), "%s/"GEOREP"/%s_%s_%s",
- conf->workdir, volinfo->volname, slave_host, slave_vol);
- if ((ret < 0) || (ret >= sizeof(buf))) {
- ret = -1;
- goto out;
- }
- ret = mkdir_p (buf, 0777, _gf_true);
- if (ret) {
- len = snprintf (errmsg, sizeof (errmsg), "Unable to create %s"
- ". Error : %s", buf, strerror (errno));
- if (len < 0) {
- strcpy(errmsg, "<error>");
- }
- *op_errstr = gf_strdup (errmsg);
- gf_msg (this->name, GF_LOG_ERROR, errno, GD_MSG_DIR_OP_FAILED,
- "%s", errmsg);
- goto out;
- }
-
- ret = snprintf (buf, PATH_MAX, DEFAULT_LOG_FILE_DIRECTORY"/"GEOREP"/%s",
- volinfo->volname);
- if ((ret < 0) || (ret >= PATH_MAX)) {
- ret = -1;
- goto out;
- }
- ret = mkdir_p (buf, 0777, _gf_true);
- if (ret) {
- len = snprintf (errmsg, sizeof (errmsg), "Unable to create %s"
- ". Error : %s", buf, strerror (errno));
- if (len < 0) {
- strcpy(errmsg, "<error>");
- }
- *op_errstr = gf_strdup (errmsg);
- gf_msg (this->name, GF_LOG_ERROR, errno, GD_MSG_DIR_OP_FAILED,
- "%s", errmsg);
- goto out;
- }
-
- ret = sys_lstat (conf_path, &stbuf);
- if (!ret) {
- gf_msg_debug (this->name, 0, "Session already running."
- " Not creating config file again.");
- } else {
- ret = create_conf_file (conf, conf_path);
- if (ret || sys_lstat (conf_path, &stbuf)) {
- snprintf (errmsg, sizeof (errmsg), "Failed to create"
- " config file(%s).", conf_path);
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_FILE_OP_FAILED, "%s", errmsg);
- goto out;
- }
- }
-
- ret = sys_lstat (statefile, &stbuf);
- if (!ret) {
- gf_msg_debug (this->name, 0, "Session already running."
- " Not creating status file again.");
- goto out;
- } else {
- ret = glusterd_create_status_file (volinfo->volname, slave,
- slave_host, slave_vol,
- "Created");
- if (ret || sys_lstat (statefile, &stbuf)) {
- snprintf (errmsg, sizeof (errmsg), "Unable to create %s"
- ". Error : %s", statefile, strerror (errno));
- *op_errstr = gf_strdup (errmsg);
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_FILE_OP_FAILED, "%s", errmsg);
- ret = -1;
- goto out;
- }
- }
+ int ret = -1;
+ char *conf_path = NULL;
+ char *statefile = NULL;
+ char buf[PATH_MAX] = "";
+ char errmsg[PATH_MAX] = "";
+ glusterd_conf_t *conf = NULL;
+ struct stat stbuf = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ int32_t len = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ conf = this->private;
+
+ ret = dict_get_str(dict, "conf_path", &conf_path);
+ if (ret) {
+ snprintf(errmsg, sizeof(errmsg), "Unable to fetch conf file path.");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ errmsg);
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "statefile", &statefile);
+ if (ret) {
+ snprintf(errmsg, sizeof(errmsg), "Unable to fetch statefile path.");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ errmsg);
+ goto out;
+ }
+
+ ret = snprintf(buf, sizeof(buf), "%s/" GEOREP "/%s_%s_%s", conf->workdir,
+ volinfo->volname, slave_host, slave_vol);
+ if ((ret < 0) || (ret >= sizeof(buf))) {
+ ret = -1;
+ goto out;
+ }
+ ret = mkdir_p(buf, 0777, _gf_true);
+ if (ret) {
+ len = snprintf(errmsg, sizeof(errmsg),
+ "Unable to create %s"
+ ". Error : %s",
+ buf, strerror(errno));
+ if (len < 0) {
+ strcpy(errmsg, "<error>");
+ }
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DIR_OP_FAILED, "%s",
+ errmsg);
+ goto out;
+ }
+
+ ret = snprintf(buf, PATH_MAX, DEFAULT_LOG_FILE_DIRECTORY "/" GEOREP "/%s",
+ volinfo->volname);
+ if ((ret < 0) || (ret >= PATH_MAX)) {
+ ret = -1;
+ goto out;
+ }
+ ret = mkdir_p(buf, 0777, _gf_true);
+ if (ret) {
+ len = snprintf(errmsg, sizeof(errmsg),
+ "Unable to create %s"
+ ". Error : %s",
+ buf, strerror(errno));
+ if (len < 0) {
+ strcpy(errmsg, "<error>");
+ }
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DIR_OP_FAILED, "%s",
+ errmsg);
+ goto out;
+ }
+
+ ret = sys_lstat(conf_path, &stbuf);
+ if (!ret) {
+ gf_msg_debug(this->name, 0,
+ "Session already running."
+ " Not creating config file again.");
+ } else {
+ ret = create_conf_file(conf, conf_path);
+ if (ret || sys_lstat(conf_path, &stbuf)) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Failed to create"
+ " config file(%s).",
+ conf_path);
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED, "%s",
+ errmsg);
+ goto out;
+ }
+ }
+
+ ret = sys_lstat(statefile, &stbuf);
+ if (!ret) {
+ gf_msg_debug(this->name, 0,
+ "Session already running."
+ " Not creating status file again.");
+ goto out;
+ } else {
+ ret = glusterd_create_status_file(volinfo->volname, slave, slave_host,
+ slave_vol, "Created");
+ if (ret || sys_lstat(statefile, &stbuf)) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Unable to create %s"
+ ". Error : %s",
+ statefile, strerror(errno));
+ *op_errstr = gf_strdup(errmsg);
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED, "%s",
+ errmsg);
+ ret = -1;
+ goto out;
+ }
+ }
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
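
One idiom recurs throughout the function above: every snprintf() into a fixed PATH_MAX buffer is followed by the check ((ret < 0) || (ret >= sizeof(buf))). snprintf() returns the length it would have written, so a negative value signals an error and a value at or past the buffer size signals truncation; either way the path is rejected rather than used half-built. A minimal self-contained sketch, where build_session_dir() and its path layout are hypothetical:

    #include <stdio.h>
    #include <limits.h>     /* PATH_MAX on most platforms */

    static int build_session_dir(char *buf, size_t buflen, const char *workdir,
                                 const char *master, const char *host,
                                 const char *slavevol)
    {
        int ret = snprintf(buf, buflen, "%s/geo-replication/%s_%s_%s",
                           workdir, master, host, slavevol);
        if ((ret < 0) || ((size_t)ret >= buflen))
            return -1;      /* error or truncated: fail, as the patch does */
        return 0;
    }

    int main(void)
    {
        char buf[PATH_MAX];
        return build_session_dir(buf, sizeof(buf), "/var/lib/glusterd",
                                 "mastervol", "slavehost", "slavevol");
    }
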
int
-glusterd_op_gsync_create (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
+glusterd_op_gsync_create(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
- char common_pem_file[PATH_MAX] = "";
- char errmsg[PATH_MAX] = {0,};
- char hooks_args[PATH_MAX] = "";
- char uuid_str [64] = "";
- char *host_uuid = NULL;
- char *slave_url = NULL;
- char *slave_url_buf = NULL;
- char *slave_user = NULL;
- char *slave_ip = NULL;
- char *save_ptr = NULL;
- char *slave_host = NULL;
- char *slave_vol = NULL;
- char *arg_buf = NULL;
- char *volname = NULL;
- char *slave = NULL;
- int32_t ret = -1;
- int32_t is_pem_push = -1;
- int32_t ssh_port = 22;
- gf_boolean_t is_force = -1;
- glusterd_conf_t *conf = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
- char old_working_dir[PATH_MAX] = {0};
- char new_working_dir[PATH_MAX] = {0};
- char *slave_voluuid = NULL;
- char *old_slavehost = NULL;
- gf_boolean_t is_existing_session = _gf_false;
- int32_t len = 0;
-
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
- GF_ASSERT (conf);
- GF_ASSERT (dict);
- GF_ASSERT (op_errstr);
-
- ret = glusterd_op_gsync_args_get (dict, op_errstr,
- &volname, &slave, &host_uuid);
- if (ret)
- goto out;
-
- len = snprintf (common_pem_file, sizeof(common_pem_file),
- "%s"GLUSTERD_COMMON_PEM_PUB_FILE, conf->workdir);
- if ((len < 0) || (len >= sizeof(common_pem_file))) {
- ret = -1;
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
- "Volinfo for %s (master) not found", volname);
- goto out;
- }
-
- ret = dict_get_str (dict, "slave_vol", &slave_vol);
- if (ret) {
- snprintf (errmsg, sizeof (errmsg),
- "Unable to fetch slave volume name.");
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "%s", errmsg);
- goto out;
- }
-
- ret = dict_get_str (dict, "slave_url", &slave_url);
- if (ret) {
- snprintf (errmsg, sizeof (errmsg),
- "Unable to fetch slave IP.");
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "%s", errmsg);
- ret = -1;
- goto out;
- }
-
- /* Fetch the slave_user and slave_ip from the slave_url.
- * If the slave_user is not present. Use "root"
- */
- if (strstr(slave_url, "@")) {
- slave_url_buf = gf_strdup (slave_url);
- if (!slave_url_buf) {
- ret = -1;
- goto out;
- }
- slave_user = strtok_r (slave_url, "@", &save_ptr);
- slave_ip = strtok_r (NULL, "@", &save_ptr);
- } else {
- slave_user = "root";
- slave_ip = slave_url;
- }
-
- if (!slave_user || !slave_ip) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_URL_INVALID,
- "Invalid slave url.");
- ret = -1;
- goto out;
- }
-
- ret = dict_get_str (dict, "slave_host", &slave_host);
- if (ret) {
- snprintf (errmsg, sizeof (errmsg),
- "Unable to fetch slave host");
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "%s", errmsg);
- ret = -1;
- goto out;
- }
-
- ret = dict_get_int32 (dict, "ssh_port", &ssh_port);
- if (ret < 0 && ret != -ENOENT) {
- snprintf (errmsg, sizeof (errmsg), "Fetching ssh_port failed");
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "%s", errmsg);
- ret = -1;
- goto out;
- }
-
- is_force = dict_get_str_boolean (dict, "force", _gf_false);
-
- uuid_utoa_r (MY_UUID, uuid_str);
- if (!strcmp (uuid_str, host_uuid)) {
- ret = dict_get_int32 (dict, "push_pem", &is_pem_push);
- if (!ret && is_pem_push) {
- gf_msg_debug (this->name, 0, "Trying to setup"
- " pem files in slave");
- is_pem_push = 1;
- } else
- is_pem_push = 0;
-
- len = snprintf(hooks_args, sizeof(hooks_args),
- "is_push_pem=%d,pub_file=%s,slave_user=%s,"
- "slave_ip=%s,slave_vol=%s,ssh_port=%d",
- is_pem_push, common_pem_file, slave_user,
- slave_ip, slave_vol, ssh_port);
- if ((len < 0) || (len >= sizeof(hooks_args))) {
- ret = -1;
- goto out;
- }
+ char common_pem_file[PATH_MAX] = "";
+ char errmsg[PATH_MAX] = {
+ 0,
+ };
+ char hooks_args[PATH_MAX] = "";
+ char uuid_str[64] = "";
+ char *host_uuid = NULL;
+ char *slave_url = NULL;
+ char *slave_url_buf = NULL;
+ char *slave_user = NULL;
+ char *slave_ip = NULL;
+ char *save_ptr = NULL;
+ char *slave_host = NULL;
+ char *slave_vol = NULL;
+ char *arg_buf = NULL;
+ char *volname = NULL;
+ char *slave = NULL;
+ int32_t ret = -1;
+ int32_t is_pem_push = -1;
+ int32_t ssh_port = 22;
+ gf_boolean_t is_force = -1;
+ glusterd_conf_t *conf = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+ char old_working_dir[PATH_MAX] = {0};
+ char new_working_dir[PATH_MAX] = {0};
+ char *slave_voluuid = NULL;
+ char *old_slavehost = NULL;
+ gf_boolean_t is_existing_session = _gf_false;
+ int32_t len = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
+ GF_ASSERT(conf);
+ GF_ASSERT(dict);
+ GF_ASSERT(op_errstr);
+
+ ret = glusterd_op_gsync_args_get(dict, op_errstr, &volname, &slave,
+ &host_uuid);
+ if (ret)
+ goto out;
+
+ len = snprintf(common_pem_file, sizeof(common_pem_file),
+ "%s" GLUSTERD_COMMON_PEM_PUB_FILE, conf->workdir);
+ if ((len < 0) || (len >= sizeof(common_pem_file))) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ "Volinfo for %s (master) not found", volname);
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "slave_vol", &slave_vol);
+ if (ret) {
+ snprintf(errmsg, sizeof(errmsg), "Unable to fetch slave volume name.");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ errmsg);
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "slave_url", &slave_url);
+ if (ret) {
+ snprintf(errmsg, sizeof(errmsg), "Unable to fetch slave IP.");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ errmsg);
+ ret = -1;
+ goto out;
+ }
+
+ /* Fetch the slave_user and slave_ip from the slave_url.
+     * If the slave_user is not present, use "root".
+ */
+ if (strstr(slave_url, "@")) {
+ slave_url_buf = gf_strdup(slave_url);
+ if (!slave_url_buf) {
+ ret = -1;
+ goto out;
+ }
+ slave_user = strtok_r(slave_url, "@", &save_ptr);
+ slave_ip = strtok_r(NULL, "@", &save_ptr);
+ } else {
+ slave_user = "root";
+ slave_ip = slave_url;
+ }
+
+ if (!slave_user || !slave_ip) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_URL_INVALID,
+ "Invalid slave url.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "slave_host", &slave_host);
+ if (ret) {
+ snprintf(errmsg, sizeof(errmsg), "Unable to fetch slave host");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ errmsg);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_int32(dict, "ssh_port", &ssh_port);
+ if (ret < 0 && ret != -ENOENT) {
+ snprintf(errmsg, sizeof(errmsg), "Fetching ssh_port failed");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ errmsg);
+ ret = -1;
+ goto out;
+ }
+
+ is_force = dict_get_str_boolean(dict, "force", _gf_false);
+
+ uuid_utoa_r(MY_UUID, uuid_str);
+ if (!strcmp(uuid_str, host_uuid)) {
+ ret = dict_get_int32(dict, "push_pem", &is_pem_push);
+ if (!ret && is_pem_push) {
+ gf_msg_debug(this->name, 0,
+                         "Trying to set up"
+ " pem files in slave");
+ is_pem_push = 1;
} else
- snprintf(hooks_args, sizeof(hooks_args),
- "This argument will stop the hooks script");
-
- arg_buf = gf_strdup (hooks_args);
- if (!arg_buf) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_STRDUP_FAILED,
- "Failed to gf_strdup");
- if (is_force) {
- ret = 0;
- goto create_essentials;
- }
- ret = -1;
- goto out;
- }
-
- ret = dict_set_str (dict, "hooks_args", arg_buf);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Failed to set hooks_args in dict.");
- if (is_force) {
- ret = 0;
- goto create_essentials;
- }
- goto out;
- }
+ is_pem_push = 0;
+
+ len = snprintf(hooks_args, sizeof(hooks_args),
+ "is_push_pem=%d,pub_file=%s,slave_user=%s,"
+ "slave_ip=%s,slave_vol=%s,ssh_port=%d",
+ is_pem_push, common_pem_file, slave_user, slave_ip,
+ slave_vol, ssh_port);
+ if ((len < 0) || (len >= sizeof(hooks_args))) {
+ ret = -1;
+ goto out;
+ }
+ } else
+ snprintf(hooks_args, sizeof(hooks_args),
+ "This argument will stop the hooks script");
+
+ arg_buf = gf_strdup(hooks_args);
+ if (!arg_buf) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STRDUP_FAILED,
+ "Failed to gf_strdup");
+ if (is_force) {
+ ret = 0;
+ goto create_essentials;
+ }
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_str(dict, "hooks_args", arg_buf);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set hooks_args in dict.");
+ if (is_force) {
+ ret = 0;
+ goto create_essentials;
+ }
+ goto out;
+ }
create_essentials:
- /* Fetch slave volume uuid, to get stored in volume info. */
- ret = dict_get_str (dict, "slave_voluuid", &slave_voluuid);
+ /* Fetch slave volume uuid, to get stored in volume info. */
+ ret = dict_get_str(dict, "slave_voluuid", &slave_voluuid);
+ if (ret) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Unable to fetch slave volume uuid from dict");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ errmsg);
+ ret = -1;
+ goto out;
+ }
+
+ is_existing_session = dict_get_str_boolean(dict, "existing_session",
+ _gf_false);
+ if (is_existing_session) {
+ ret = dict_get_str(dict, "old_slavehost", &old_slavehost);
if (ret) {
- snprintf (errmsg, sizeof (errmsg),
- "Unable to fetch slave volume uuid from dict");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "%s", errmsg);
- ret = -1;
- goto out;
+ snprintf(errmsg, sizeof(errmsg), "Unable to fetch old_slavehost");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ errmsg);
+ ret = -1;
+ goto out;
}
- is_existing_session = dict_get_str_boolean (dict, "existing_session",
- _gf_false);
- if (is_existing_session) {
- ret = dict_get_str (dict, "old_slavehost", &old_slavehost);
- if (ret) {
- snprintf (errmsg, sizeof (errmsg),
- "Unable to fetch old_slavehost");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "%s", errmsg);
- ret = -1;
- goto out;
- }
-
- /* Rename existing geo-rep session with new Slave Host */
- ret = snprintf (old_working_dir,
- sizeof (old_working_dir) - 1,
- "%s/"GEOREP"/%s_%s_%s", conf->workdir,
- volinfo->volname, old_slavehost,
- slave_vol);
-
- ret = snprintf (new_working_dir,
- sizeof (new_working_dir) - 1,
- "%s/"GEOREP"/%s_%s_%s", conf->workdir,
- volinfo->volname, slave_host, slave_vol);
-
- ret = sys_rename (old_working_dir, new_working_dir);
- if (!ret) {
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_FORCE_CREATE_SESSION,
- "rename of old working dir %s to "
- "new working dir %s is done! ",
- old_working_dir, new_working_dir);
- } else {
- if (errno == ENOENT) {
- /* log error, but proceed with directory
- * creation below */
- gf_msg_debug (this->name, 0,
- "old_working_dir(%s) "
- "not present.",
- old_working_dir);
- } else {
- len = snprintf (errmsg, sizeof (errmsg),
- "rename of old working dir %s "
- "to new working dir %s "
- "failed! Error: %s",
- old_working_dir,
- new_working_dir,
- strerror (errno));
- if (len < 0) {
- strcpy(errmsg, "<error>");
- }
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_FORCE_CREATE_SESSION,
- "rename of old working dir %s to "
- "new working dir %s failed! Error: %s!",
- old_working_dir, new_working_dir,
- strerror (errno));
-
- ret = -1;
- goto out;
- }
- }
- }
-
- ret = glusterd_create_essential_dir_files (volinfo, dict, slave,
- slave_host, slave_vol,
- op_errstr);
- if (ret)
- goto out;
+ /* Rename existing geo-rep session with new Slave Host */
+ ret = snprintf(old_working_dir, sizeof(old_working_dir) - 1,
+ "%s/" GEOREP "/%s_%s_%s", conf->workdir,
+ volinfo->volname, old_slavehost, slave_vol);
- ret = glusterd_store_slave_in_info (volinfo, slave,
- host_uuid, slave_voluuid,
- op_errstr, is_force);
- if (ret) {
- snprintf (errmsg, sizeof (errmsg), "Unable to store"
- " slave info.");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SLAVEINFO_STORE_ERROR,
- "%s", errmsg);
- goto out;
- }
+ ret = snprintf(new_working_dir, sizeof(new_working_dir) - 1,
+ "%s/" GEOREP "/%s_%s_%s", conf->workdir,
+ volinfo->volname, slave_host, slave_vol);
- /* Enable marker and changelog */
- ret = glusterd_set_gsync_confs (volinfo);
- if (ret != 0) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_MARKER_START_FAIL, "marker/changelog"
- " start failed");
- snprintf (errmsg, sizeof (errmsg),
- "Index initialization failed");
+ ret = sys_rename(old_working_dir, new_working_dir);
+ if (!ret) {
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_FORCE_CREATE_SESSION,
+ "rename of old working dir %s to "
+ "new working dir %s is done! ",
+ old_working_dir, new_working_dir);
+ } else {
+ if (errno == ENOENT) {
+ /* log error, but proceed with directory
+ * creation below */
+ gf_msg_debug(this->name, 0,
+ "old_working_dir(%s) "
+ "not present.",
+ old_working_dir);
+ } else {
+ len = snprintf(errmsg, sizeof(errmsg),
+ "rename of old working dir %s "
+ "to new working dir %s "
+ "failed! Error: %s",
+ old_working_dir, new_working_dir,
+ strerror(errno));
+ if (len < 0) {
+ strcpy(errmsg, "<error>");
+ }
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_FORCE_CREATE_SESSION,
+ "rename of old working dir %s to "
+ "new working dir %s failed! Error: %s!",
+ old_working_dir, new_working_dir, strerror(errno));
ret = -1;
goto out;
+ }
}
+ }
+
+ ret = glusterd_create_essential_dir_files(volinfo, dict, slave, slave_host,
+ slave_vol, op_errstr);
+ if (ret)
+ goto out;
+
+ ret = glusterd_store_slave_in_info(volinfo, slave, host_uuid, slave_voluuid,
+ op_errstr, is_force);
+ if (ret) {
+ snprintf(errmsg, sizeof(errmsg),
+ "Unable to store"
+ " slave info.");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVEINFO_STORE_ERROR, "%s",
+ errmsg);
+ goto out;
+ }
+
+ /* Enable marker and changelog */
+ ret = glusterd_set_gsync_confs(volinfo);
+ if (ret != 0) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_MARKER_START_FAIL,
+ "marker/changelog"
+ " start failed");
+ snprintf(errmsg, sizeof(errmsg), "Index initialization failed");
+
+ ret = -1;
+ goto out;
+ }
out:
- if (ret && errmsg[0] != '\0') {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR,
- "%s", errmsg);
- *op_errstr = gf_strdup (errmsg);
- }
-
- GF_FREE (slave_url_buf);
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ if (ret && errmsg[0] != '\0') {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR, "%s", errmsg);
+ *op_errstr = gf_strdup(errmsg);
+ }
+
+ GF_FREE(slave_url_buf);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
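
The slave_url handling in glusterd_op_gsync_create() above splits an optional "user@host" form and defaults the user to "root" when no "@" is present; strtok_r() modifies the string it scans, which callers that still need the original must account for. A stand-alone sketch of the split, with a hypothetical URL:

    #define _POSIX_C_SOURCE 200809L   /* for strtok_r() */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char url[] = "geoaccount@slave.example.com";  /* hypothetical */
        char *save = NULL;
        char *user = "root";                          /* default user */
        char *host = url;

        if (strchr(url, '@')) {
            user = strtok_r(url, "@", &save);
            host = strtok_r(NULL, "@", &save);
        }
        if (!user || !host)
            return 1;       /* invalid slave url, as the patch reports */
        printf("user=%s host=%s\n", user, host);
        return 0;
    }
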
diff --git a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc-helper.c
index 72baa3424e9..0b56a0eb45a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc-helper.c
+++ b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc-helper.c
@@ -16,209 +16,219 @@
#include "glusterd-volgen.h"
void
-glusterd_svc_build_gfproxyd_rundir (glusterd_volinfo_t *volinfo,
- char *path, int path_len)
+glusterd_svc_build_gfproxyd_rundir(glusterd_volinfo_t *volinfo, char *path,
+ int path_len)
{
- char workdir[PATH_MAX] = {0,};
- glusterd_conf_t *priv = THIS->private;
+ char workdir[PATH_MAX] = {
+ 0,
+ };
+ glusterd_conf_t *priv = THIS->private;
- GLUSTERD_GET_VOLUME_PID_DIR (workdir, volinfo, priv);
+ GLUSTERD_GET_VOLUME_PID_DIR(workdir, volinfo, priv);
- snprintf (path, path_len, "%s", workdir);
+ snprintf(path, path_len, "%s", workdir);
}
void
-glusterd_svc_build_gfproxyd_socket_filepath (glusterd_volinfo_t *volinfo,
- char *path, int path_len)
+glusterd_svc_build_gfproxyd_socket_filepath(glusterd_volinfo_t *volinfo,
+ char *path, int path_len)
{
- char sockfilepath[PATH_MAX] = {0,};
- char rundir[PATH_MAX] = {0,};
- int32_t len = 0;
-
- glusterd_svc_build_gfproxyd_rundir (volinfo, rundir, sizeof (rundir));
- len = snprintf (sockfilepath, sizeof (sockfilepath), "%s/run-%s",
- rundir, uuid_utoa (MY_UUID));
- if ((len < 0) || (len >= sizeof(sockfilepath))) {
- sockfilepath[0] = 0;
- }
-
- glusterd_set_socket_filepath (sockfilepath, path, path_len);
+ char sockfilepath[PATH_MAX] = {
+ 0,
+ };
+ char rundir[PATH_MAX] = {
+ 0,
+ };
+ int32_t len = 0;
+
+ glusterd_svc_build_gfproxyd_rundir(volinfo, rundir, sizeof(rundir));
+ len = snprintf(sockfilepath, sizeof(sockfilepath), "%s/run-%s", rundir,
+ uuid_utoa(MY_UUID));
+ if ((len < 0) || (len >= sizeof(sockfilepath))) {
+ sockfilepath[0] = 0;
+ }
+
+ glusterd_set_socket_filepath(sockfilepath, path, path_len);
}
void
-glusterd_svc_build_gfproxyd_pidfile (glusterd_volinfo_t *volinfo,
- char *path, int path_len)
+glusterd_svc_build_gfproxyd_pidfile(glusterd_volinfo_t *volinfo, char *path,
+ int path_len)
{
- char rundir[PATH_MAX] = {0,};
+ char rundir[PATH_MAX] = {
+ 0,
+ };
- glusterd_svc_build_gfproxyd_rundir (volinfo, rundir, sizeof (rundir));
+ glusterd_svc_build_gfproxyd_rundir(volinfo, rundir, sizeof(rundir));
- snprintf (path, path_len, "%s/%s.gfproxyd.pid", rundir, volinfo->volname);
+ snprintf(path, path_len, "%s/%s.gfproxyd.pid", rundir, volinfo->volname);
}
void
-glusterd_svc_build_gfproxyd_volfile_path (glusterd_volinfo_t *volinfo,
- char *path, int path_len)
+glusterd_svc_build_gfproxyd_volfile_path(glusterd_volinfo_t *volinfo,
+ char *path, int path_len)
{
- char workdir[PATH_MAX] = {0,};
- glusterd_conf_t *priv = THIS->private;
+ char workdir[PATH_MAX] = {
+ 0,
+ };
+ glusterd_conf_t *priv = THIS->private;
- GLUSTERD_GET_VOLUME_DIR (workdir, volinfo, priv);
+ GLUSTERD_GET_VOLUME_DIR(workdir, volinfo, priv);
- snprintf (path, path_len, "%s/%s.gfproxyd.vol", workdir,
- volinfo->volname);
+ snprintf(path, path_len, "%s/%s.gfproxyd.vol", workdir, volinfo->volname);
}
void
-glusterd_svc_build_gfproxyd_logdir (char *logdir, char *volname, size_t len)
+glusterd_svc_build_gfproxyd_logdir(char *logdir, char *volname, size_t len)
{
- snprintf (logdir, len, "%s/gfproxy/%s", DEFAULT_LOG_FILE_DIRECTORY,
- volname);
+ snprintf(logdir, len, "%s/gfproxy/%s", DEFAULT_LOG_FILE_DIRECTORY, volname);
}
void
-glusterd_svc_build_gfproxyd_logfile (char *logfile, char *logdir, size_t len)
+glusterd_svc_build_gfproxyd_logfile(char *logfile, char *logdir, size_t len)
{
- snprintf (logfile, len, "%s/gfproxyd.log", logdir);
+ snprintf(logfile, len, "%s/gfproxyd.log", logdir);
}
int
-glusterd_is_gfproxyd_enabled (glusterd_volinfo_t *volinfo)
+glusterd_is_gfproxyd_enabled(glusterd_volinfo_t *volinfo)
{
- return glusterd_volinfo_get_boolean (volinfo, VKEY_CONFIG_GFPROXY);
+ return glusterd_volinfo_get_boolean(volinfo, VKEY_CONFIG_GFPROXY);
}
static int
-glusterd_svc_get_gfproxyd_volfile (glusterd_volinfo_t *volinfo, char *svc_name,
- char *orgvol, char **tmpvol, int path_len)
+glusterd_svc_get_gfproxyd_volfile(glusterd_volinfo_t *volinfo, char *svc_name,
+ char *orgvol, char **tmpvol, int path_len)
{
- int tmp_fd = -1;
- int ret = -1;
- int need_unlink = 0;
-
- glusterd_svc_build_gfproxyd_volfile_path (volinfo, orgvol,
- path_len);
-
- ret = gf_asprintf(tmpvol, "/tmp/g%s-XXXXXX", svc_name);
- if (ret < 0) {
- goto out;
- }
-
- /* coverity[secure_temp] mkstemp uses 0600 as the mode and is safe */
- tmp_fd = mkstemp (*tmpvol);
- if (tmp_fd < 0) {
- gf_msg ("glusterd", GF_LOG_WARNING, errno,
- GD_MSG_FILE_OP_FAILED, "Unable to create temp file"
- " %s:(%s)", *tmpvol, strerror (errno));
- ret = -1;
- goto out;
- }
-
- need_unlink = 1;
- ret = glusterd_build_gfproxyd_volfile (volinfo, *tmpvol);
+ int tmp_fd = -1;
+ int ret = -1;
+ int need_unlink = 0;
+
+ glusterd_svc_build_gfproxyd_volfile_path(volinfo, orgvol, path_len);
+
+ ret = gf_asprintf(tmpvol, "/tmp/g%s-XXXXXX", svc_name);
+ if (ret < 0) {
+ goto out;
+ }
+
+ /* coverity[secure_temp] mkstemp uses 0600 as the mode and is safe */
+ tmp_fd = mkstemp(*tmpvol);
+ if (tmp_fd < 0) {
+ gf_msg("glusterd", GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
+ "Unable to create temp file"
+ " %s:(%s)",
+ *tmpvol, strerror(errno));
+ ret = -1;
+ goto out;
+ }
+
+ need_unlink = 1;
+ ret = glusterd_build_gfproxyd_volfile(volinfo, *tmpvol);
out:
- if (need_unlink && ret < 0)
- sys_unlink (*tmpvol);
+ if (need_unlink && ret < 0)
+ sys_unlink(*tmpvol);
- if ((ret < 0) && (*tmpvol != NULL)) {
- GF_FREE(*tmpvol);
- *tmpvol = NULL;
- }
+ if ((ret < 0) && (*tmpvol != NULL)) {
+ GF_FREE(*tmpvol);
+ *tmpvol = NULL;
+ }
- if (tmp_fd >= 0)
- sys_close (tmp_fd);
+ if (tmp_fd >= 0)
+ sys_close(tmp_fd);
- return ret;
+ return ret;
}
int
-glusterd_svc_check_gfproxyd_volfile_identical (char *svc_name,
- glusterd_volinfo_t *volinfo,
- gf_boolean_t *identical)
+glusterd_svc_check_gfproxyd_volfile_identical(char *svc_name,
+ glusterd_volinfo_t *volinfo,
+ gf_boolean_t *identical)
{
- char orgvol[PATH_MAX] = {0,};
- char *tmpvol = NULL;
- int ret = -1;
- int need_unlink = 0;
+ char orgvol[PATH_MAX] = {
+ 0,
+ };
+ char *tmpvol = NULL;
+ int ret = -1;
+ int need_unlink = 0;
- GF_VALIDATE_OR_GOTO ("glusterd", identical, out);
+ GF_VALIDATE_OR_GOTO("glusterd", identical, out);
- ret = glusterd_svc_get_gfproxyd_volfile (volinfo, svc_name, orgvol,
- &tmpvol, PATH_MAX);
- if (ret)
- goto out;
+ ret = glusterd_svc_get_gfproxyd_volfile(volinfo, svc_name, orgvol, &tmpvol,
+ PATH_MAX);
+ if (ret)
+ goto out;
- need_unlink = 1;
- ret = glusterd_check_files_identical (orgvol, tmpvol,
- identical);
- if (ret)
- goto out;
+ need_unlink = 1;
+ ret = glusterd_check_files_identical(orgvol, tmpvol, identical);
+ if (ret)
+ goto out;
out:
- if (need_unlink)
- sys_unlink (tmpvol);
+ if (need_unlink)
+ sys_unlink(tmpvol);
- if (tmpvol != NULL)
- GF_FREE (tmpvol);
+ if (tmpvol != NULL)
+ GF_FREE(tmpvol);
- return ret;
+ return ret;
}
int
-glusterd_svc_check_gfproxyd_topology_identical (char *svc_name,
- glusterd_volinfo_t *volinfo,
- gf_boolean_t *identical)
+glusterd_svc_check_gfproxyd_topology_identical(char *svc_name,
+ glusterd_volinfo_t *volinfo,
+ gf_boolean_t *identical)
{
- char orgvol[PATH_MAX] = {0,};
- char *tmpvol = NULL;
- int ret = -1;
- int tmpclean = 0;
+ char orgvol[PATH_MAX] = {
+ 0,
+ };
+ char *tmpvol = NULL;
+ int ret = -1;
+ int tmpclean = 0;
- GF_VALIDATE_OR_GOTO ("glusterd", identical, out);
+ GF_VALIDATE_OR_GOTO("glusterd", identical, out);
- ret = glusterd_svc_get_gfproxyd_volfile (volinfo, svc_name, orgvol,
- &tmpvol, PATH_MAX);
- if (ret)
- goto out;
+ ret = glusterd_svc_get_gfproxyd_volfile(volinfo, svc_name, orgvol, &tmpvol,
+ PATH_MAX);
+ if (ret)
+ goto out;
- tmpclean = 1; /* SET the flag to unlink() tmpfile */
+ tmpclean = 1; /* SET the flag to unlink() tmpfile */
- /* Compare the topology of volfiles */
- ret = glusterd_check_topology_identical (orgvol, tmpvol,
- identical);
+ /* Compare the topology of volfiles */
+ ret = glusterd_check_topology_identical(orgvol, tmpvol, identical);
out:
- if (tmpclean)
- sys_unlink (tmpvol);
+ if (tmpclean)
+ sys_unlink(tmpvol);
- if (tmpvol != NULL)
- GF_FREE (tmpvol);
+ if (tmpvol != NULL)
+ GF_FREE(tmpvol);
- return ret;
+ return ret;
}
glusterd_volinfo_t *
-glusterd_gfproxyd_volinfo_from_svc (glusterd_svc_t *svc)
+glusterd_gfproxyd_volinfo_from_svc(glusterd_svc_t *svc)
{
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_gfproxydsvc_t *gfproxyd = NULL;
-
- /* Get volinfo->gfproxyd from svc object */
- gfproxyd = cds_list_entry (svc, glusterd_gfproxydsvc_t, svc);
- if (!gfproxyd) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_SNAPD_OBJ_GET_FAIL, "Failed to get gfproxyd "
- "object from gfproxyd service");
- goto out;
- }
-
- /* Get volinfo from gfproxyd */
- volinfo = cds_list_entry (gfproxyd, glusterd_volinfo_t, gfproxyd);
- if (!volinfo) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_VOLINFO_GET_FAIL, "Failed to get volinfo from "
- "from gfproxyd");
- goto out;
- }
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_gfproxydsvc_t *gfproxyd = NULL;
+
+ /* Get volinfo->gfproxyd from svc object */
+ gfproxyd = cds_list_entry(svc, glusterd_gfproxydsvc_t, svc);
+ if (!gfproxyd) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SNAPD_OBJ_GET_FAIL,
+ "Failed to get gfproxyd "
+ "object from gfproxyd service");
+ goto out;
+ }
+
+ /* Get volinfo from gfproxyd */
+ volinfo = cds_list_entry(gfproxyd, glusterd_volinfo_t, gfproxyd);
+ if (!volinfo) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+               "Failed to get volinfo "
+ "from gfproxyd");
+ goto out;
+ }
out:
- return volinfo;
+ return volinfo;
}
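
glusterd_gfproxyd_volinfo_from_svc() above recovers the containing structure from a pointer to an embedded member, twice, via cds_list_entry(), which behaves like the classic container-of macro: pure pointer arithmetic with offsetof(). A self-contained sketch using stand-in types rather than the real glusterd structures:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct svc { int port; };
    struct vol { char name[16]; struct svc svc; };

    int main(void)
    {
        struct vol v = { "demo", { 24007 } };
        struct svc *s = &v.svc;          /* only the member is visible */
        struct vol *owner = container_of(s, struct vol, svc);

        printf("%s %d\n", owner->name, owner->svc.port);  /* demo 24007 */
        return 0;
    }

Since the conversion is plain arithmetic, it cannot yield NULL for a valid member pointer; the NULL checks after cds_list_entry() above are defensive rather than load-bearing.
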
diff --git a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c
index b92109cc027..0a78d4d1fd0 100644
--- a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c
@@ -21,450 +21,452 @@
#include "syscall.h"
void
-glusterd_gfproxydsvc_build (glusterd_svc_t *svc)
+glusterd_gfproxydsvc_build(glusterd_svc_t *svc)
{
- svc->manager = glusterd_gfproxydsvc_manager;
- svc->start = glusterd_gfproxydsvc_start;
- svc->stop = glusterd_gfproxydsvc_stop;
- svc->reconfigure = glusterd_gfproxydsvc_reconfigure;
+ svc->manager = glusterd_gfproxydsvc_manager;
+ svc->start = glusterd_gfproxydsvc_start;
+ svc->stop = glusterd_gfproxydsvc_stop;
+ svc->reconfigure = glusterd_gfproxydsvc_reconfigure;
}
-
-int glusterd_gfproxydsvc_stop (glusterd_svc_t *svc, int sig)
+int
+glusterd_gfproxydsvc_stop(glusterd_svc_t *svc, int sig)
{
- glusterd_volinfo_t *volinfo = NULL;
- int ret = 0;
+ glusterd_volinfo_t *volinfo = NULL;
+ int ret = 0;
- ret = glusterd_svc_stop (svc, sig);
- if (ret)
- goto out;
+ ret = glusterd_svc_stop(svc, sig);
+ if (ret)
+ goto out;
- volinfo = glusterd_gfproxyd_volinfo_from_svc (svc);
- volinfo->gfproxyd.port = 0;
+ volinfo = glusterd_gfproxyd_volinfo_from_svc(svc);
+ volinfo->gfproxyd.port = 0;
out:
- return ret;
+ return ret;
}
-
-int glusterd_gfproxydsvc_init (glusterd_volinfo_t *volinfo)
+int
+glusterd_gfproxydsvc_init(glusterd_volinfo_t *volinfo)
{
- int ret = -1;
- char rundir[PATH_MAX] = {0,};
- char sockpath[PATH_MAX] = {0,};
- char pidfile[PATH_MAX] = {0,};
- char volfile[PATH_MAX] = {0,};
- char logdir[PATH_MAX] = {0,};
- char logfile[PATH_MAX] = {0,};
- char volfileid[256] = {0};
- glusterd_svc_t *svc = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_conn_notify_t notify = NULL;
- xlator_t *this = NULL;
- char *volfileserver = NULL;
- int32_t len = 0;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
-
- priv = this->private;
- GF_VALIDATE_OR_GOTO (this->name, priv, out);
-
- svc = &(volinfo->gfproxyd.svc);
-
- ret = snprintf (svc->name, sizeof (svc->name), "%s", gfproxyd_svc_name);
- if (ret < 0)
- goto out;
-
- notify = glusterd_svc_common_rpc_notify;
-
- glusterd_svc_build_gfproxyd_rundir (volinfo, rundir, sizeof (rundir));
- glusterd_svc_create_rundir (rundir);
-
- /* Initialize the connection mgmt */
- glusterd_svc_build_gfproxyd_socket_filepath (volinfo, sockpath,
- sizeof (sockpath));
- ret = glusterd_conn_init (&(svc->conn), sockpath, 600, notify);
- if (ret)
- goto out;
-
- /* Initialize the process mgmt */
- glusterd_svc_build_gfproxyd_pidfile (volinfo, pidfile, sizeof (pidfile));
- glusterd_svc_build_gfproxyd_volfile_path (volinfo, volfile,
- sizeof (volfile));
- glusterd_svc_build_gfproxyd_logdir (logdir, volinfo->volname,
- sizeof (logdir));
- ret = mkdir_p (logdir, 0755, _gf_true);
- if ((ret == -1) && (EEXIST != errno)) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_CREATE_DIR_FAILED, "Unable to create logdir %s",
- logdir);
- goto out;
- }
- glusterd_svc_build_gfproxyd_logfile (logfile, logdir, sizeof (logfile));
- len = snprintf (volfileid, sizeof (volfileid), "gfproxyd/%s",
- volinfo->volname);
- if ((len < 0) || (len >= sizeof(volfileid))) {
- ret = -1;
- goto out;
- }
-
- if (dict_get_strn (this->options, "transport.socket.bind-address",
- SLEN ("transport.socket.bind-address"),
- &volfileserver) != 0) {
- volfileserver = "localhost";
- }
- ret = glusterd_proc_init (&(svc->proc), gfproxyd_svc_name, pidfile,
- logdir, logfile, volfile, volfileid,
- volfileserver);
- if (ret)
- goto out;
+ int ret = -1;
+ char rundir[PATH_MAX] = {
+ 0,
+ };
+ char sockpath[PATH_MAX] = {
+ 0,
+ };
+ char pidfile[PATH_MAX] = {
+ 0,
+ };
+ char volfile[PATH_MAX] = {
+ 0,
+ };
+ char logdir[PATH_MAX] = {
+ 0,
+ };
+ char logfile[PATH_MAX] = {
+ 0,
+ };
+ char volfileid[256] = {0};
+ glusterd_svc_t *svc = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_conn_notify_t notify = NULL;
+ xlator_t *this = NULL;
+ char *volfileserver = NULL;
+ int32_t len = 0;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, priv, out);
+
+ svc = &(volinfo->gfproxyd.svc);
+
+ ret = snprintf(svc->name, sizeof(svc->name), "%s", gfproxyd_svc_name);
+ if (ret < 0)
+ goto out;
+
+ notify = glusterd_svc_common_rpc_notify;
+
+ glusterd_svc_build_gfproxyd_rundir(volinfo, rundir, sizeof(rundir));
+ glusterd_svc_create_rundir(rundir);
+
+ /* Initialize the connection mgmt */
+ glusterd_svc_build_gfproxyd_socket_filepath(volinfo, sockpath,
+ sizeof(sockpath));
+ ret = glusterd_conn_init(&(svc->conn), sockpath, 600, notify);
+ if (ret)
+ goto out;
+
+ /* Initialize the process mgmt */
+ glusterd_svc_build_gfproxyd_pidfile(volinfo, pidfile, sizeof(pidfile));
+ glusterd_svc_build_gfproxyd_volfile_path(volinfo, volfile, sizeof(volfile));
+ glusterd_svc_build_gfproxyd_logdir(logdir, volinfo->volname,
+ sizeof(logdir));
+ ret = mkdir_p(logdir, 0755, _gf_true);
+ if ((ret == -1) && (EEXIST != errno)) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED,
+ "Unable to create logdir %s", logdir);
+ goto out;
+ }
+ glusterd_svc_build_gfproxyd_logfile(logfile, logdir, sizeof(logfile));
+ len = snprintf(volfileid, sizeof(volfileid), "gfproxyd/%s",
+ volinfo->volname);
+ if ((len < 0) || (len >= sizeof(volfileid))) {
+ ret = -1;
+ goto out;
+ }
+
+ if (dict_get_strn(this->options, "transport.socket.bind-address",
+ SLEN("transport.socket.bind-address"),
+ &volfileserver) != 0) {
+ volfileserver = "localhost";
+ }
+ ret = glusterd_proc_init(&(svc->proc), gfproxyd_svc_name, pidfile, logdir,
+ logfile, volfile, volfileid, volfileserver);
+ if (ret)
+ goto out;
out:
- gf_msg_debug (this ? this->name : "glusterd", 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
+ return ret;
}
-
static int
-glusterd_gfproxydsvc_create_volfile (glusterd_volinfo_t *volinfo)
+glusterd_gfproxydsvc_create_volfile(glusterd_volinfo_t *volinfo)
{
- int ret = -1;
- xlator_t *this = NULL;
+ int ret = -1;
+ xlator_t *this = NULL;
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
- ret = glusterd_generate_gfproxyd_volfile (volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLFILE_CREATE_FAIL,
- "Failed to create volfile");
- goto out;
- }
+ ret = glusterd_generate_gfproxyd_volfile(volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
+ "Failed to create volfile");
+ goto out;
+ }
out:
- gf_msg_debug (this ? this->name : "glusterd", 0, "Returning %d", ret);
-
- return ret;
+ gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
+ return ret;
}
int
-glusterd_gfproxydsvc_manager (glusterd_svc_t *svc, void *data, int flags)
+glusterd_gfproxydsvc_manager(glusterd_svc_t *svc, void *data, int flags)
{
- int ret = -1;
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
-
- volinfo = data;
- GF_VALIDATE_OR_GOTO (this->name, data, out);
-
- if (!svc->inited) {
- ret = glusterd_gfproxydsvc_init (volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_FAILED_INIT_QUOTASVC, "Failed to init "
- "gfproxyd service");
- goto out;
- } else {
- svc->inited = _gf_true;
- gf_msg_debug (this->name, 0, "gfproxyd service "
- "initialized");
- }
+ int ret = -1;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ volinfo = data;
+ GF_VALIDATE_OR_GOTO(this->name, data, out);
+
+ if (!svc->inited) {
+ ret = glusterd_gfproxydsvc_init(volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FAILED_INIT_QUOTASVC,
+ "Failed to init "
+ "gfproxyd service");
+ goto out;
+ } else {
+ svc->inited = _gf_true;
+ gf_msg_debug(this->name, 0,
+ "gfproxyd service "
+ "initialized");
+ }
+ }
+
+ ret = glusterd_is_gfproxyd_enabled(volinfo);
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+ "Failed to read volume "
+ "options");
+ goto out;
+ }
+
+ if (ret) {
+ if (!glusterd_is_volume_started(volinfo)) {
+ if (glusterd_proc_is_running(&svc->proc)) {
+ ret = svc->stop(svc, SIGTERM);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAPD_STOP_FAIL,
+ "Couldn't stop gfproxyd for "
+ "volume: %s",
+ volinfo->volname);
+ } else {
+ /* Since gfproxyd is not running set ret to 0 */
+ ret = 0;
+ }
+ goto out;
}
- ret = glusterd_is_gfproxyd_enabled (volinfo);
- if (ret == -1) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLINFO_GET_FAIL, "Failed to read volume "
- "options");
- goto out;
+ ret = glusterd_gfproxydsvc_create_volfile(volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAPD_CREATE_FAIL,
+ "Couldn't create "
+                   "gfproxyd volfile for volume: %s",
+ volinfo->volname);
+ goto out;
+ }
+ ret = svc->stop(svc, SIGTERM);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAPD_START_FAIL,
+ "Couldn't stop "
+ "gfproxyd for volume: %s",
+ volinfo->volname);
+ goto out;
}
+ ret = svc->start(svc, flags);
if (ret) {
- if (!glusterd_is_volume_started (volinfo)) {
- if (glusterd_proc_is_running (&svc->proc)) {
- ret = svc->stop (svc, SIGTERM);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SNAPD_STOP_FAIL,
- "Couldn't stop gfproxyd for "
- "volume: %s",
- volinfo->volname);
- } else {
- /* Since gfproxyd is not running set ret to 0 */
- ret = 0;
- }
- goto out;
- }
-
- ret = glusterd_gfproxydsvc_create_volfile (volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SNAPD_CREATE_FAIL, "Couldn't create "
- "gfroxyd volfile for volume: %s",
- volinfo->volname);
- goto out;
- }
- ret = svc->stop (svc, SIGTERM);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SNAPD_START_FAIL, "Couldn't stop "
- "gfproxyd for volume: %s", volinfo->volname);
- goto out;
- }
-
- ret = svc->start (svc, flags);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SNAPD_START_FAIL, "Couldn't start "
- "gfproxyd for volume: %s", volinfo->volname);
- goto out;
- }
-
- glusterd_volinfo_ref (volinfo);
- ret = glusterd_conn_connect (&(svc->conn));
- if (ret) {
- glusterd_volinfo_unref (volinfo);
- volinfo = NULL;
- goto out;
- }
-
- } else if (glusterd_proc_is_running (&svc->proc)) {
- ret = svc->stop (svc, SIGTERM);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SNAPD_STOP_FAIL,
- "Couldn't stop gfproxyd for volume: %s",
- volinfo->volname);
- goto out;
- }
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAPD_START_FAIL,
+ "Couldn't start "
+ "gfproxyd for volume: %s",
+ volinfo->volname);
+ goto out;
}
-out:
+ glusterd_volinfo_ref(volinfo);
+ ret = glusterd_conn_connect(&(svc->conn));
if (ret) {
- if (volinfo) {
- gf_event (EVENT_SVC_MANAGER_FAILED,
- "volume=%s;svc_name=%s",
- volinfo->volname, svc->name);
- }
+ glusterd_volinfo_unref(volinfo);
+ volinfo = NULL;
+ goto out;
}
- gf_msg_debug ("glusterd", 0, "Returning %d", ret);
+ } else if (glusterd_proc_is_running(&svc->proc)) {
+ ret = svc->stop(svc, SIGTERM);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAPD_STOP_FAIL,
+ "Couldn't stop gfproxyd for volume: %s", volinfo->volname);
+ goto out;
+ }
+ }
- return ret;
+out:
+ if (ret) {
+ if (volinfo) {
+ gf_event(EVENT_SVC_MANAGER_FAILED, "volume=%s;svc_name=%s",
+ volinfo->volname, svc->name);
+ }
+ }
+
+ gf_msg_debug("glusterd", 0, "Returning %d", ret);
+
+ return ret;
}
int
-glusterd_gfproxydsvc_start (glusterd_svc_t *svc, int flags)
+glusterd_gfproxydsvc_start(glusterd_svc_t *svc, int flags)
{
- int ret = -1;
- runner_t runner = {0,};
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
- char valgrind_logfile[PATH_MAX] = {0};
- int gfproxyd_port = 0;
- char msg[1024] = {0,};
- char gfproxyd_id[PATH_MAX] = {0,};
- glusterd_volinfo_t *volinfo = NULL;
- char *localtime_logging = NULL;
- int32_t len = 0;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
-
- priv = this->private;
- GF_VALIDATE_OR_GOTO (this->name, priv, out);
-
- volinfo = glusterd_gfproxyd_volinfo_from_svc (svc);
- if (!volinfo)
- goto out;
-
- ret = sys_access (svc->proc.volfile, F_OK);
+ int ret = -1;
+ runner_t runner = {
+ 0,
+ };
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ char valgrind_logfile[PATH_MAX] = {0};
+ int gfproxyd_port = 0;
+ char msg[1024] = {
+ 0,
+ };
+ char gfproxyd_id[PATH_MAX] = {
+ 0,
+ };
+ glusterd_volinfo_t *volinfo = NULL;
+ char *localtime_logging = NULL;
+ int32_t len = 0;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, priv, out);
+
+ volinfo = glusterd_gfproxyd_volinfo_from_svc(svc);
+ if (!volinfo)
+ goto out;
+
+ ret = sys_access(svc->proc.volfile, F_OK);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_DEBUG, 0, GD_MSG_VOLINFO_GET_FAIL,
+ "gfproxyd Volfile %s is not present", svc->proc.volfile);
+ ret = glusterd_gfproxydsvc_create_volfile(volinfo);
if (ret) {
- gf_msg (this->name, GF_LOG_DEBUG, 0,
- GD_MSG_VOLINFO_GET_FAIL,
- "gfproxyd Volfile %s is not present", svc->proc.volfile);
- ret = glusterd_gfproxydsvc_create_volfile (volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLFILE_CREATE_FAIL, "Couldn't create "
- "gfproxyd volfile for volume: %s",
- volinfo->volname);
- goto out;
- }
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
+ "Couldn't create "
+ "gfproxyd volfile for volume: %s",
+ volinfo->volname);
+ goto out;
}
- runinit (&runner);
-
- if (this->ctx->cmd_args.valgrind) {
- len = snprintf (valgrind_logfile, PATH_MAX, "%s/valgrind-%s",
- svc->proc.logdir, svc->proc.logfile);
- if ((len < 0) || (len >= PATH_MAX)) {
- ret = -1;
- goto out;
- }
-
- runner_add_args (&runner, "valgrind", "--leak-check=full",
- "--trace-children=yes", "--track-origins=yes",
- NULL);
- runner_argprintf (&runner, "--log-file=%s", valgrind_logfile);
+ }
+ runinit(&runner);
+
+ if (this->ctx->cmd_args.valgrind) {
+ len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-%s",
+ svc->proc.logdir, svc->proc.logfile);
+ if ((len < 0) || (len >= PATH_MAX)) {
+ ret = -1;
+ goto out;
}
- snprintf (gfproxyd_id, sizeof (gfproxyd_id), "gfproxyd-%s",
- volinfo->volname);
- runner_add_args (&runner, SBIN_DIR"/glusterfsd",
- "-s", svc->proc.volfileserver,
- "--volfile-id", svc->proc.volfileid,
- "-p", svc->proc.pidfile,
- "-l", svc->proc.logfile,
- "--brick-name", gfproxyd_id,
- "-S", svc->conn.sockpath, NULL);
-
- if (volinfo->memory_accounting)
- runner_add_arg (&runner, "--mem-accounting");
- if (dict_get_strn (priv->opts, GLUSTERD_LOCALTIME_LOGGING_KEY,
- SLEN (GLUSTERD_LOCALTIME_LOGGING_KEY),
- &localtime_logging) == 0) {
- if (strcmp (localtime_logging, "enable") == 0)
- runner_add_arg (&runner, "--localtime-logging");
- }
-
- gfproxyd_port = pmap_assign_port (this, volinfo->gfproxyd.port,
- gfproxyd_id);
- volinfo->gfproxyd.port = gfproxyd_port;
-
- runner_add_arg (&runner, "--brick-port");
- runner_argprintf (&runner, "%d", gfproxyd_port);
- runner_add_arg (&runner, "--xlator-option");
- runner_argprintf (&runner, "%s-server.listen-port=%d",
- volinfo->volname, gfproxyd_port);
-
- snprintf (msg, sizeof (msg),
- "Starting the gfproxyd service for volume %s",
- volinfo->volname);
- runner_log (&runner, this->name, GF_LOG_DEBUG, msg);
-
- if (flags == PROC_START_NO_WAIT) {
- ret = runner_run_nowait (&runner);
- } else {
- synclock_unlock (&priv->big_lock);
- {
- ret = runner_run (&runner);
- }
- synclock_lock (&priv->big_lock);
+ runner_add_args(&runner, "valgrind", "--leak-check=full",
+ "--trace-children=yes", "--track-origins=yes", NULL);
+ runner_argprintf(&runner, "--log-file=%s", valgrind_logfile);
+ }
+
+ snprintf(gfproxyd_id, sizeof(gfproxyd_id), "gfproxyd-%s", volinfo->volname);
+ runner_add_args(&runner, SBIN_DIR "/glusterfsd", "-s",
+ svc->proc.volfileserver, "--volfile-id",
+ svc->proc.volfileid, "-p", svc->proc.pidfile, "-l",
+ svc->proc.logfile, "--brick-name", gfproxyd_id, "-S",
+ svc->conn.sockpath, NULL);
+
+ if (volinfo->memory_accounting)
+ runner_add_arg(&runner, "--mem-accounting");
+ if (dict_get_strn(priv->opts, GLUSTERD_LOCALTIME_LOGGING_KEY,
+ SLEN(GLUSTERD_LOCALTIME_LOGGING_KEY),
+ &localtime_logging) == 0) {
+ if (strcmp(localtime_logging, "enable") == 0)
+ runner_add_arg(&runner, "--localtime-logging");
+ }
+
+ gfproxyd_port = pmap_assign_port(this, volinfo->gfproxyd.port, gfproxyd_id);
+ volinfo->gfproxyd.port = gfproxyd_port;
+
+ runner_add_arg(&runner, "--brick-port");
+ runner_argprintf(&runner, "%d", gfproxyd_port);
+ runner_add_arg(&runner, "--xlator-option");
+ runner_argprintf(&runner, "%s-server.listen-port=%d", volinfo->volname,
+ gfproxyd_port);
+
+ snprintf(msg, sizeof(msg), "Starting the gfproxyd service for volume %s",
+ volinfo->volname);
+ runner_log(&runner, this->name, GF_LOG_DEBUG, msg);
+
+ if (flags == PROC_START_NO_WAIT) {
+ ret = runner_run_nowait(&runner);
+ } else {
+ synclock_unlock(&priv->big_lock);
+ {
+ ret = runner_run(&runner);
}
+ synclock_lock(&priv->big_lock);
+ }
out:
- return ret;
+ return ret;
}
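
The tail of glusterd_gfproxydsvc_start() shows the big-lock discipline used here: when the daemon is started synchronously, the global synclock is released around the blocking runner_run() and reacquired afterwards, so other management operations are not serialized behind the child process. A sketch of the same pattern, with a pthread mutex and system() standing in for the real synclock and runner:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    static int run_blocking(const char *cmd)
    {
        int ret;

        pthread_mutex_unlock(&big_lock); /* don't hold the lock across the wait */
        ret = system(cmd);               /* stand-in for runner_run() */
        pthread_mutex_lock(&big_lock);
        return ret;
    }

    int main(void)
    {
        int ret;

        pthread_mutex_lock(&big_lock);   /* caller holds the lock, as glusterd does */
        ret = run_blocking("true");
        pthread_mutex_unlock(&big_lock);
        return ret;
    }
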
-
int
-glusterd_gfproxydsvc_restart ()
+glusterd_gfproxydsvc_restart()
{
- glusterd_volinfo_t *volinfo = NULL;
- int ret = -1;
- xlator_t *this = THIS;
- glusterd_conf_t *conf = NULL;
- glusterd_svc_t *svc = NULL;
-
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
-
- conf = this->private;
- GF_VALIDATE_OR_GOTO (this->name, conf, out);
-
- cds_list_for_each_entry (volinfo, &conf->volumes, vol_list) {
- /* Start per volume gfproxyd svc */
- if (volinfo->status == GLUSTERD_STATUS_STARTED) {
- svc = &(volinfo->gfproxyd.svc);
- ret = svc->manager (svc, volinfo, PROC_START_NO_WAIT);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SNAPD_START_FAIL,
- "Couldn't resolve gfproxyd for "
- "vol: %s on restart", volinfo->volname);
- gf_event (EVENT_SVC_MANAGER_FAILED,
- "volume=%s;svc_name=%s",
- volinfo->volname, svc->name);
- goto out;
- }
- }
+ glusterd_volinfo_t *volinfo = NULL;
+ int ret = -1;
+ xlator_t *this = THIS;
+ glusterd_conf_t *conf = NULL;
+ glusterd_svc_t *svc = NULL;
+
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+
+ cds_list_for_each_entry(volinfo, &conf->volumes, vol_list)
+ {
+ /* Start per volume gfproxyd svc */
+ if (volinfo->status == GLUSTERD_STATUS_STARTED) {
+ svc = &(volinfo->gfproxyd.svc);
+ ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAPD_START_FAIL,
+ "Couldn't resolve gfproxyd for "
+ "vol: %s on restart",
+ volinfo->volname);
+ gf_event(EVENT_SVC_MANAGER_FAILED, "volume=%s;svc_name=%s",
+ volinfo->volname, svc->name);
+ goto out;
+ }
}
+ }
out:
- return ret;
+ return ret;
}
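
glusterd_gfproxydsvc_restart() simply walks every known volume and re-runs the service manager for the ones in STARTED state, aborting on the first failure. A toy version of the same loop, with hypothetical types standing in for glusterd_volinfo_t:

#include <stdio.h>

/* Stand-ins: a volume is "started" when status == 1 and its manager
 * returns 0 on success. All names here are hypothetical. */
struct volume {
    const char *name;
    int status;
};

static int
manager_start(const struct volume *v)
{
    printf("restarting gfproxyd for %s\n", v->name);
    return 0;
}

int
main(void)
{
    struct volume vols[] = {{"vol0", 1}, {"vol1", 0}, {"vol2", 1}};
    int ret = 0;
    for (unsigned i = 0; i < sizeof(vols) / sizeof(vols[0]); i++) {
        if (vols[i].status != 1)
            continue; /* only volumes in STARTED state get a daemon */
        ret = manager_start(&vols[i]);
        if (ret)
            break; /* mirror the goto out: stop on first failure */
    }
    return ret;
}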
-
int
-glusterd_gfproxydsvc_reconfigure (void *data)
+glusterd_gfproxydsvc_reconfigure(void *data)
{
- int ret = -1;
- xlator_t *this = NULL;
- gf_boolean_t identical = _gf_false;
- glusterd_volinfo_t *volinfo = NULL;
-
- volinfo = data;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
-
- if (!volinfo->gfproxyd.svc.inited)
- goto manager;
-
- if (!glusterd_is_gfproxyd_enabled (volinfo))
- goto manager;
- else if (!glusterd_proc_is_running (&volinfo->gfproxyd.svc.proc))
- goto manager;
-
- /*
- * Check both OLD and NEW volfiles, if they are SAME by size
- * and cksum i.e. "character-by-character". If YES, then
- * NOTHING has been changed, just return.
- */
- ret = glusterd_svc_check_gfproxyd_volfile_identical
- (volinfo->gfproxyd.svc.name, volinfo, &identical);
- if (ret)
- goto out;
-
- if (identical) {
- ret = 0;
- goto out;
- }
-
- /*
- * They are not identical. Find out if the topology is changed
- * OR just the volume options. If just the options which got
- * changed, then inform the xlator to reconfigure the options.
- */
- identical = _gf_false; /* RESET the FLAG */
- ret = glusterd_svc_check_gfproxyd_topology_identical
- (volinfo->gfproxyd.svc.name, volinfo, &identical);
- if (ret)
- goto out;
-
- /* Topology is not changed, but just the options. But write the
- * options to gfproxyd volfile, so that gfproxyd will be reconfigured.
- */
- if (identical) {
- ret = glusterd_gfproxydsvc_create_volfile (volinfo);
- if (ret == 0) {/* Only if above PASSES */
- ret = glusterd_fetchspec_notify (this);
- }
- goto out;
+ int ret = -1;
+ xlator_t *this = NULL;
+ gf_boolean_t identical = _gf_false;
+ glusterd_volinfo_t *volinfo = NULL;
+
+ volinfo = data;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ if (!volinfo->gfproxyd.svc.inited)
+ goto manager;
+
+ if (!glusterd_is_gfproxyd_enabled(volinfo))
+ goto manager;
+ else if (!glusterd_proc_is_running(&volinfo->gfproxyd.svc.proc))
+ goto manager;
+
+ /*
+ * Check both OLD and NEW volfiles, if they are SAME by size
+ * and cksum i.e. "character-by-character". If YES, then
+ * NOTHING has been changed, just return.
+ */
+ ret = glusterd_svc_check_gfproxyd_volfile_identical(
+ volinfo->gfproxyd.svc.name, volinfo, &identical);
+ if (ret)
+ goto out;
+
+ if (identical) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * They are not identical. Find out if the topology is changed
+ * OR just the volume options. If just the options which got
+ * changed, then inform the xlator to reconfigure the options.
+ */
+ identical = _gf_false; /* RESET the FLAG */
+ ret = glusterd_svc_check_gfproxyd_topology_identical(
+ volinfo->gfproxyd.svc.name, volinfo, &identical);
+ if (ret)
+ goto out;
+
+ /* Topology is not changed, but just the options. But write the
+ * options to gfproxyd volfile, so that gfproxyd will be reconfigured.
+ */
+ if (identical) {
+ ret = glusterd_gfproxydsvc_create_volfile(volinfo);
+ if (ret == 0) { /* Only if above PASSES */
+ ret = glusterd_fetchspec_notify(this);
}
+ goto out;
+ }
manager:
- /*
- * gfproxyd volfile's topology has been changed. gfproxyd server needs
- * to be RESTARTED to ACT on the changed volfile.
- */
- ret = volinfo->gfproxyd.svc.manager (&(volinfo->gfproxyd.svc), volinfo,
- PROC_START_NO_WAIT);
+ /*
+ * gfproxyd volfile's topology has been changed. gfproxyd server needs
+ * to be RESTARTED to ACT on the changed volfile.
+ */
+ ret = volinfo->gfproxyd.svc.manager(&(volinfo->gfproxyd.svc), volinfo,
+ PROC_START_NO_WAIT);
out:
- gf_msg_debug ("glusterd", 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug("glusterd", 0, "Returning %d", ret);
+ return ret;
}
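
The reconfigure logic above is a three-way decision: identical volfiles mean there is nothing to do; an identical topology with changed options means rewrite the volfile and notify clients via fetchspec; anything else falls through to the manager for a restart. Condensed into a sketch (names illustrative, not glusterd symbols):

#include <stdio.h>

enum action { NOOP, REWRITE_AND_NOTIFY, RESTART };

static enum action
decide(int files_identical, int topology_identical)
{
    if (files_identical)
        return NOOP;               /* byte-for-byte same volfile */
    if (topology_identical)
        return REWRITE_AND_NOTIFY; /* same graph, new option values */
    return RESTART;                /* graph changed: restart the daemon */
}

int
main(void)
{
    printf("%d\n", decide(0, 1)); /* prints 1: rewrite + notify */
    return 0;
}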
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 8de9008dac2..12e7d30320e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -57,229 +57,222 @@
extern glusterd_op_info_t opinfo;
static int volcount;
-int glusterd_big_locked_notify (struct rpc_clnt *rpc, void *mydata,
- rpc_clnt_event_t event,
- void *data, rpc_clnt_notify_t notify_fn)
+int
+glusterd_big_locked_notify(struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data,
+ rpc_clnt_notify_t notify_fn)
{
- glusterd_conf_t *priv = THIS->private;
- int ret = -1;
+ glusterd_conf_t *priv = THIS->private;
+ int ret = -1;
- synclock_lock (&priv->big_lock);
- ret = notify_fn (rpc, mydata, event, data);
- synclock_unlock (&priv->big_lock);
+ synclock_lock(&priv->big_lock);
+ ret = notify_fn(rpc, mydata, event, data);
+ synclock_unlock(&priv->big_lock);
- return ret;
+ return ret;
}
-int glusterd_big_locked_handler (rpcsvc_request_t *req, rpcsvc_actor actor_fn)
+int
+glusterd_big_locked_handler(rpcsvc_request_t *req, rpcsvc_actor actor_fn)
{
- glusterd_conf_t *priv = THIS->private;
- int ret = -1;
+ glusterd_conf_t *priv = THIS->private;
+ int ret = -1;
- synclock_lock (&priv->big_lock);
- ret = actor_fn (req);
- synclock_unlock (&priv->big_lock);
+ synclock_lock(&priv->big_lock);
+ ret = actor_fn(req);
+ synclock_unlock(&priv->big_lock);
- return ret;
+ return ret;
}
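
These two wrappers establish the convention used by every handler in the rest of this file: the double-underscore function carries the real logic, and the public name is a thin shim that serializes it under priv->big_lock. A self-contained sketch of the same shape:

#include <pthread.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

typedef int (*actor_fn)(void *req);

/* Every RPC actor runs with the big lock held: __foo() holds the real
 * logic, foo() is the registered, lock-taking wrapper. */
static int
big_locked_handler(void *req, actor_fn fn)
{
    int ret;
    pthread_mutex_lock(&big_lock);
    ret = fn(req);
    pthread_mutex_unlock(&big_lock);
    return ret;
}

static int
__handle_ping(void *req)
{
    (void)req;
    return 0; /* real handler body would go here */
}

int
handle_ping(void *req)
{
    return big_locked_handler(req, __handle_ping);
}

int
main(void)
{
    return handle_ping(0);
}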
static int
-glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid,
- char *hostname, int port,
- gd1_mgmt_friend_req *friend_req)
+glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
+ int port, gd1_mgmt_friend_req *friend_req)
{
- int ret = -1;
- glusterd_peerinfo_t *peerinfo = NULL;
- glusterd_friend_sm_event_t *event = NULL;
- glusterd_friend_req_ctx_t *ctx = NULL;
- char rhost[UNIX_PATH_MAX + 1] = {0};
- uuid_t friend_uuid = {0};
- dict_t *dict = NULL;
+ int ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_friend_sm_event_t *event = NULL;
+ glusterd_friend_req_ctx_t *ctx = NULL;
+ char rhost[UNIX_PATH_MAX + 1] = {0};
+ uuid_t friend_uuid = {0};
+ dict_t *dict = NULL;
- gf_uuid_parse (uuid_utoa (uuid), friend_uuid);
- if (!port)
- port = GF_DEFAULT_BASE_PORT;
+ gf_uuid_parse(uuid_utoa(uuid), friend_uuid);
+ if (!port)
+ port = GF_DEFAULT_BASE_PORT;
- ret = glusterd_remote_hostname_get (req, rhost, sizeof (rhost));
+ ret = glusterd_remote_hostname_get(req, rhost, sizeof(rhost));
- rcu_read_lock ();
+ rcu_read_lock();
- peerinfo = glusterd_peerinfo_find (uuid, rhost);
+ peerinfo = glusterd_peerinfo_find(uuid, rhost);
- if (peerinfo == NULL) {
- gf_event (EVENT_PEER_REJECT, "peer=%s", hostname);
- ret = glusterd_xfer_friend_add_resp (req, hostname, rhost, port,
- -1, GF_PROBE_UNKNOWN_PEER);
- if (friend_req->vols.vols_val) {
- free (friend_req->vols.vols_val);
- friend_req->vols.vols_val = NULL;
- }
- goto out;
+ if (peerinfo == NULL) {
+ gf_event(EVENT_PEER_REJECT, "peer=%s", hostname);
+ ret = glusterd_xfer_friend_add_resp(req, hostname, rhost, port, -1,
+ GF_PROBE_UNKNOWN_PEER);
+ if (friend_req->vols.vols_val) {
+ free(friend_req->vols.vols_val);
+ friend_req->vols.vols_val = NULL;
}
+ goto out;
+ }
- ret = glusterd_friend_sm_new_event
- (GD_FRIEND_EVENT_RCVD_FRIEND_REQ, &event);
+ ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_RCVD_FRIEND_REQ, &event);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_EVENT_NEW_GET_FAIL,
- "event generation failed: %d", ret);
- goto out;
- }
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
+ "event generation failed: %d", ret);
+ goto out;
+ }
- event->peername = gf_strdup (peerinfo->hostname);
- gf_uuid_copy (event->peerid, peerinfo->uuid);
+ event->peername = gf_strdup(peerinfo->hostname);
+ gf_uuid_copy(event->peerid, peerinfo->uuid);
- ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_friend_req_ctx_t);
+ ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_friend_req_ctx_t);
- if (!ctx) {
- gf_msg ("glusterd", GF_LOG_ERROR, ENOMEM,
- GD_MSG_NO_MEMORY, "Unable to allocate memory");
- ret = -1;
- goto out;
- }
+ if (!ctx) {
+ gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "Unable to allocate memory");
+ ret = -1;
+ goto out;
+ }
- gf_uuid_copy (ctx->uuid, uuid);
- if (hostname)
- ctx->hostname = gf_strdup (hostname);
- ctx->req = req;
+ gf_uuid_copy(ctx->uuid, uuid);
+ if (hostname)
+ ctx->hostname = gf_strdup(hostname);
+ ctx->req = req;
- dict = dict_new ();
- if (!dict) {
- ret = -1;
- goto out;
- }
+ dict = dict_new();
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
- ret = dict_unserialize (friend_req->vols.vols_val,
- friend_req->vols.vols_len,
- &dict);
+ ret = dict_unserialize(friend_req->vols.vols_val, friend_req->vols.vols_len,
+ &dict);
- if (ret)
- goto out;
- else
- dict->extra_stdfree = friend_req->vols.vols_val;
+ if (ret)
+ goto out;
+ else
+ dict->extra_stdfree = friend_req->vols.vols_val;
- ctx->vols = dict;
- event->ctx = ctx;
+ ctx->vols = dict;
+ event->ctx = ctx;
- ret = glusterd_friend_sm_inject_event (event);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_EVENT_INJECT_FAIL,
- "Unable to inject event %d, "
- "ret = %d", event->event, ret);
- goto out;
- }
+ ret = glusterd_friend_sm_inject_event(event);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
+ "Unable to inject event %d, "
+ "ret = %d",
+ event->event, ret);
+ goto out;
+ }
- ret = 0;
- if (peerinfo && (0 == peerinfo->connected))
- ret = GLUSTERD_CONNECTION_AWAITED;
+ ret = 0;
+ if (peerinfo && (0 == peerinfo->connected))
+ ret = GLUSTERD_CONNECTION_AWAITED;
out:
- rcu_read_unlock ();
-
- if (ret && (ret != GLUSTERD_CONNECTION_AWAITED)) {
- if (ctx && ctx->hostname)
- GF_FREE (ctx->hostname);
- GF_FREE (ctx);
- if (dict) {
- if ((!dict->extra_stdfree) &&
- friend_req->vols.vols_val)
- free (friend_req->vols.vols_val);
- dict_unref (dict);
- } else {
- free (friend_req->vols.vols_val);
- }
- if (event)
- GF_FREE (event->peername);
- GF_FREE (event);
- }
+ rcu_read_unlock();
+ if (ret && (ret != GLUSTERD_CONNECTION_AWAITED)) {
+ if (ctx && ctx->hostname)
+ GF_FREE(ctx->hostname);
+ GF_FREE(ctx);
+ if (dict) {
+ if ((!dict->extra_stdfree) && friend_req->vols.vols_val)
+ free(friend_req->vols.vols_val);
+ dict_unref(dict);
+ } else {
+ free(friend_req->vols.vols_val);
+ }
+ if (event)
+ GF_FREE(event->peername);
+ GF_FREE(event);
+ }
- return ret;
+ return ret;
}
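
The cleanup tail of glusterd_handle_friend_req() is careful about the XDR-allocated vols buffer: once dict_unserialize() succeeds, the dict owns it through extra_stdfree and releases it on dict_unref(); on every failure path the handler frees it directly, so the buffer is freed exactly once. A standalone sketch of that single-owner handoff (struct and helper names are stand-ins, not the real dict API):

#include <stdlib.h>
#include <string.h>

struct dict {
    char *extra_stdfree; /* buffer the dict has taken ownership of */
};

static int
unserialize(struct dict *d, char *buf, int fail)
{
    if (fail)
        return -1;        /* caller keeps ownership on failure */
    d->extra_stdfree = buf; /* dict now owns buf */
    return 0;
}

static void
dict_unref(struct dict *d)
{
    free(d->extra_stdfree); /* frees the buffer iff ownership moved */
}

int
main(void)
{
    struct dict d = {0};
    char *buf = strdup("payload"); /* stand-in for the XDR buffer */
    if (unserialize(&d, buf, 0) != 0)
        free(buf); /* failure: free here, never in dict_unref() */
    dict_unref(&d);
    return 0;
}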
static int
-glusterd_handle_unfriend_req (rpcsvc_request_t *req, uuid_t uuid,
- char *hostname, int port)
+glusterd_handle_unfriend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
+ int port)
{
- int ret = -1;
- glusterd_peerinfo_t *peerinfo = NULL;
- glusterd_friend_sm_event_t *event = NULL;
- glusterd_friend_req_ctx_t *ctx = NULL;
+ int ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_friend_sm_event_t *event = NULL;
+ glusterd_friend_req_ctx_t *ctx = NULL;
- if (!port)
- port = GF_DEFAULT_BASE_PORT;
+ if (!port)
+ port = GF_DEFAULT_BASE_PORT;
- rcu_read_lock ();
+ rcu_read_lock();
- peerinfo = glusterd_peerinfo_find (uuid, hostname);
+ peerinfo = glusterd_peerinfo_find(uuid, hostname);
- if (peerinfo == NULL) {
- gf_msg ("glusterd", GF_LOG_CRITICAL, 0,
- GD_MSG_REQ_FROM_UNKNOWN_PEER,
- "Received remove-friend from unknown peer %s",
- hostname);
- ret = glusterd_xfer_friend_remove_resp (req, hostname,
- port);
- goto out;
- }
+ if (peerinfo == NULL) {
+ gf_msg("glusterd", GF_LOG_CRITICAL, 0, GD_MSG_REQ_FROM_UNKNOWN_PEER,
+ "Received remove-friend from unknown peer %s", hostname);
+ ret = glusterd_xfer_friend_remove_resp(req, hostname, port);
+ goto out;
+ }
- ret = glusterd_friend_sm_new_event
- (GD_FRIEND_EVENT_RCVD_REMOVE_FRIEND, &event);
+ ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_RCVD_REMOVE_FRIEND,
+ &event);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_EVENT_NEW_GET_FAIL,
- "event generation failed: %d", ret);
- goto out;
- }
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
+ "event generation failed: %d", ret);
+ goto out;
+ }
- if (hostname)
- event->peername = gf_strdup (hostname);
+ if (hostname)
+ event->peername = gf_strdup(hostname);
- gf_uuid_copy (event->peerid, uuid);
+ gf_uuid_copy(event->peerid, uuid);
- ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_friend_req_ctx_t);
+ ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_friend_req_ctx_t);
- if (!ctx) {
- gf_msg ("glusterd", GF_LOG_ERROR, ENOMEM,
- GD_MSG_NO_MEMORY, "Unable to allocate memory");
- ret = -1;
- goto out;
- }
+ if (!ctx) {
+ gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "Unable to allocate memory");
+ ret = -1;
+ goto out;
+ }
- gf_uuid_copy (ctx->uuid, uuid);
- if (hostname)
- ctx->hostname = gf_strdup (hostname);
- ctx->req = req;
+ gf_uuid_copy(ctx->uuid, uuid);
+ if (hostname)
+ ctx->hostname = gf_strdup(hostname);
+ ctx->req = req;
- event->ctx = ctx;
+ event->ctx = ctx;
- ret = glusterd_friend_sm_inject_event (event);
+ ret = glusterd_friend_sm_inject_event(event);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_EVENT_INJECT_FAIL, "Unable to inject event %d, "
- "ret = %d", event->event, ret);
- goto out;
- }
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
+ "Unable to inject event %d, "
+ "ret = %d",
+ event->event, ret);
+ goto out;
+ }
- ret = 0;
+ ret = 0;
out:
- rcu_read_unlock ();
-
- if (0 != ret) {
- if (ctx && ctx->hostname)
- GF_FREE (ctx->hostname);
- GF_FREE (ctx);
- if (event)
- GF_FREE (event->peername);
- GF_FREE (event);
- }
-
- return ret;
+ rcu_read_unlock();
+
+ if (0 != ret) {
+ if (ctx && ctx->hostname)
+ GF_FREE(ctx->hostname);
+ GF_FREE(ctx);
+ if (event)
+ GF_FREE(event->peername);
+ GF_FREE(event);
+ }
+
+ return ret;
}
struct args_pack {
@@ -289,3326 +282,3321 @@ struct args_pack {
};
static int
-_build_option_key (dict_t *d, char *k, data_t *v, void *tmp)
+_build_option_key(dict_t *d, char *k, data_t *v, void *tmp)
{
- char reconfig_key[256] = {0, };
- int keylen;
- struct args_pack *pack = NULL;
- int ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- pack = tmp;
- if (strcmp (k, GLUSTERD_GLOBAL_OPT_VERSION) == 0)
- return 0;
-
- if (priv->op_version > GD_OP_VERSION_MIN) {
- if ((strcmp (k, "features.limit-usage") == 0) ||
- (strcmp (k, "features.soft-limit") == 0))
- return 0;
- }
+ char reconfig_key[256] = {
+ 0,
+ };
+ int keylen;
+ struct args_pack *pack = NULL;
+ int ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ pack = tmp;
+ if (strcmp(k, GLUSTERD_GLOBAL_OPT_VERSION) == 0)
+ return 0;
- /* snap-max-hard-limit and snap-max-soft-limit are system *
- * options set and managed by snapshot config option. Hence *
- * they should not be displayed in gluster volume info. *
- */
- if ((strcmp (k, "snap-max-hard-limit") == 0) ||
- (strcmp (k, "snap-max-soft-limit") == 0))
- return 0;
+ if (priv->op_version > GD_OP_VERSION_MIN) {
+ if ((strcmp(k, "features.limit-usage") == 0) ||
+ (strcmp(k, "features.soft-limit") == 0))
+ return 0;
+ }
+
+ /* snap-max-hard-limit and snap-max-soft-limit are system *
+ * options set and managed by snapshot config option. Hence *
+ * they should not be displayed in gluster volume info. *
+ */
+ if ((strcmp(k, "snap-max-hard-limit") == 0) ||
+ (strcmp(k, "snap-max-soft-limit") == 0))
+ return 0;
- keylen = snprintf (reconfig_key, sizeof (reconfig_key),
- "volume%d.option.%s", pack->vol_count, k);
- ret = dict_set_strn (pack->dict, reconfig_key, keylen, v->data);
- if (0 == ret)
- pack->opt_count++;
+ keylen = snprintf(reconfig_key, sizeof(reconfig_key), "volume%d.option.%s",
+ pack->vol_count, k);
+ ret = dict_set_strn(pack->dict, reconfig_key, keylen, v->data);
+ if (0 == ret)
+ pack->opt_count++;
- return 0;
+ return 0;
}
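
_build_option_key() is a dict_foreach() callback: for each surviving option it emits a flattened, numbered key of the form volume<N>.option.<name> into the response dict and bumps opt_count, which the caller later stores as volume<N>.opt_count. The key layout in isolation (the option name and value below are just examples):

#include <stdio.h>

int
main(void)
{
    char key[256];
    int vol_count = 0, opt_count = 0;
    const char *k = "performance.cache-size"; /* example option name */
    const char *v = "256MB";                  /* example value */
    int keylen = snprintf(key, sizeof(key), "volume%d.option.%s", vol_count,
                          k);
    if (keylen > 0)
        opt_count++; /* counted only when the key is actually set */
    printf("%s = %s   (opt_count now %d)\n", key, v, opt_count);
    return 0;
}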
int
-glusterd_add_tier_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
- dict_t *dict, int count)
+glusterd_add_tier_volume_detail_to_dict(glusterd_volinfo_t *volinfo,
+ dict_t *dict, int count)
{
- int ret = -1;
- char key[64] = {0,};
- int keylen;
-
- GF_ASSERT (volinfo);
- GF_ASSERT (dict);
-
- keylen = snprintf (key, sizeof (key), "volume%d.cold_type", count);
- ret = dict_set_int32n (dict, key, keylen,
- volinfo->tier_info.cold_type);
- if (ret)
- goto out;
-
- keylen = snprintf (key, sizeof (key), "volume%d.cold_brick_count",
- count);
- ret = dict_set_int32n (dict, key, keylen,
- volinfo->tier_info.cold_brick_count);
- if (ret)
- goto out;
-
- keylen = snprintf (key, sizeof (key), "volume%d.cold_dist_count",
- count);
- ret = dict_set_int32n (dict, key, keylen,
- volinfo->tier_info.cold_dist_leaf_count);
- if (ret)
- goto out;
-
- keylen = snprintf (key, sizeof (key), "volume%d.cold_replica_count",
- count);
- ret = dict_set_int32n (dict, key, keylen,
- volinfo->tier_info.cold_replica_count);
- if (ret)
- goto out;
-
- keylen = snprintf (key, sizeof (key), "volume%d.cold_arbiter_count",
- count);
- ret = dict_set_int32n (dict, key, keylen, volinfo->arbiter_count);
- if (ret)
- goto out;
-
- keylen = snprintf (key, sizeof (key), "volume%d.cold_disperse_count",
- count);
- ret = dict_set_int32n (dict, key, keylen,
- volinfo->tier_info.cold_disperse_count);
- if (ret)
- goto out;
-
- keylen = snprintf (key, sizeof (key),
- "volume%d.cold_redundancy_count", count);
- ret = dict_set_int32n (dict, key, keylen,
- volinfo->tier_info.cold_redundancy_count);
- if (ret)
- goto out;
-
- keylen = snprintf (key, sizeof (key), "volume%d.hot_type", count);
- ret = dict_set_int32n (dict, key, keylen,
- volinfo->tier_info.hot_type);
- if (ret)
- goto out;
-
- keylen = snprintf (key, sizeof (key), "volume%d.hot_brick_count",
- count);
- ret = dict_set_int32n (dict, key, keylen,
- volinfo->tier_info.hot_brick_count);
- if (ret)
- goto out;
-
- keylen = snprintf (key, sizeof (key), "volume%d.hot_replica_count",
- count);
- ret = dict_set_int32n (dict, key, keylen,
- volinfo->tier_info.hot_replica_count);
- if (ret)
- goto out;
+ int ret = -1;
+ char key[64] = {
+ 0,
+ };
+ int keylen;
+
+ GF_ASSERT(volinfo);
+ GF_ASSERT(dict);
+
+ keylen = snprintf(key, sizeof(key), "volume%d.cold_type", count);
+ ret = dict_set_int32n(dict, key, keylen, volinfo->tier_info.cold_type);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.cold_brick_count", count);
+ ret = dict_set_int32n(dict, key, keylen,
+ volinfo->tier_info.cold_brick_count);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.cold_dist_count", count);
+ ret = dict_set_int32n(dict, key, keylen,
+ volinfo->tier_info.cold_dist_leaf_count);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.cold_replica_count", count);
+ ret = dict_set_int32n(dict, key, keylen,
+ volinfo->tier_info.cold_replica_count);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.cold_arbiter_count", count);
+ ret = dict_set_int32n(dict, key, keylen, volinfo->arbiter_count);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.cold_disperse_count", count);
+ ret = dict_set_int32n(dict, key, keylen,
+ volinfo->tier_info.cold_disperse_count);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.cold_redundancy_count",
+ count);
+ ret = dict_set_int32n(dict, key, keylen,
+ volinfo->tier_info.cold_redundancy_count);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.hot_type", count);
+ ret = dict_set_int32n(dict, key, keylen, volinfo->tier_info.hot_type);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.hot_brick_count", count);
+ ret = dict_set_int32n(dict, key, keylen,
+ volinfo->tier_info.hot_brick_count);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.hot_replica_count", count);
+ ret = dict_set_int32n(dict, key, keylen,
+ volinfo->tier_info.hot_replica_count);
+ if (ret)
+ goto out;
out:
- return ret;
-
+ return ret;
}
int
-glusterd_add_arbiter_info_to_bricks (glusterd_volinfo_t *volinfo,
- dict_t *volumes, int count)
+glusterd_add_arbiter_info_to_bricks(glusterd_volinfo_t *volinfo,
+ dict_t *volumes, int count)
{
- char key[64] = {0, };
- int keylen;
- int i = 0;
- int start_index = 0;
- int ret = 0;
-
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- /*TODO: Add info for hot tier once attach tier of arbiter
- * volumes is supported. */
-
- /* cold tier */
- if (volinfo->tier_info.cold_replica_count == 1 ||
- volinfo->arbiter_count != 1)
- return 0;
-
- i = start_index = volinfo->tier_info.hot_brick_count + 1;
- for (; i <= volinfo->brick_count; i++) {
- if ((i - start_index + 1) %
- volinfo->tier_info.cold_replica_count != 0)
- continue;
- keylen = snprintf (key, sizeof (key),
- "volume%d.brick%d.isArbiter",
- count, i);
- ret = dict_set_int32n (volumes, key, keylen, 1);
- if (ret)
- return ret;
- }
- } else {
- if (volinfo->replica_count == 1 || volinfo->arbiter_count != 1)
- return 0;
- for (i = 1; i <= volinfo->brick_count; i++) {
- if (i % volinfo->replica_count != 0)
- continue;
- keylen = snprintf (key, sizeof (key),
- "volume%d.brick%d.isArbiter",
- count, i);
- ret = dict_set_int32n (volumes, key, keylen, 1);
- if (ret)
- return ret;
- }
+ char key[64] = {
+ 0,
+ };
+ int keylen;
+ int i = 0;
+ int start_index = 0;
+ int ret = 0;
+
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ /*TODO: Add info for hot tier once attach tier of arbiter
+ * volumes is supported. */
+
+ /* cold tier */
+ if (volinfo->tier_info.cold_replica_count == 1 ||
+ volinfo->arbiter_count != 1)
+ return 0;
+
+ i = start_index = volinfo->tier_info.hot_brick_count + 1;
+ for (; i <= volinfo->brick_count; i++) {
+ if ((i - start_index + 1) % volinfo->tier_info.cold_replica_count !=
+ 0)
+ continue;
+ keylen = snprintf(key, sizeof(key), "volume%d.brick%d.isArbiter",
+ count, i);
+ ret = dict_set_int32n(volumes, key, keylen, 1);
+ if (ret)
+ return ret;
}
- return 0;
+ } else {
+ if (volinfo->replica_count == 1 || volinfo->arbiter_count != 1)
+ return 0;
+ for (i = 1; i <= volinfo->brick_count; i++) {
+ if (i % volinfo->replica_count != 0)
+ continue;
+ keylen = snprintf(key, sizeof(key), "volume%d.brick%d.isArbiter",
+ count, i);
+ ret = dict_set_int32n(volumes, key, keylen, 1);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
}
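
The arbiter flagging is pure index arithmetic: bricks are numbered from 1, and in a replica-N arbiter-1 set the last brick of each replica set, i.e. every brick where i % replica_count == 0, is the arbiter; for tiered volumes the cold tier applies the same rule after skipping the hot bricks (start_index = hot_brick_count + 1). Worked example for replica 3 across 6 bricks, where bricks 3 and 6 get the flag:

#include <stdio.h>

int
main(void)
{
    int replica_count = 3, brick_count = 6;
    for (int i = 1; i <= brick_count; i++)
        if (i % replica_count == 0)
            printf("volume0.brick%d.isArbiter = 1\n", i);
    return 0;
}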
int
-glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
- dict_t *volumes, int count)
+glusterd_add_volume_detail_to_dict(glusterd_volinfo_t *volinfo, dict_t *volumes,
+ int count)
{
-
- int ret = -1;
- char key[64] = {0, };
- int keylen;
- glusterd_brickinfo_t *brickinfo = NULL;
- char *buf = NULL;
- int i = 1;
- dict_t *dict = NULL;
- glusterd_conf_t *priv = NULL;
- char *volume_id_str = NULL;
- struct args_pack pack = {0,};
- xlator_t *this = NULL;
- GF_UNUSED int caps = 0;
- int32_t len = 0;
-
- GF_ASSERT (volinfo);
- GF_ASSERT (volumes);
-
- this = THIS;
- priv = this->private;
-
- GF_ASSERT (priv);
-
- keylen = snprintf (key, sizeof (key), "volume%d.name", count);
- ret = dict_set_strn (volumes, key, keylen, volinfo->volname);
+ int ret = -1;
+ char key[64] = {
+ 0,
+ };
+ int keylen;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ char *buf = NULL;
+ int i = 1;
+ dict_t *dict = NULL;
+ glusterd_conf_t *priv = NULL;
+ char *volume_id_str = NULL;
+ struct args_pack pack = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ GF_UNUSED int caps = 0;
+ int32_t len = 0;
+
+ GF_ASSERT(volinfo);
+ GF_ASSERT(volumes);
+
+ this = THIS;
+ priv = this->private;
+
+ GF_ASSERT(priv);
+
+ keylen = snprintf(key, sizeof(key), "volume%d.name", count);
+ ret = dict_set_strn(volumes, key, keylen, volinfo->volname);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.type", count);
+ ret = dict_set_int32n(volumes, key, keylen, volinfo->type);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.status", count);
+ ret = dict_set_int32n(volumes, key, keylen, volinfo->status);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.brick_count", count);
+ ret = dict_set_int32n(volumes, key, keylen, volinfo->brick_count);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.hot_brick_count", count);
+ ret = dict_set_int32n(volumes, key, keylen,
+ volinfo->tier_info.hot_brick_count);
+ if (ret)
+ goto out;
+
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ ret = glusterd_add_tier_volume_detail_to_dict(volinfo, volumes, count);
if (ret)
- goto out;
+ goto out;
+ }
+
+ keylen = snprintf(key, sizeof(key), "volume%d.dist_count", count);
+ ret = dict_set_int32n(volumes, key, keylen, volinfo->dist_leaf_count);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.stripe_count", count);
+ ret = dict_set_int32n(volumes, key, keylen, volinfo->stripe_count);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.replica_count", count);
+ ret = dict_set_int32n(volumes, key, keylen, volinfo->replica_count);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.disperse_count", count);
+ ret = dict_set_int32n(volumes, key, keylen, volinfo->disperse_count);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.redundancy_count", count);
+ ret = dict_set_int32n(volumes, key, keylen, volinfo->redundancy_count);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.arbiter_count", count);
+ ret = dict_set_int32n(volumes, key, keylen, volinfo->arbiter_count);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.transport", count);
+ ret = dict_set_int32n(volumes, key, keylen, volinfo->transport_type);
+ if (ret)
+ goto out;
+
+ volume_id_str = gf_strdup(uuid_utoa(volinfo->volume_id));
+ if (!volume_id_str)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.volume_id", count);
+ ret = dict_set_dynstrn(volumes, key, keylen, volume_id_str);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.rebalance", count);
+ ret = dict_set_int32n(volumes, key, keylen, volinfo->rebal.defrag_cmd);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "volume%d.snap_count", count);
+ ret = dict_set_int32n(volumes, key, keylen, volinfo->snap_count);
+ if (ret)
+ goto out;
- keylen = snprintf (key, sizeof (key), "volume%d.type", count);
- ret = dict_set_int32n (volumes, key, keylen, volinfo->type);
- if (ret)
- goto out;
-
- keylen = snprintf (key, sizeof (key), "volume%d.status", count);
- ret = dict_set_int32n (volumes, key, keylen, volinfo->status);
- if (ret)
- goto out;
-
- keylen = snprintf (key, sizeof (key), "volume%d.brick_count", count);
- ret = dict_set_int32n (volumes, key, keylen, volinfo->brick_count);
- if (ret)
- goto out;
-
- keylen = snprintf (key, sizeof (key), "volume%d.hot_brick_count",
- count);
- ret = dict_set_int32n (volumes, key, keylen,
- volinfo->tier_info.hot_brick_count);
- if (ret)
- goto out;
-
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- ret = glusterd_add_tier_volume_detail_to_dict (volinfo,
- volumes, count);
- if (ret)
- goto out;
+#ifdef HAVE_BD_XLATOR
+ if (volinfo->caps) {
+ caps = 0;
+ keylen = snprintf(key, sizeof(key), "volume%d.xlator0", count);
+ buf = GF_MALLOC(256, gf_common_mt_char);
+ if (!buf) {
+ ret = ENOMEM;
+ goto out;
+ }
+ if (volinfo->caps & CAPS_BD)
+ snprintf(buf, 256, "BD");
+ ret = dict_set_dynstrn(volumes, key, keylen, buf);
+ if (ret) {
+ GF_FREE(buf);
+ goto out;
}
- keylen = snprintf (key, sizeof (key), "volume%d.dist_count", count);
- ret = dict_set_int32n (volumes, key, keylen, volinfo->dist_leaf_count);
- if (ret)
- goto out;
-
- keylen = snprintf (key, sizeof (key), "volume%d.stripe_count", count);
- ret = dict_set_int32n (volumes, key, keylen, volinfo->stripe_count);
- if (ret)
- goto out;
-
- keylen = snprintf (key, sizeof (key), "volume%d.replica_count",
- count);
- ret = dict_set_int32n (volumes, key, keylen, volinfo->replica_count);
- if (ret)
- goto out;
-
- keylen = snprintf (key, sizeof (key), "volume%d.disperse_count",
- count);
- ret = dict_set_int32n (volumes, key, keylen, volinfo->disperse_count);
- if (ret)
+ if (volinfo->caps & CAPS_THIN) {
+ snprintf(key, sizeof(key), "volume%d.xlator0.caps%d", count,
+ caps++);
+ buf = GF_MALLOC(256, gf_common_mt_char);
+ if (!buf) {
+ ret = ENOMEM;
goto out;
-
- keylen = snprintf (key, sizeof (key), "volume%d.redundancy_count",
- count);
- ret = dict_set_int32n (volumes, key, keylen,
- volinfo->redundancy_count);
- if (ret)
+ }
+ snprintf(buf, 256, "thin");
+ ret = dict_set_dynstr(volumes, key, buf);
+ if (ret) {
+ GF_FREE(buf);
goto out;
+ }
+ }
- keylen = snprintf (key, sizeof (key), "volume%d.arbiter_count",
- count);
- ret = dict_set_int32n (volumes, key, keylen, volinfo->arbiter_count);
- if (ret)
+ if (volinfo->caps & CAPS_OFFLOAD_COPY) {
+ snprintf(key, sizeof(key), "volume%d.xlator0.caps%d", count,
+ caps++);
+ buf = GF_MALLOC(256, gf_common_mt_char);
+ if (!buf) {
+ ret = ENOMEM;
goto out;
-
- keylen = snprintf (key, sizeof (key), "volume%d.transport", count);
- ret = dict_set_int32n (volumes, key, keylen, volinfo->transport_type);
- if (ret)
+ }
+ snprintf(buf, 256, "offload_copy");
+ ret = dict_set_dynstr(volumes, key, buf);
+ if (ret) {
+ GF_FREE(buf);
goto out;
+ }
+ }
- volume_id_str = gf_strdup (uuid_utoa (volinfo->volume_id));
- if (!volume_id_str)
+ if (volinfo->caps & CAPS_OFFLOAD_SNAPSHOT) {
+ snprintf(key, sizeof(key), "volume%d.xlator0.caps%d", count,
+ caps++);
+ buf = GF_MALLOC(256, gf_common_mt_char);
+ if (!buf) {
+ ret = ENOMEM;
goto out;
-
- keylen = snprintf (key, sizeof (key), "volume%d.volume_id", count);
- ret = dict_set_dynstrn (volumes, key, keylen, volume_id_str);
- if (ret)
+ }
+ snprintf(buf, 256, "offload_snapshot");
+ ret = dict_set_dynstr(volumes, key, buf);
+ if (ret) {
+ GF_FREE(buf);
goto out;
+ }
+ }
- keylen = snprintf (key, sizeof (key), "volume%d.rebalance", count);
- ret = dict_set_int32n (volumes, key, keylen,
- volinfo->rebal.defrag_cmd);
- if (ret)
+ if (volinfo->caps & CAPS_OFFLOAD_ZERO) {
+ snprintf(key, sizeof(key), "volume%d.xlator0.caps%d", count,
+ caps++);
+ buf = GF_MALLOC(256, gf_common_mt_char);
+ if (!buf) {
+ ret = ENOMEM;
goto out;
-
- keylen = snprintf (key, sizeof (key), "volume%d.snap_count", count);
- ret = dict_set_int32n (volumes, key, keylen, volinfo->snap_count);
- if (ret)
+ }
+ snprintf(buf, 256, "offload_zerofill");
+ ret = dict_set_dynstr(volumes, key, buf);
+ if (ret) {
+ GF_FREE(buf);
goto out;
-
-#ifdef HAVE_BD_XLATOR
- if (volinfo->caps) {
- caps = 0;
- keylen = snprintf (key, sizeof (key), "volume%d.xlator0",
- count);
- buf = GF_MALLOC (256, gf_common_mt_char);
- if (!buf) {
- ret = ENOMEM;
- goto out;
- }
- if (volinfo->caps & CAPS_BD)
- snprintf (buf, 256, "BD");
- ret = dict_set_dynstrn (volumes, key, keylen, buf);
- if (ret) {
- GF_FREE (buf);
- goto out;
- }
-
- if (volinfo->caps & CAPS_THIN) {
- snprintf (key, sizeof (key), "volume%d.xlator0.caps%d",
- count, caps++);
- buf = GF_MALLOC (256, gf_common_mt_char);
- if (!buf) {
- ret = ENOMEM;
- goto out;
- }
- snprintf (buf, 256, "thin");
- ret = dict_set_dynstr (volumes, key, buf);
- if (ret) {
- GF_FREE (buf);
- goto out;
- }
- }
-
- if (volinfo->caps & CAPS_OFFLOAD_COPY) {
- snprintf (key, sizeof (key), "volume%d.xlator0.caps%d",
- count, caps++);
- buf = GF_MALLOC (256, gf_common_mt_char);
- if (!buf) {
- ret = ENOMEM;
- goto out;
- }
- snprintf (buf, 256, "offload_copy");
- ret = dict_set_dynstr (volumes, key, buf);
- if (ret) {
- GF_FREE (buf);
- goto out;
- }
- }
-
- if (volinfo->caps & CAPS_OFFLOAD_SNAPSHOT) {
- snprintf (key, sizeof (key), "volume%d.xlator0.caps%d",
- count, caps++);
- buf = GF_MALLOC (256, gf_common_mt_char);
- if (!buf) {
- ret = ENOMEM;
- goto out;
- }
- snprintf (buf, 256, "offload_snapshot");
- ret = dict_set_dynstr (volumes, key, buf);
- if (ret) {
- GF_FREE (buf);
- goto out;
- }
- }
-
- if (volinfo->caps & CAPS_OFFLOAD_ZERO) {
- snprintf (key, sizeof (key), "volume%d.xlator0.caps%d",
- count, caps++);
- buf = GF_MALLOC (256, gf_common_mt_char);
- if (!buf) {
- ret = ENOMEM;
- goto out;
- }
- snprintf (buf, 256, "offload_zerofill");
- ret = dict_set_dynstr (volumes, key, buf);
- if (ret) {
- GF_FREE (buf);
- goto out;
- }
- }
-
+ }
}
+ }
#endif
- cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- char brick[1024] = {0,};
- char brick_uuid[64] = {0,};
- len = snprintf (brick, sizeof (brick), "%s:%s",
- brickinfo->hostname, brickinfo->path);
- if ((len < 0) || (len >= sizeof (brick))) {
- ret = -1;
- goto out;
- }
- buf = gf_strdup (brick);
- keylen = snprintf (key, sizeof (key), "volume%d.brick%d",
- count, i);
- ret = dict_set_dynstrn (volumes, key, keylen, buf);
- if (ret)
- goto out;
- keylen = snprintf (key, sizeof (key), "volume%d.brick%d.uuid",
- count, i);
- snprintf (brick_uuid, sizeof (brick_uuid), "%s",
- uuid_utoa (brickinfo->uuid));
- buf = gf_strdup (brick_uuid);
- if (!buf)
- goto out;
- ret = dict_set_dynstrn (volumes, key, keylen, buf);
- if (ret)
- goto out;
-
-#ifdef HAVE_BD_XLATOR
- if (volinfo->caps & CAPS_BD) {
- snprintf (key, sizeof (key), "volume%d.vg%d",
- count, i);
- snprintf (brick, sizeof (brick), "%s", brickinfo->vg);
- buf = gf_strdup (brick);
- ret = dict_set_dynstr (volumes, key, buf);
- if (ret)
- goto out;
- }
-#endif
- i++;
- }
- ret = glusterd_add_arbiter_info_to_bricks (volinfo, volumes, count);
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ char brick[1024] = {
+ 0,
+ };
+ char brick_uuid[64] = {
+ 0,
+ };
+ len = snprintf(brick, sizeof(brick), "%s:%s", brickinfo->hostname,
+ brickinfo->path);
+ if ((len < 0) || (len >= sizeof(brick))) {
+ ret = -1;
+ goto out;
+ }
+ buf = gf_strdup(brick);
+ keylen = snprintf(key, sizeof(key), "volume%d.brick%d", count, i);
+ ret = dict_set_dynstrn(volumes, key, keylen, buf);
if (ret)
- goto out;
+ goto out;
+ keylen = snprintf(key, sizeof(key), "volume%d.brick%d.uuid", count, i);
+ snprintf(brick_uuid, sizeof(brick_uuid), "%s",
+ uuid_utoa(brickinfo->uuid));
+ buf = gf_strdup(brick_uuid);
+ if (!buf)
+ goto out;
+ ret = dict_set_dynstrn(volumes, key, keylen, buf);
+ if (ret)
+ goto out;
- dict = volinfo->dict;
- if (!dict) {
- ret = 0;
+#ifdef HAVE_BD_XLATOR
+ if (volinfo->caps & CAPS_BD) {
+ snprintf(key, sizeof(key), "volume%d.vg%d", count, i);
+ snprintf(brick, sizeof(brick), "%s", brickinfo->vg);
+ buf = gf_strdup(brick);
+ ret = dict_set_dynstr(volumes, key, buf);
+ if (ret)
goto out;
}
+#endif
+ i++;
+ }
+ ret = glusterd_add_arbiter_info_to_bricks(volinfo, volumes, count);
+ if (ret)
+ goto out;
+
+ dict = volinfo->dict;
+ if (!dict) {
+ ret = 0;
+ goto out;
+ }
- pack.dict = volumes;
- pack.vol_count = count;
- pack.opt_count = 0;
- dict_foreach (dict, _build_option_key, (void *) &pack);
- dict_foreach (priv->opts, _build_option_key, &pack);
+ pack.dict = volumes;
+ pack.vol_count = count;
+ pack.opt_count = 0;
+ dict_foreach(dict, _build_option_key, (void *)&pack);
+ dict_foreach(priv->opts, _build_option_key, &pack);
- keylen = snprintf (key, sizeof (key), "volume%d.opt_count",
- pack.vol_count);
- ret = dict_set_int32n (volumes, key, keylen, pack.opt_count);
+ keylen = snprintf(key, sizeof(key), "volume%d.opt_count", pack.vol_count);
+ ret = dict_set_int32n(volumes, key, keylen, pack.opt_count);
out:
- return ret;
+ return ret;
}
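
One detail in the brick loop above worth copying elsewhere: the host:path string is built with a truncation check, treating both a negative snprintf() return and len >= sizeof(brick) as failure instead of silently shipping a clipped brick name. The pattern on its own (hostname and path are invented values):

#include <stdio.h>

int
main(void)
{
    char brick[16];
    const char *host = "node1.example.com";
    const char *path = "/bricks/b1";
    int len = snprintf(brick, sizeof(brick), "%s:%s", host, path);
    if (len < 0 || (size_t)len >= sizeof(brick)) {
        fprintf(stderr, "brick string truncated (needed %d bytes)\n", len);
        return 1; /* mirrors the ret = -1; goto out; bail-out */
    }
    printf("%s\n", brick);
    return 0;
}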
int32_t
-glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
- char *err_str, size_t err_len)
+glusterd_op_txn_begin(rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
+ char *err_str, size_t err_len)
{
- int32_t ret = -1;
- dict_t *dict = NULL;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- int32_t locked = 0;
- char *tmp = NULL;
- char *volname = NULL;
- uuid_t *txn_id = NULL;
- glusterd_op_info_t txn_op_info = {{0},};
- glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
- uint32_t op_errno = 0;
- uint32_t timeout = 0;
-
- GF_ASSERT (req);
- GF_ASSERT ((op > GD_OP_NONE) && (op < GD_OP_MAX));
- GF_ASSERT (NULL != ctx);
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- dict = ctx;
-
- /* Generate a transaction-id for this operation and
- * save it in the dict. This transaction id distinguishes
- * each transaction, and helps separate opinfos in the
- * op state machine. */
- ret = glusterd_generate_txn_id (dict, &txn_id);
+ int32_t ret = -1;
+ dict_t *dict = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ int32_t locked = 0;
+ char *tmp = NULL;
+ char *volname = NULL;
+ uuid_t *txn_id = NULL;
+ glusterd_op_info_t txn_op_info = {
+ {0},
+ };
+ glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
+ uint32_t op_errno = 0;
+ uint32_t timeout = 0;
+
+ GF_ASSERT(req);
+ GF_ASSERT((op > GD_OP_NONE) && (op < GD_OP_MAX));
+ GF_ASSERT(NULL != ctx);
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ dict = ctx;
+
+ /* Generate a transaction-id for this operation and
+ * save it in the dict. This transaction id distinguishes
+ * each transaction, and helps separate opinfos in the
+ * op state machine. */
+ ret = glusterd_generate_txn_id(dict, &txn_id);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_IDGEN_FAIL,
+ "Failed to generate transaction id");
+ goto out;
+ }
+
+ /* Save the MY_UUID as the originator_uuid. This originator_uuid
+ * will be used by is_origin_glusterd() to determine if a node
+ * is the originator node for a command. */
+ ret = glusterd_set_originator_uuid(dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UUID_SET_FAIL,
+ "Failed to set originator_uuid.");
+ goto out;
+ }
+
+ /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
+ if (priv->op_version < GD_OP_VERSION_3_6_0) {
+ ret = glusterd_lock(MY_UUID);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_IDGEN_FAIL,
- "Failed to generate transaction id");
- goto out;
- }
-
- /* Save the MY_UUID as the originator_uuid. This originator_uuid
- * will be used by is_origin_glusterd() to determine if a node
- * is the originator node for a command. */
- ret = glusterd_set_originator_uuid (dict);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_LOCK_FAIL,
+ "Unable to acquire lock on localhost, ret: %d", ret);
+ snprintf(err_str, err_len,
+ "Another transaction is in progress. "
+ "Please try again after some time.");
+ goto out;
+ }
+ } else {
+ /* If no volname is given as a part of the command, locks will
+ * not be held */
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &tmp);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_UUID_SET_FAIL,
- "Failed to set originator_uuid.");
+ gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_FAILED,
+ "No Volume name present. "
+ "Locks not being held.");
+ goto local_locking_done;
+ } else {
+ /* Use a copy of volname, as cli response will be
+ * sent before the unlock, and the volname in the
+ * dict, might be removed */
+ volname = gf_strdup(tmp);
+ if (!volname)
goto out;
}
- /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
- if (priv->op_version < GD_OP_VERSION_3_6_0) {
- ret = glusterd_lock (MY_UUID);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_GLUSTERD_LOCK_FAIL,
- "Unable to acquire lock on localhost, ret: %d",
- ret);
- snprintf (err_str, err_len,
- "Another transaction is in progress. "
- "Please try again after some time.");
- goto out;
- }
- } else {
- /* If no volname is given as a part of the command, locks will
- * not be held */
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &tmp);
- if (ret) {
- gf_msg (this->name, GF_LOG_INFO, errno,
- GD_MSG_DICT_GET_FAILED,
- "No Volume name present. "
- "Locks not being held.");
- goto local_locking_done;
- } else {
- /* Use a copy of volname, as cli response will be
- * sent before the unlock, and the volname in the
- * dict, might be removed */
- volname = gf_strdup (tmp);
- if (!volname)
- goto out;
- }
+ /* Cli will add timeout key to dict if the default timeout is
+ * other than 2 minutes. Here we use this value to check whether
+ * mgmt_v3_lock_timeout should be set to default value or we
+ * need to change the value according to timeout value
+ * i.e, timeout + 120 seconds. */
+ ret = dict_get_uint32(dict, "timeout", &timeout);
+ if (!ret)
+ priv->mgmt_v3_lock_timeout = timeout + 120;
- /* Cli will add timeout key to dict if the default timeout is
- * other than 2 minutes. Here we use this value to check whether
- * mgmt_v3_lock_timeout should be set to default value or we
- * need to change the value according to timeout value
- * i.e, timeout + 120 seconds. */
- ret = dict_get_uint32 (dict, "timeout", &timeout);
- if (!ret)
- priv->mgmt_v3_lock_timeout = timeout + 120;
-
- ret = glusterd_mgmt_v3_lock (volname, MY_UUID, &op_errno,
- "vol");
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_LOCK_GET_FAIL,
- "Unable to acquire lock for %s", volname);
- snprintf (err_str, err_len,
- "Another transaction is in progress for %s. "
- "Please try again after some time.", volname);
- goto out;
- }
+ ret = glusterd_mgmt_v3_lock(volname, MY_UUID, &op_errno, "vol");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
+ "Unable to acquire lock for %s", volname);
+ snprintf(err_str, err_len,
+ "Another transaction is in progress for %s. "
+ "Please try again after some time.",
+ volname);
+ goto out;
}
+ }
- locked = 1;
- gf_msg_debug (this->name, 0, "Acquired lock on localhost");
+ locked = 1;
+ gf_msg_debug(this->name, 0, "Acquired lock on localhost");
local_locking_done:
- /* If no volname is given as a part of the command, locks will
- * not be held, hence sending stage event. */
- if (volname || (priv->op_version < GD_OP_VERSION_3_6_0))
- event_type = GD_OP_EVENT_START_LOCK;
- else {
- txn_op_info.state.state = GD_OP_STATE_LOCK_SENT;
- event_type = GD_OP_EVENT_ALL_ACC;
- }
-
- /* Save opinfo for this transaction with the transaction id */
- glusterd_txn_opinfo_init (&txn_op_info, NULL, &op, ctx, req);
-
- ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set transaction's opinfo");
- if (ctx)
- dict_unref (ctx);
- goto out;
- }
-
- ret = glusterd_op_sm_inject_event (event_type, txn_id, ctx);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_EVENT_INJECT_FAIL, "Failed to acquire cluster"
- " lock.");
- goto out;
- }
+ /* If no volname is given as a part of the command, locks will
+ * not be held, hence sending stage event. */
+ if (volname || (priv->op_version < GD_OP_VERSION_3_6_0))
+ event_type = GD_OP_EVENT_START_LOCK;
+ else {
+ txn_op_info.state.state = GD_OP_STATE_LOCK_SENT;
+ event_type = GD_OP_EVENT_ALL_ACC;
+ }
+
+ /* Save opinfo for this transaction with the transaction id */
+ glusterd_txn_opinfo_init(&txn_op_info, NULL, &op, ctx, req);
+
+ ret = glusterd_set_txn_opinfo(txn_id, &txn_op_info);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set transaction's opinfo");
+ if (ctx)
+ dict_unref(ctx);
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event(event_type, txn_id, ctx);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
+ "Failed to acquire cluster"
+ " lock.");
+ goto out;
+ }
out:
- if (locked && ret) {
- /* Based on the op-version, we release the
- * cluster or mgmt_v3 lock */
- if (priv->op_version < GD_OP_VERSION_3_6_0)
- glusterd_unlock (MY_UUID);
- else {
- ret = glusterd_mgmt_v3_unlock (volname, MY_UUID,
- "vol");
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_UNLOCK_FAIL,
- "Unable to release lock for %s",
- volname);
- ret = -1;
- }
+ if (locked && ret) {
+ /* Based on the op-version, we release the
+ * cluster or mgmt_v3 lock */
+ if (priv->op_version < GD_OP_VERSION_3_6_0)
+ glusterd_unlock(MY_UUID);
+ else {
+ ret = glusterd_mgmt_v3_unlock(volname, MY_UUID, "vol");
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
+ "Unable to release lock for %s", volname);
+ ret = -1;
}
+ }
- if (volname)
- GF_FREE (volname);
+ if (volname)
+ GF_FREE(volname);
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
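
glusterd_op_txn_begin() picks its lock by cluster op-version: pre-3.6.0 clusters serialize on the single cluster lock, newer clusters take a per-volume mgmt_v3 lock (and volume-less commands take none), with any CLI-supplied timeout widening the lock timeout to timeout + 120 seconds. The selection logic in miniature (the version constant and helpers are stand-ins for the real macros and functions):

#include <stdio.h>

#define OP_VERSION_3_6_0 30600 /* stand-in for the real constant */

static int
take_cluster_lock(void)
{
    puts("cluster lock");
    return 0;
}

static int
take_volume_lock(const char *v)
{
    printf("mgmt_v3 lock on %s\n", v);
    return 0;
}

static int
lock_for_txn(int op_version, const char *volname, unsigned cli_timeout,
             unsigned *lock_timeout)
{
    if (op_version < OP_VERSION_3_6_0)
        return take_cluster_lock();
    if (!volname)
        return 0; /* volume-less commands hold no lock at all */
    if (cli_timeout)
        *lock_timeout = cli_timeout + 120;
    return take_volume_lock(volname);
}

int
main(void)
{
    unsigned t = 120;
    return lock_for_txn(30700, "vol0", 300, &t); /* t becomes 420 */
}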
int
-__glusterd_handle_cluster_lock (rpcsvc_request_t *req)
+__glusterd_handle_cluster_lock(rpcsvc_request_t *req)
{
- dict_t *op_ctx = NULL;
- int32_t ret = -1;
- gd1_mgmt_cluster_lock_req lock_req = {{0},};
- glusterd_op_lock_ctx_t *ctx = NULL;
- glusterd_op_sm_event_type_t op = GD_OP_EVENT_LOCK;
- glusterd_op_info_t txn_op_info = {{0},};
- glusterd_conf_t *priv = NULL;
- uuid_t *txn_id = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
- GF_ASSERT (req);
-
- txn_id = &priv->global_txn_id;
-
- ret = xdr_to_generic (req->msg[0], &lock_req,
- (xdrproc_t)xdr_gd1_mgmt_cluster_lock_req);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode lock "
- "request received from peer");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- gf_msg_debug (this->name, 0, "Received LOCK from uuid: %s",
- uuid_utoa (lock_req.uuid));
-
- rcu_read_lock ();
- ret = (glusterd_peerinfo_find_by_uuid (lock_req.uuid) == NULL);
- rcu_read_unlock ();
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_PEER_NOT_FOUND, "%s doesn't "
- "belong to the cluster. Ignoring request.",
- uuid_utoa (lock_req.uuid));
- ret = -1;
- goto out;
- }
-
- ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
-
- if (!ctx) {
- //respond here
- return -1;
- }
-
- gf_uuid_copy (ctx->uuid, lock_req.uuid);
- ctx->req = req;
- ctx->dict = NULL;
-
- op_ctx = dict_new ();
- if (!op_ctx) {
- gf_msg (this->name, GF_LOG_ERROR, ENOMEM,
- GD_MSG_DICT_CREATE_FAIL,
- "Unable to set new dict");
- goto out;
- }
-
- glusterd_txn_opinfo_init (&txn_op_info, NULL, &op, op_ctx, req);
-
- ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set transaction's opinfo");
- dict_unref (txn_op_info.op_ctx);
- goto out;
- }
-
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCK, txn_id, ctx);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_EVENT_INJECT_FAIL,
- "Failed to inject event GD_OP_EVENT_LOCK");
+ dict_t *op_ctx = NULL;
+ int32_t ret = -1;
+ gd1_mgmt_cluster_lock_req lock_req = {
+ {0},
+ };
+ glusterd_op_lock_ctx_t *ctx = NULL;
+ glusterd_op_sm_event_type_t op = GD_OP_EVENT_LOCK;
+ glusterd_op_info_t txn_op_info = {
+ {0},
+ };
+ glusterd_conf_t *priv = NULL;
+ uuid_t *txn_id = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+ GF_ASSERT(req);
+
+ txn_id = &priv->global_txn_id;
+
+ ret = xdr_to_generic(req->msg[0], &lock_req,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_lock_req);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode lock "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_msg_debug(this->name, 0, "Received LOCK from uuid: %s",
+ uuid_utoa(lock_req.uuid));
+
+ rcu_read_lock();
+ ret = (glusterd_peerinfo_find_by_uuid(lock_req.uuid) == NULL);
+ rcu_read_unlock();
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
+ "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa(lock_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_op_lock_ctx_t);
+
+ if (!ctx) {
+ // respond here
+ return -1;
+ }
+
+ gf_uuid_copy(ctx->uuid, lock_req.uuid);
+ ctx->req = req;
+ ctx->dict = NULL;
+
+ op_ctx = dict_new();
+ if (!op_ctx) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
+ "Unable to set new dict");
+ goto out;
+ }
+
+ glusterd_txn_opinfo_init(&txn_op_info, NULL, &op, op_ctx, req);
+
+ ret = glusterd_set_txn_opinfo(txn_id, &txn_op_info);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set transaction's opinfo");
+ dict_unref(txn_op_info.op_ctx);
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event(GD_OP_EVENT_LOCK, txn_id, ctx);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
+ "Failed to inject event GD_OP_EVENT_LOCK");
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
- glusterd_friend_sm ();
- glusterd_op_sm ();
+ glusterd_friend_sm();
+ glusterd_op_sm();
- return ret;
+ return ret;
}
int
-glusterd_handle_cluster_lock (rpcsvc_request_t *req)
+glusterd_handle_cluster_lock(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_handle_cluster_lock);
+ return glusterd_big_locked_handler(req, __glusterd_handle_cluster_lock);
}
static int
-glusterd_req_ctx_create (rpcsvc_request_t *rpc_req,
- int op, uuid_t uuid,
- char *buf_val, size_t buf_len,
- gf_gld_mem_types_t mem_type,
- glusterd_req_ctx_t **req_ctx_out)
+glusterd_req_ctx_create(rpcsvc_request_t *rpc_req, int op, uuid_t uuid,
+ char *buf_val, size_t buf_len,
+ gf_gld_mem_types_t mem_type,
+ glusterd_req_ctx_t **req_ctx_out)
{
- int ret = -1;
- char str[50] = {0,};
- glusterd_req_ctx_t *req_ctx = NULL;
- dict_t *dict = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- gf_uuid_unparse (uuid, str);
- gf_msg_debug (this->name, 0, "Received op from uuid %s", str);
-
- dict = dict_new ();
- if (!dict)
- goto out;
-
- req_ctx = GF_CALLOC (1, sizeof (*req_ctx), mem_type);
- if (!req_ctx) {
- goto out;
- }
-
- gf_uuid_copy (req_ctx->uuid, uuid);
- req_ctx->op = op;
- ret = dict_unserialize (buf_val, buf_len, &dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to unserialize the dictionary");
- goto out;
- }
-
- req_ctx->dict = dict;
- req_ctx->req = rpc_req;
- *req_ctx_out = req_ctx;
- ret = 0;
+ int ret = -1;
+ char str[50] = {
+ 0,
+ };
+ glusterd_req_ctx_t *req_ctx = NULL;
+ dict_t *dict = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ gf_uuid_unparse(uuid, str);
+ gf_msg_debug(this->name, 0, "Received op from uuid %s", str);
+
+ dict = dict_new();
+ if (!dict)
+ goto out;
+
+ req_ctx = GF_CALLOC(1, sizeof(*req_ctx), mem_type);
+ if (!req_ctx) {
+ goto out;
+ }
+
+ gf_uuid_copy(req_ctx->uuid, uuid);
+ req_ctx->op = op;
+ ret = dict_unserialize(buf_val, buf_len, &dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ req_ctx->dict = dict;
+ req_ctx->req = rpc_req;
+ *req_ctx_out = req_ctx;
+ ret = 0;
out:
- if (ret) {
- if (dict)
- dict_unref (dict);
- GF_FREE (req_ctx);
- }
- return ret;
+ if (ret) {
+ if (dict)
+ dict_unref(dict);
+ GF_FREE(req_ctx);
+ }
+ return ret;
}
int
-__glusterd_handle_stage_op (rpcsvc_request_t *req)
+__glusterd_handle_stage_op(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- glusterd_req_ctx_t *req_ctx = NULL;
- gd1_mgmt_stage_op_req op_req = {{0},};
- xlator_t *this = NULL;
- uuid_t *txn_id = NULL;
- glusterd_op_info_t txn_op_info = {{0},};
- glusterd_op_sm_state_info_t state = {0,};
- glusterd_conf_t *priv = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
- GF_ASSERT (req);
-
- txn_id = &priv->global_txn_id;
-
- ret = xdr_to_generic (req->msg[0], &op_req,
- (xdrproc_t)xdr_gd1_mgmt_stage_op_req);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode stage "
- "request received from peer");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- ret = glusterd_req_ctx_create (req, op_req.op, op_req.uuid,
- op_req.buf.buf_val, op_req.buf.buf_len,
- gf_gld_mt_op_stage_ctx_t, &req_ctx);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_CTX_CREATE_FAIL, "Failed to create req_ctx");
- goto out;
- }
-
- ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id);
- gf_msg_debug (this->name, 0, "transaction ID = %s",
- uuid_utoa (*txn_id));
-
- rcu_read_lock ();
- ret = (glusterd_peerinfo_find_by_uuid (op_req.uuid) == NULL);
- rcu_read_unlock ();
+ int32_t ret = -1;
+ glusterd_req_ctx_t *req_ctx = NULL;
+ gd1_mgmt_stage_op_req op_req = {
+ {0},
+ };
+ xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
+ glusterd_op_info_t txn_op_info = {
+ {0},
+ };
+ glusterd_op_sm_state_info_t state = {
+ 0,
+ };
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+ GF_ASSERT(req);
+
+ txn_id = &priv->global_txn_id;
+
+ ret = xdr_to_generic(req->msg[0], &op_req,
+ (xdrproc_t)xdr_gd1_mgmt_stage_op_req);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode stage "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ ret = glusterd_req_ctx_create(req, op_req.op, op_req.uuid,
+ op_req.buf.buf_val, op_req.buf.buf_len,
+ gf_gld_mt_op_stage_ctx_t, &req_ctx);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_CTX_CREATE_FAIL,
+ "Failed to create req_ctx");
+ goto out;
+ }
+
+ ret = dict_get_bin(req_ctx->dict, "transaction_id", (void **)&txn_id);
+ gf_msg_debug(this->name, 0, "transaction ID = %s", uuid_utoa(*txn_id));
+
+ rcu_read_lock();
+ ret = (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL);
+ rcu_read_unlock();
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
+ "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa(op_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ /* In cases where there is no volname, the receivers won't have a
+ * transaction opinfo created, as for those operations, the locking
+ * phase where the transaction opinfos are created, won't be called. */
+ ret = glusterd_get_txn_opinfo(txn_id, &txn_op_info);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "No transaction's opinfo set");
+
+ state.state = GD_OP_STATE_LOCKED;
+ glusterd_txn_opinfo_init(&txn_op_info, &state, &op_req.op,
+ req_ctx->dict, req);
+
+ txn_op_info.skip_locking = _gf_true;
+ ret = glusterd_set_txn_opinfo(txn_id, &txn_op_info);
if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_PEER_NOT_FOUND, "%s doesn't "
- "belong to the cluster. Ignoring request.",
- uuid_utoa (op_req.uuid));
- ret = -1;
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set transaction's opinfo");
+ dict_unref(req_ctx->dict);
+ goto out;
}
+ }
- /* In cases where there is no volname, the receivers won't have a
- * transaction opinfo created, as for those operations, the locking
- * phase where the transaction opinfos are created, won't be called. */
- ret = glusterd_get_txn_opinfo (txn_id, &txn_op_info);
- if (ret) {
- gf_msg_debug (this->name, 0,
- "No transaction's opinfo set");
-
- state.state = GD_OP_STATE_LOCKED;
- glusterd_txn_opinfo_init (&txn_op_info, &state, &op_req.op,
- req_ctx->dict, req);
-
- txn_op_info.skip_locking = _gf_true;
- ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set transaction's opinfo");
- dict_unref (req_ctx->dict);
- goto out;
- }
- }
+ ret = glusterd_op_sm_inject_event(GD_OP_EVENT_STAGE_OP, txn_id, req_ctx);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
+ "Failed to inject event GD_OP_EVENT_STAGE_OP");
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_OP,
- txn_id, req_ctx);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_EVENT_INJECT_FAIL,
- "Failed to inject event GD_OP_EVENT_STAGE_OP");
-
- out:
- free (op_req.buf.buf_val);//malloced by xdr
- glusterd_friend_sm ();
- glusterd_op_sm ();
- return ret;
+out:
+ free(op_req.buf.buf_val); // malloced by xdr
+ glusterd_friend_sm();
+ glusterd_op_sm();
+ return ret;
}
int
-glusterd_handle_stage_op (rpcsvc_request_t *req)
+glusterd_handle_stage_op(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __glusterd_handle_stage_op);
+ return glusterd_big_locked_handler(req, __glusterd_handle_stage_op);
}
-
int
-__glusterd_handle_commit_op (rpcsvc_request_t *req)
+__glusterd_handle_commit_op(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- glusterd_req_ctx_t *req_ctx = NULL;
- gd1_mgmt_commit_op_req op_req = {{0},};
- xlator_t *this = NULL;
- uuid_t *txn_id = NULL;
- glusterd_conf_t *priv = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
- GF_ASSERT (req);
-
- txn_id = &priv->global_txn_id;
-
- ret = xdr_to_generic (req->msg[0], &op_req,
- (xdrproc_t)xdr_gd1_mgmt_commit_op_req);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode commit "
- "request received from peer");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- rcu_read_lock ();
- ret = (glusterd_peerinfo_find_by_uuid (op_req.uuid) == NULL);
- rcu_read_unlock ();
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_PEER_NOT_FOUND, "%s doesn't "
- "belong to the cluster. Ignoring request.",
- uuid_utoa (op_req.uuid));
- ret = -1;
- goto out;
- }
+ int32_t ret = -1;
+ glusterd_req_ctx_t *req_ctx = NULL;
+ gd1_mgmt_commit_op_req op_req = {
+ {0},
+ };
+ xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+ GF_ASSERT(req);
+
+ txn_id = &priv->global_txn_id;
+
+ ret = xdr_to_generic(req->msg[0], &op_req,
+ (xdrproc_t)xdr_gd1_mgmt_commit_op_req);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode commit "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ rcu_read_lock();
+ ret = (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL);
+ rcu_read_unlock();
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
+ "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa(op_req.uuid));
+ ret = -1;
+ goto out;
+ }
- //the structures should always be equal
- GF_ASSERT (sizeof (gd1_mgmt_commit_op_req) == sizeof (gd1_mgmt_stage_op_req));
- ret = glusterd_req_ctx_create (req, op_req.op, op_req.uuid,
- op_req.buf.buf_val, op_req.buf.buf_len,
- gf_gld_mt_op_commit_ctx_t, &req_ctx);
- if (ret)
- goto out;
+ // the structures should always be equal
+ GF_ASSERT(sizeof(gd1_mgmt_commit_op_req) == sizeof(gd1_mgmt_stage_op_req));
+ ret = glusterd_req_ctx_create(req, op_req.op, op_req.uuid,
+ op_req.buf.buf_val, op_req.buf.buf_len,
+ gf_gld_mt_op_commit_ctx_t, &req_ctx);
+ if (ret)
+ goto out;
- ret = dict_get_bin (req_ctx->dict, "transaction_id", (void **)&txn_id);
- gf_msg_debug (this->name, 0, "transaction ID = %s",
- uuid_utoa (*txn_id));
+ ret = dict_get_bin(req_ctx->dict, "transaction_id", (void **)&txn_id);
+ gf_msg_debug(this->name, 0, "transaction ID = %s", uuid_utoa(*txn_id));
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_COMMIT_OP,
- txn_id, req_ctx);
+ ret = glusterd_op_sm_inject_event(GD_OP_EVENT_COMMIT_OP, txn_id, req_ctx);
out:
- free (op_req.buf.buf_val);//malloced by xdr
- glusterd_friend_sm ();
- glusterd_op_sm ();
- return ret;
+ free(op_req.buf.buf_val); // malloced by xdr
+ glusterd_friend_sm();
+ glusterd_op_sm();
+ return ret;
}
int
-glusterd_handle_commit_op (rpcsvc_request_t *req)
+glusterd_handle_commit_op(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __glusterd_handle_commit_op);
+ return glusterd_big_locked_handler(req, __glusterd_handle_commit_op);
}
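
The runtime GF_ASSERT on the two request sizes could equally be enforced at build time. A hedged sketch using C11 _Static_assert, with illustrative struct layouts only (the real definitions come from the generated XDR headers):

    struct stage_req  { int op; unsigned char uuid[16];
                        struct { unsigned int len; char *val; } buf; };
    struct commit_req { int op; unsigned char uuid[16];
                        struct { unsigned int len; char *val; } buf; };

    _Static_assert(sizeof(struct commit_req) == sizeof(struct stage_req),
                   "stage and commit requests must stay layout-compatible");

A compile-time check would catch a divergence of the XDR definitions during the build rather than in a running daemon.
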
int
-__glusterd_handle_cli_probe (rpcsvc_request_t *req)
+__glusterd_handle_cli_probe(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf_cli_req cli_req = {{0,},};
- glusterd_peerinfo_t *peerinfo = NULL;
- gf_boolean_t run_fsm = _gf_true;
- xlator_t *this = NULL;
- char *bind_name = NULL;
- dict_t *dict = NULL;
- char *hostname = NULL;
- int port = 0;
- int op_errno = 0;
-
- GF_ASSERT (req);
- this = THIS;
-
- ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
- if (ret < 0) {
- //failed to decode msg;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "xdr decoding error");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- if (cli_req.dict.dict_len) {
- dict = dict_new ();
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len, &dict);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL, "Failed to "
- "unserialize req-buffer to dictionary");
- goto out;
- }
- }
-
- ret = dict_get_strn (dict, "hostname", SLEN ("hostname"), &hostname);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_HOSTNAME_NOTFOUND_IN_DICT,
- "Failed to get hostname");
- goto out;
- }
-
- ret = dict_get_int32n (dict, "port", SLEN ("port"), &port);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PORT_NOTFOUND_IN_DICT, "Failed to get port");
- goto out;
- }
-
- if (glusterd_is_any_volume_in_server_quorum (this) &&
- !does_gd_meet_server_quorum (this)) {
- glusterd_xfer_cli_probe_resp (req, -1, GF_PROBE_QUORUM_NOT_MET,
- NULL, hostname, port, dict);
- gf_msg (this->name, GF_LOG_CRITICAL, 0,
- GD_MSG_SERVER_QUORUM_NOT_MET,
- "Server quorum not met. Rejecting operation.");
- ret = 0;
- goto out;
- }
+ int32_t ret = -1;
+ gf_cli_req cli_req = {
+ {
+ 0,
+ },
+ };
+ glusterd_peerinfo_t *peerinfo = NULL;
+ gf_boolean_t run_fsm = _gf_true;
+ xlator_t *this = NULL;
+ char *bind_name = NULL;
+ dict_t *dict = NULL;
+ char *hostname = NULL;
+ int port = 0;
+ int op_errno = 0;
+
+ GF_ASSERT(req);
+ this = THIS;
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "xdr decoding error");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len) {
+ dict = dict_new();
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_CLI_REQ_RECVD,
- "Received CLI probe req %s %d",
- hostname, port);
-
- if (dict_get_strn (this->options, "transport.socket.bind-address",
- SLEN ("transport.socket.bind-address"),
- &bind_name) == 0) {
- gf_msg_debug ("glusterd", 0,
- "only checking probe address vs. bind address");
- ret = gf_is_same_address (bind_name, hostname);
- }
- else {
- ret = gf_is_local_addr (hostname);
- }
- if (ret) {
- glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_LOCALHOST,
- NULL, hostname, port, dict);
- ret = 0;
- goto out;
- }
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "Failed to "
+ "unserialize req-buffer to dictionary");
+ goto out;
+ }
+ }
+
+ ret = dict_get_strn(dict, "hostname", SLEN("hostname"), &hostname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_NOTFOUND_IN_DICT,
+ "Failed to get hostname");
+ goto out;
+ }
+
+ ret = dict_get_int32n(dict, "port", SLEN("port"), &port);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PORT_NOTFOUND_IN_DICT,
+ "Failed to get port");
+ goto out;
+ }
+
+ if (glusterd_is_any_volume_in_server_quorum(this) &&
+ !does_gd_meet_server_quorum(this)) {
+ glusterd_xfer_cli_probe_resp(req, -1, GF_PROBE_QUORUM_NOT_MET, NULL,
+ hostname, port, dict);
+ gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_SERVER_QUORUM_NOT_MET,
+ "Server quorum not met. Rejecting operation.");
+ ret = 0;
+ goto out;
+ }
+
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_CLI_REQ_RECVD,
+ "Received CLI probe req %s %d", hostname, port);
+
+ if (dict_get_strn(this->options, "transport.socket.bind-address",
+ SLEN("transport.socket.bind-address"), &bind_name) == 0) {
+ gf_msg_debug("glusterd", 0,
+ "only checking probe address vs. bind address");
+ ret = gf_is_same_address(bind_name, hostname);
+ } else {
+ ret = gf_is_local_addr(hostname);
+ }
+ if (ret) {
+ glusterd_xfer_cli_probe_resp(req, 0, GF_PROBE_LOCALHOST, NULL, hostname,
+ port, dict);
+ ret = 0;
+ goto out;
+ }
- rcu_read_lock ();
+ rcu_read_lock();
- peerinfo = glusterd_peerinfo_find_by_hostname (hostname);
- ret = (peerinfo && gd_peer_has_address (peerinfo, hostname));
+ peerinfo = glusterd_peerinfo_find_by_hostname(hostname);
+ ret = (peerinfo && gd_peer_has_address(peerinfo, hostname));
- rcu_read_unlock ();
+ rcu_read_unlock();
- if (ret) {
- gf_msg_debug ("glusterd", 0, "Probe host %s port %d "
- "already a peer", hostname, port);
- glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_FRIEND, NULL,
- hostname, port, dict);
- ret = 0;
- goto out;
- }
+ if (ret) {
+ gf_msg_debug("glusterd", 0,
+ "Probe host %s port %d "
+ "already a peer",
+ hostname, port);
+ glusterd_xfer_cli_probe_resp(req, 0, GF_PROBE_FRIEND, NULL, hostname,
+ port, dict);
+ ret = 0;
+ goto out;
+ }
- ret = glusterd_probe_begin (req, hostname, port, dict, &op_errno);
+ ret = glusterd_probe_begin(req, hostname, port, dict, &op_errno);
- if (ret == GLUSTERD_CONNECTION_AWAITED) {
- //fsm should be run after connection establishes
- run_fsm = _gf_false;
- ret = 0;
+ if (ret == GLUSTERD_CONNECTION_AWAITED) {
+ // fsm should be run after connection establishes
+ run_fsm = _gf_false;
+ ret = 0;
- } else if (ret == -1) {
- glusterd_xfer_cli_probe_resp (req, -1, op_errno,
- NULL, hostname, port, dict);
- goto out;
- }
+ } else if (ret == -1) {
+ glusterd_xfer_cli_probe_resp(req, -1, op_errno, NULL, hostname, port,
+ dict);
+ goto out;
+ }
out:
- free (cli_req.dict.dict_val);
+ free(cli_req.dict.dict_val);
- if (run_fsm) {
- glusterd_friend_sm ();
- glusterd_op_sm ();
- }
+ if (run_fsm) {
+ glusterd_friend_sm();
+ glusterd_op_sm();
+ }
- return ret;
+ return ret;
}
int
-glusterd_handle_cli_probe (rpcsvc_request_t *req)
+glusterd_handle_cli_probe(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __glusterd_handle_cli_probe);
+ return glusterd_big_locked_handler(req, __glusterd_handle_cli_probe);
}
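
The probe path short-circuits when the target is the local node: if transport.socket.bind-address is configured, only that one address counts as "self"; otherwise any local interface address does. A minimal sketch of the decision, with stub predicates standing in for gf_is_same_address and gf_is_local_addr:

    #include <stdbool.h>
    #include <string.h>

    static bool is_same_address(const char *a, const char *b)
    { return strcmp(a, b) == 0; }                         /* stub */
    static bool is_local_addr(const char *h)
    { (void)h; return false; }                            /* stub */

    static bool
    probe_targets_self(const char *bind_addr, const char *hostname)
    {
        if (bind_addr)                   /* bind-address set: compare only it */
            return is_same_address(bind_addr, hostname);
        return is_local_addr(hostname);  /* else check all local addresses */
    }
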
int
-__glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
+__glusterd_handle_cli_deprobe(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf_cli_req cli_req = {{0,},};
- uuid_t uuid = {0};
- int op_errno = 0;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- dict_t *dict = NULL;
- char *hostname = NULL;
- int port = 0;
- int flags = 0;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_volinfo_t *tmp = NULL;
- glusterd_snap_t *snapinfo = NULL;
- glusterd_snap_t *tmpsnap = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
- GF_ASSERT (req);
-
- ret = xdr_to_generic (req->msg[0], &cli_req,
- (xdrproc_t)xdr_gf_cli_req);
- if (ret < 0) {
- //failed to decode msg;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
- "request received from cli");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- if (cli_req.dict.dict_len) {
- dict = dict_new ();
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len, &dict);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL, "Failed to "
- "unserialize req-buffer to dictionary");
- goto out;
- }
- }
-
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_CLI_REQ_RECVD,
- "Received CLI deprobe req");
-
- ret = dict_get_strn (dict, "hostname", SLEN ("hostname"), &hostname);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_HOSTNAME_NOTFOUND_IN_DICT,
- "Failed to get hostname");
- goto out;
- }
-
- ret = dict_get_int32n (dict, "port", SLEN ("port"), &port);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PORT_NOTFOUND_IN_DICT, "Failed to get port");
- goto out;
- }
- ret = dict_get_int32n (dict, "flags", SLEN ("flags"), &flags);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_FLAGS_NOTFOUND_IN_DICT, "Failed to get flags");
- goto out;
- }
-
- ret = glusterd_hostname_to_uuid (hostname, uuid);
- if (ret) {
- op_errno = GF_DEPROBE_NOT_FRIEND;
- goto out;
- }
-
- if (!gf_uuid_compare (uuid, MY_UUID)) {
- op_errno = GF_DEPROBE_LOCALHOST;
- ret = -1;
- goto out;
- }
-
- if (!(flags & GF_CLI_FLAG_OP_FORCE)) {
- /* Check if peers are connected, except peer being
- * detached*/
- if (!glusterd_chk_peers_connected_befriended (uuid)) {
- ret = -1;
- op_errno = GF_DEPROBE_FRIEND_DOWN;
- goto out;
- }
- }
-
- /* Check for if volumes exist with some bricks on the peer being
- * detached. It's not a problem if a volume contains none or all
- * of its bricks on the peer being detached
- */
- cds_list_for_each_entry_safe (volinfo, tmp, &priv->volumes,
- vol_list) {
- ret = glusterd_friend_contains_vol_bricks (volinfo, uuid);
- if (ret == 1) {
- op_errno = GF_DEPROBE_BRICK_EXIST;
- goto out;
- }
- }
-
- cds_list_for_each_entry_safe (snapinfo, tmpsnap, &priv->snapshots,
- snap_list) {
- ret = glusterd_friend_contains_snap_bricks (snapinfo, uuid);
- if (ret == 1) {
- op_errno = GF_DEPROBE_SNAP_BRICK_EXIST;
- goto out;
- }
- }
- if (!(flags & GF_CLI_FLAG_OP_FORCE)) {
- if (glusterd_is_any_volume_in_server_quorum (this) &&
- !does_gd_meet_server_quorum (this)) {
- gf_msg (this->name, GF_LOG_CRITICAL, 0,
- GD_MSG_SERVER_QUORUM_NOT_MET,
- "Server quorum not met. Rejecting operation.");
- ret = -1;
- op_errno = GF_DEPROBE_QUORUM_NOT_MET;
- goto out;
- }
- }
+ int32_t ret = -1;
+ gf_cli_req cli_req = {
+ {
+ 0,
+ },
+ };
+ uuid_t uuid = {0};
+ int op_errno = 0;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ dict_t *dict = NULL;
+ char *hostname = NULL;
+ int port = 0;
+ int flags = 0;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_volinfo_t *tmp = NULL;
+ glusterd_snap_t *snapinfo = NULL;
+ glusterd_snap_t *tmpsnap = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+ GF_ASSERT(req);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode "
+ "request received from cli");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len) {
+ dict = dict_new();
- if (!gf_uuid_is_null (uuid)) {
- ret = glusterd_deprobe_begin (req, hostname, port, uuid, dict,
- &op_errno);
- } else {
- ret = glusterd_deprobe_begin (req, hostname, port, NULL, dict,
- &op_errno);
- }
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "Failed to "
+ "unserialize req-buffer to dictionary");
+ goto out;
+ }
+ }
+
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_CLI_REQ_RECVD,
+ "Received CLI deprobe req");
+
+ ret = dict_get_strn(dict, "hostname", SLEN("hostname"), &hostname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_NOTFOUND_IN_DICT,
+ "Failed to get hostname");
+ goto out;
+ }
+
+ ret = dict_get_int32n(dict, "port", SLEN("port"), &port);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PORT_NOTFOUND_IN_DICT,
+ "Failed to get port");
+ goto out;
+ }
+ ret = dict_get_int32n(dict, "flags", SLEN("flags"), &flags);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FLAGS_NOTFOUND_IN_DICT,
+ "Failed to get flags");
+ goto out;
+ }
+
+ ret = glusterd_hostname_to_uuid(hostname, uuid);
+ if (ret) {
+ op_errno = GF_DEPROBE_NOT_FRIEND;
+ goto out;
+ }
+
+ if (!gf_uuid_compare(uuid, MY_UUID)) {
+ op_errno = GF_DEPROBE_LOCALHOST;
+ ret = -1;
+ goto out;
+ }
+
+ if (!(flags & GF_CLI_FLAG_OP_FORCE)) {
+ /* Check if peers are connected, except the peer being
+ * detached */
+ if (!glusterd_chk_peers_connected_befriended(uuid)) {
+ ret = -1;
+ op_errno = GF_DEPROBE_FRIEND_DOWN;
+ goto out;
+ }
+ }
+
+ /* Check whether volumes exist with some bricks on the peer being
+ * detached. It's not a problem if a volume contains none or all
+ * of its bricks on the peer being detached.
+ */
+ cds_list_for_each_entry_safe(volinfo, tmp, &priv->volumes, vol_list)
+ {
+ ret = glusterd_friend_contains_vol_bricks(volinfo, uuid);
+ if (ret == 1) {
+ op_errno = GF_DEPROBE_BRICK_EXIST;
+ goto out;
+ }
+ }
+
+ cds_list_for_each_entry_safe(snapinfo, tmpsnap, &priv->snapshots, snap_list)
+ {
+ ret = glusterd_friend_contains_snap_bricks(snapinfo, uuid);
+ if (ret == 1) {
+ op_errno = GF_DEPROBE_SNAP_BRICK_EXIST;
+ goto out;
+ }
+ }
+ if (!(flags & GF_CLI_FLAG_OP_FORCE)) {
+ if (glusterd_is_any_volume_in_server_quorum(this) &&
+ !does_gd_meet_server_quorum(this)) {
+ gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_SERVER_QUORUM_NOT_MET,
+ "Server quorum not met. Rejecting operation.");
+ ret = -1;
+ op_errno = GF_DEPROBE_QUORUM_NOT_MET;
+ goto out;
+ }
+ }
+
+ if (!gf_uuid_is_null(uuid)) {
+ ret = glusterd_deprobe_begin(req, hostname, port, uuid, dict,
+ &op_errno);
+ } else {
+ ret = glusterd_deprobe_begin(req, hostname, port, NULL, dict,
+ &op_errno);
+ }
out:
- free (cli_req.dict.dict_val);
+ free(cli_req.dict.dict_val);
- if (ret) {
- ret = glusterd_xfer_cli_deprobe_resp (req, ret, op_errno, NULL,
- hostname, dict);
- }
+ if (ret) {
+ ret = glusterd_xfer_cli_deprobe_resp(req, ret, op_errno, NULL, hostname,
+ dict);
+ }
- glusterd_friend_sm ();
- glusterd_op_sm ();
+ glusterd_friend_sm();
+ glusterd_op_sm();
- return ret;
+ return ret;
}
int
-glusterd_handle_cli_deprobe (rpcsvc_request_t *req)
+glusterd_handle_cli_deprobe(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __glusterd_handle_cli_deprobe);
+ return glusterd_big_locked_handler(req, __glusterd_handle_cli_deprobe);
}
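
The deprobe handler validates a detach in a fixed order, and only some checks yield to the force flag: peer connectivity and server quorum can be overridden, brick placement cannot. The ordering in miniature, with stubbed predicates (all names hypothetical):

    #include <stdbool.h>

    enum deprobe_err { DEPROBE_OK, DEPROBE_LOCALHOST, DEPROBE_FRIEND_DOWN,
                       DEPROBE_BRICK_EXIST, DEPROBE_SNAP_BRICK_EXIST,
                       DEPROBE_QUORUM_NOT_MET };

    /* stubs standing in for the glusterd helpers */
    static bool peer_is_self(void)             { return false; }
    static bool peers_connected(void)          { return true;  }
    static bool peer_hosts_volume_bricks(void) { return false; }
    static bool peer_hosts_snap_bricks(void)   { return false; }
    static bool quorum_met(void)               { return true;  }

    static enum deprobe_err
    validate_detach(bool force)
    {
        if (peer_is_self())
            return DEPROBE_LOCALHOST;
        if (!force && !peers_connected())
            return DEPROBE_FRIEND_DOWN;       /* skipped under force */
        if (peer_hosts_volume_bricks())
            return DEPROBE_BRICK_EXIST;       /* never skipped */
        if (peer_hosts_snap_bricks())
            return DEPROBE_SNAP_BRICK_EXIST;  /* never skipped */
        if (!force && !quorum_met())
            return DEPROBE_QUORUM_NOT_MET;    /* skipped under force */
        return DEPROBE_OK;
    }
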
int
-__glusterd_handle_cli_list_friends (rpcsvc_request_t *req)
+__glusterd_handle_cli_list_friends(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf1_cli_peer_list_req cli_req = {0,};
- dict_t *dict = NULL;
-
- GF_ASSERT (req);
+ int32_t ret = -1;
+ gf1_cli_peer_list_req cli_req = {
+ 0,
+ };
+ dict_t *dict = NULL;
+
+ GF_ASSERT(req);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req,
+ (xdrproc_t)xdr_gf1_cli_peer_list_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode "
+ "request received from cli");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_CLI_REQ_RECVD,
+ "Received cli list req");
+
+ if (cli_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new();
- ret = xdr_to_generic (req->msg[0], &cli_req,
- (xdrproc_t)xdr_gf1_cli_peer_list_req);
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
if (ret < 0) {
- //failed to decode msg;
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
- "request received from cli");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_CLI_REQ_RECVD,
- "Received cli list req");
-
- if (cli_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new ();
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to "
- "unserialize req-buffer to dictionary");
- goto out;
- } else {
- dict->extra_stdfree = cli_req.dict.dict_val;
- }
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
}
+ }
- ret = glusterd_list_friends (req, dict, cli_req.flags);
+ ret = glusterd_list_friends(req, dict, cli_req.flags);
out:
- if (dict)
- dict_unref (dict);
+ if (dict)
+ dict_unref(dict);
- glusterd_friend_sm ();
- glusterd_op_sm ();
+ glusterd_friend_sm();
+ glusterd_op_sm();
- return ret;
+ return ret;
}
int
-glusterd_handle_cli_list_friends (rpcsvc_request_t *req)
+glusterd_handle_cli_list_friends(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_handle_cli_list_friends);
+ return glusterd_big_locked_handler(req, __glusterd_handle_cli_list_friends);
}
static int
-__glusterd_handle_cli_get_volume (rpcsvc_request_t *req)
+__glusterd_handle_cli_get_volume(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf_cli_req cli_req = {{0,}};
- int32_t flags = 0;
- dict_t *dict = NULL;
- xlator_t *this = NULL;
-
- GF_ASSERT (req);
- this = THIS;
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{
+ 0,
+ }};
+ int32_t flags = 0;
+ dict_t *dict = NULL;
+ xlator_t *this = NULL;
+
+ GF_ASSERT(req);
+ this = THIS;
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode "
+ "request received from cli");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_GET_VOL_REQ_RCVD,
+ "Received get vol req");
+
+ if (cli_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new();
- ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
if (ret < 0) {
- //failed to decode msg;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
- "request received from cli");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_GET_VOL_REQ_RCVD,
- "Received get vol req");
-
- if (cli_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new ();
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to "
- "unserialize req-buffer to dictionary");
- goto out;
- } else {
- dict->extra_stdfree = cli_req.dict.dict_val;
- }
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
}
+ }
- ret = dict_get_int32n (dict, "flags", SLEN ("flags"), &flags);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_FLAGS_NOTFOUND_IN_DICT, "failed to get flags");
- goto out;
- }
- ret = glusterd_get_volumes (req, dict, flags);
+ ret = dict_get_int32n(dict, "flags", SLEN("flags"), &flags);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FLAGS_NOTFOUND_IN_DICT,
+ "failed to get flags");
+ goto out;
+ }
+ ret = glusterd_get_volumes(req, dict, flags);
out:
- if (dict)
- dict_unref (dict);
+ if (dict)
+ dict_unref(dict);
- glusterd_friend_sm ();
- glusterd_op_sm ();
+ glusterd_friend_sm();
+ glusterd_op_sm();
- return ret;
+ return ret;
}
int
-glusterd_handle_cli_get_volume (rpcsvc_request_t *req)
+glusterd_handle_cli_get_volume(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_handle_cli_get_volume);
+ return glusterd_big_locked_handler(req, __glusterd_handle_cli_get_volume);
}
int
-__glusterd_handle_cli_uuid_reset (rpcsvc_request_t *req)
+__glusterd_handle_cli_uuid_reset(rpcsvc_request_t *req)
{
- int ret = -1;
- dict_t *dict = NULL;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- uuid_t uuid = {0};
- gf_cli_rsp rsp = {0,};
- gf_cli_req cli_req = {{0,}};
- char msg_str[128] = {0,};
-
- GF_ASSERT (req);
-
- this = THIS;
- priv = this->private;
- GF_ASSERT (priv);
-
- ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
- if (ret < 0) {
- //failed to decode msg;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
- "request received from cli");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- gf_msg_debug ("glusterd", 0, "Received uuid reset req");
-
- if (cli_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new ();
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to "
- "unserialize req-buffer to dictionary");
- snprintf (msg_str, sizeof (msg_str), "Unable to decode "
- "the buffer");
- goto out;
- } else {
- dict->extra_stdfree = cli_req.dict.dict_val;
- }
- }
+ int ret = -1;
+ dict_t *dict = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ uuid_t uuid = {0};
+ gf_cli_rsp rsp = {
+ 0,
+ };
+ gf_cli_req cli_req = {{
+ 0,
+ }};
+ char msg_str[128] = {
+ 0,
+ };
+
+ GF_ASSERT(req);
+
+ this = THIS;
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode "
+ "request received from cli");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_msg_debug("glusterd", 0, "Received uuid reset req");
+
+ if (cli_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new();
- /* In the above section if dict_unserialize is successful, ret is set
- * to zero.
- */
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
+ if (ret < 0) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf(msg_str, sizeof(msg_str),
+ "Unable to decode "
+ "the buffer");
+ goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
+ }
+ }
+
+ /* In the above section, if dict_unserialize is successful, ret is set
+ * to zero.
+ */
+ ret = -1;
+ // Do not allow peer reset if there are any volumes in the cluster
+ if (!cds_list_empty(&priv->volumes)) {
+ snprintf(msg_str, sizeof(msg_str),
+ "volumes are already "
+ "present in the cluster. Resetting uuid is not "
+ "allowed");
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLS_ALREADY_PRESENT, "%s",
+ msg_str);
+ goto out;
+ }
+
+ // Do not allow peer reset if trusted storage pool is already formed
+ if (!cds_list_empty(&priv->peers)) {
+ snprintf(msg_str, sizeof(msg_str),
+ "trusted storage pool "
+ "has been already formed. Please detach this peer "
+ "from the pool and reset its uuid.");
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_TSP_ALREADY_FORMED, "%s",
+ msg_str);
+ goto out;
+ }
+
+ gf_uuid_copy(uuid, priv->uuid);
+ ret = glusterd_uuid_generate_save();
+
+ if (!gf_uuid_compare(uuid, MY_UUID)) {
+ snprintf(msg_str, sizeof(msg_str),
+ "old uuid and the new uuid"
+ " are same. Try gluster peer reset again");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UUIDS_SAME_RETRY, "%s",
+ msg_str);
ret = -1;
- // Do not allow peer reset if there are any volumes in the cluster
- if (!cds_list_empty (&priv->volumes)) {
- snprintf (msg_str, sizeof (msg_str), "volumes are already "
- "present in the cluster. Resetting uuid is not "
- "allowed");
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_VOLS_ALREADY_PRESENT, "%s", msg_str);
- goto out;
- }
-
- // Do not allow peer reset if trusted storage pool is already formed
- if (!cds_list_empty (&priv->peers)) {
- snprintf (msg_str, sizeof (msg_str),"trusted storage pool "
- "has been already formed. Please detach this peer "
- "from the pool and reset its uuid.");
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_TSP_ALREADY_FORMED, "%s", msg_str);
- goto out;
- }
-
- gf_uuid_copy (uuid, priv->uuid);
- ret = glusterd_uuid_generate_save ();
-
- if (!gf_uuid_compare (uuid, MY_UUID)) {
- snprintf (msg_str, sizeof (msg_str), "old uuid and the new uuid"
- " are same. Try gluster peer reset again");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_UUIDS_SAME_RETRY, "%s", msg_str);
- ret = -1;
- goto out;
- }
+ goto out;
+ }
out:
- if (ret) {
- rsp.op_ret = -1;
- if (msg_str[0] == '\0')
- snprintf (msg_str, sizeof (msg_str), "Operation "
- "failed");
- rsp.op_errstr = msg_str;
- ret = 0;
- } else {
- rsp.op_errstr = "";
- }
+ if (ret) {
+ rsp.op_ret = -1;
+ if (msg_str[0] == '\0')
+ snprintf(msg_str, sizeof(msg_str),
+ "Operation "
+ "failed");
+ rsp.op_errstr = msg_str;
+ ret = 0;
+ } else {
+ rsp.op_errstr = "";
+ }
- glusterd_to_cli (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf_cli_rsp, dict);
+ glusterd_to_cli(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp, dict);
- return ret;
+ return ret;
}
int
-glusterd_handle_cli_uuid_reset (rpcsvc_request_t *req)
+glusterd_handle_cli_uuid_reset(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_handle_cli_uuid_reset);
+ return glusterd_big_locked_handler(req, __glusterd_handle_cli_uuid_reset);
}
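
Resetting a node's uuid is refused while any volume or peer exists, because stored pool state would still reference the old identity; after regenerating, the handler also fails (asking the user to retry) if the new uuid happens to match the old one. A compact sketch of that logic with stub helpers:

    #include <stdbool.h>
    #include <string.h>

    typedef unsigned char node_uuid[16];

    static bool volumes_present(void) { return false; }   /* stub */
    static bool peers_present(void)   { return false; }   /* stub */
    static void regenerate_uuid(node_uuid u)
    { memset(u, 0xab, sizeof(node_uuid)); }                /* stub */

    /* returns 0 on success; -1 when the caller should fail (and may retry) */
    static int
    reset_uuid(node_uuid my_uuid)
    {
        node_uuid old;

        if (volumes_present() || peers_present())
            return -1;    /* pool state still references the old uuid */

        memcpy(old, my_uuid, sizeof(old));
        regenerate_uuid(my_uuid);

        /* a random generator can, in principle, repeat itself */
        return memcmp(old, my_uuid, sizeof(old)) == 0 ? -1 : 0;
    }
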
int
-__glusterd_handle_cli_uuid_get (rpcsvc_request_t *req)
+__glusterd_handle_cli_uuid_get(rpcsvc_request_t *req)
{
- int ret = -1;
- dict_t *dict = NULL;
- dict_t *rsp_dict = NULL;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- gf_cli_rsp rsp = {0,};
- gf_cli_req cli_req = {{0,}};
- char err_str[64] = {0,};
- char uuid_str[64] = {0,};
-
- GF_ASSERT (req);
-
- this = THIS;
- priv = this->private;
- GF_ASSERT (priv);
-
- ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
- "request received from cli");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- gf_msg_debug ("glusterd", 0, "Received uuid get req");
-
- if (cli_req.dict.dict_len) {
- dict = dict_new ();
- if (!dict) {
- ret = -1;
- goto out;
- }
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to "
- "unserialize req-buffer to dictionary");
- snprintf (err_str, sizeof (err_str), "Unable to decode "
- "the buffer");
- goto out;
-
- } else {
- dict->extra_stdfree = cli_req.dict.dict_val;
-
- }
+ int ret = -1;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ gf_cli_rsp rsp = {
+ 0,
+ };
+ gf_cli_req cli_req = {{
+ 0,
+ }};
+ char err_str[64] = {
+ 0,
+ };
+ char uuid_str[64] = {
+ 0,
+ };
+
+ GF_ASSERT(req);
+
+ this = THIS;
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode "
+ "request received from cli");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_msg_debug("glusterd", 0, "Received uuid get req");
+
+ if (cli_req.dict.dict_len) {
+ dict = dict_new();
+ if (!dict) {
+ ret = -1;
+ goto out;
}
- rsp_dict = dict_new ();
- if (!rsp_dict) {
- ret = -1;
- goto out;
- }
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
+ if (ret < 0) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf(err_str, sizeof(err_str),
+ "Unable to decode "
+ "the buffer");
+ goto out;
- uuid_utoa_r (MY_UUID, uuid_str);
- ret = dict_set_strn (rsp_dict, "uuid", SLEN ("uuid"), uuid_str);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "Failed to set uuid in "
- "dictionary.");
- goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
}
+ }
- ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
- &rsp.dict.dict_len);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "Failed to serialize "
- "dictionary.");
- goto out;
- }
- ret = 0;
+ rsp_dict = dict_new();
+ if (!rsp_dict) {
+ ret = -1;
+ goto out;
+ }
+
+ uuid_utoa_r(MY_UUID, uuid_str);
+ ret = dict_set_strn(rsp_dict, "uuid", SLEN("uuid"), uuid_str);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set uuid in "
+ "dictionary.");
+ goto out;
+ }
+
+ ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
+ "Failed to serialize "
+ "dictionary.");
+ goto out;
+ }
+ ret = 0;
out:
- if (ret) {
- rsp.op_ret = -1;
- if (err_str[0] == '\0')
- snprintf (err_str, sizeof (err_str), "Operation "
- "failed");
- rsp.op_errstr = err_str;
-
- } else {
- rsp.op_errstr = "";
-
- }
+ if (ret) {
+ rsp.op_ret = -1;
+ if (err_str[0] == '\0')
+ snprintf(err_str, sizeof(err_str),
+ "Operation "
+ "failed");
+ rsp.op_errstr = err_str;
+
+ } else {
+ rsp.op_errstr = "";
+ }
- glusterd_to_cli (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf_cli_rsp, dict);
+ glusterd_to_cli(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp, dict);
- return 0;
+ return 0;
}
int
-glusterd_handle_cli_uuid_get (rpcsvc_request_t *req)
+glusterd_handle_cli_uuid_get(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_handle_cli_uuid_get);
+ return glusterd_big_locked_handler(req, __glusterd_handle_cli_uuid_get);
}
int
-__glusterd_handle_cli_list_volume (rpcsvc_request_t *req)
+__glusterd_handle_cli_list_volume(rpcsvc_request_t *req)
{
- int ret = -1;
- dict_t *dict = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- int count = 0;
- char key[64] = {0,};
- int keylen;
- gf_cli_rsp rsp = {0,};
-
- GF_ASSERT (req);
-
- priv = THIS->private;
- GF_ASSERT (priv);
-
- dict = dict_new ();
- if (!dict)
- goto out;
-
- cds_list_for_each_entry (volinfo, &priv->volumes, vol_list) {
- keylen = snprintf (key, sizeof (key), "volume%d", count);
- ret = dict_set_strn (dict, key, keylen, volinfo->volname);
- if (ret)
- goto out;
- count++;
- }
-
- ret = dict_set_int32n (dict, "count", SLEN ("count"), count);
+ int ret = -1;
+ dict_t *dict = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ int count = 0;
+ char key[64] = {
+ 0,
+ };
+ int keylen;
+ gf_cli_rsp rsp = {
+ 0,
+ };
+
+ GF_ASSERT(req);
+
+ priv = THIS->private;
+ GF_ASSERT(priv);
+
+ dict = dict_new();
+ if (!dict)
+ goto out;
+
+ cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
+ {
+ keylen = snprintf(key, sizeof(key), "volume%d", count);
+ ret = dict_set_strn(dict, key, keylen, volinfo->volname);
if (ret)
- goto out;
+ goto out;
+ count++;
+ }
- ret = dict_allocate_and_serialize (dict, &rsp.dict.dict_val,
- &rsp.dict.dict_len);
- if (ret)
- goto out;
+ ret = dict_set_int32n(dict, "count", SLEN("count"), count);
+ if (ret)
+ goto out;
- ret = 0;
+ ret = dict_allocate_and_serialize(dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret)
+ goto out;
+
+ ret = 0;
out:
- rsp.op_ret = ret;
- if (ret)
- rsp.op_errstr = "Error listing volumes";
- else
- rsp.op_errstr = "";
+ rsp.op_ret = ret;
+ if (ret)
+ rsp.op_errstr = "Error listing volumes";
+ else
+ rsp.op_errstr = "";
- glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf_cli_rsp);
- ret = 0;
+ glusterd_submit_reply(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp);
+ ret = 0;
- if (dict)
- dict_unref (dict);
+ if (dict)
+ dict_unref(dict);
- glusterd_friend_sm ();
- glusterd_op_sm ();
+ glusterd_friend_sm();
+ glusterd_op_sm();
- return ret;
+ return ret;
}
int
-glusterd_handle_cli_list_volume (rpcsvc_request_t *req)
+glusterd_handle_cli_list_volume(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_handle_cli_list_volume);
+ return glusterd_big_locked_handler(req, __glusterd_handle_cli_list_volume);
}
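
The volume list reply is a flat dict convention: keys volume0 through volume&lt;count-1&gt; plus a count key. A small runnable sketch of how a consumer walks that layout, with mock data in place of a real dict:

    #include <stdio.h>

    int main(void)
    {
        const char *volumes[] = { "gv0", "gv1", "gv2" };   /* mock payload */
        int count = (int)(sizeof(volumes) / sizeof(volumes[0]));
        char key[64];

        for (int i = 0; i < count; i++) {
            snprintf(key, sizeof(key), "volume%d", i);     /* same key scheme */
            printf("%s -> %s\n", key, volumes[i]);
        }
        return 0;
    }
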
int32_t
-glusterd_op_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
- char *err_str, size_t err_len)
+glusterd_op_begin(rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
+ char *err_str, size_t err_len)
{
- int ret = -1;
+ int ret = -1;
- ret = glusterd_op_txn_begin (req, op, ctx, err_str, err_len);
+ ret = glusterd_op_txn_begin(req, op, ctx, err_str, err_len);
- return ret;
+ return ret;
}
static int
-__glusterd_handle_reset_volume (rpcsvc_request_t *req)
+__glusterd_handle_reset_volume(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf_cli_req cli_req = {{0,}};
- dict_t *dict = NULL;
- glusterd_op_t cli_op = GD_OP_RESET_VOLUME;
- char *volname = NULL;
- char err_str[64] = {0,};
- xlator_t *this = NULL;
-
- GF_ASSERT (req);
- this = THIS;
- GF_ASSERT (this);
-
- gf_msg (this->name, GF_LOG_INFO, 0, 0,
- "Received reset vol req");
-
- ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
- if (ret < 0) {
- snprintf (err_str, sizeof (err_str), "Failed to decode request "
- "received from cli");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "%s", err_str);
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- if (cli_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new ();
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL, "failed to "
- "unserialize req-buffer to dictionary");
- snprintf (err_str, sizeof (err_str), "Unable to decode "
- "the command");
- goto out;
- } else {
- dict->extra_stdfree = cli_req.dict.dict_val;
- }
- }
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Failed to get volume "
- "name");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLNAME_NOTFOUND_IN_DICT, "%s", err_str);
- goto out;
- }
- gf_msg_debug (this->name, 0, "Received volume reset request for "
- "volume %s", volname);
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{
+ 0,
+ }};
+ dict_t *dict = NULL;
+ glusterd_op_t cli_op = GD_OP_RESET_VOLUME;
+ char *volname = NULL;
+ char err_str[64] = {
+ 0,
+ };
+ xlator_t *this = NULL;
+
+ GF_ASSERT(req);
+ this = THIS;
+ GF_ASSERT(this);
+
+ gf_msg(this->name, GF_LOG_INFO, 0, 0, "Received reset vol req");
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ snprintf(err_str, sizeof(err_str),
+ "Failed to decode request "
+ "received from cli");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",
+ err_str);
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new();
- ret = glusterd_op_begin_synctask (req, GD_OP_RESET_VOLUME, dict);
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf(err_str, sizeof(err_str),
+ "Unable to decode "
+ "the command");
+ goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
+ }
+ }
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Failed to get volume "
+ "name");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLNAME_NOTFOUND_IN_DICT,
+ "%s", err_str);
+ goto out;
+ }
+ gf_msg_debug(this->name, 0,
+ "Received volume reset request for "
+ "volume %s",
+ volname);
+
+ ret = glusterd_op_begin_synctask(req, GD_OP_RESET_VOLUME, dict);
out:
- if (ret) {
- if (err_str[0] == '\0')
- snprintf (err_str, sizeof (err_str),
- "Operation failed");
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- dict, err_str);
- }
+ if (ret) {
+ if (err_str[0] == '\0')
+ snprintf(err_str, sizeof(err_str), "Operation failed");
+ ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);
+ }
- return ret;
+ return ret;
}
int
-glusterd_handle_reset_volume (rpcsvc_request_t *req)
+glusterd_handle_reset_volume(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_handle_reset_volume);
+ return glusterd_big_locked_handler(req, __glusterd_handle_reset_volume);
}
int
-__glusterd_handle_set_volume (rpcsvc_request_t *req)
+__glusterd_handle_set_volume(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf_cli_req cli_req = {{0,}};
- dict_t *dict = NULL;
- glusterd_op_t cli_op = GD_OP_SET_VOLUME;
- char *key = NULL;
- char *value = NULL;
- char *volname = NULL;
- char *op_errstr = NULL;
- gf_boolean_t help = _gf_false;
- char err_str[2048] = {0,};
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (req);
-
- ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
- if (ret < 0) {
- snprintf (err_str, sizeof (err_str), "Failed to decode "
- "request received from cli");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "%s", err_str);
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- if (cli_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new ();
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to "
- "unserialize req-buffer to dictionary");
- snprintf (err_str, sizeof (err_str), "Unable to decode "
- "the command");
- goto out;
- } else {
- dict->extra_stdfree = cli_req.dict.dict_val;
- }
- }
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Failed to get volume "
- "name while handling volume set command");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "%s", err_str);
- goto out;
- }
-
- if (strcmp (volname, "help") == 0 ||
- strcmp (volname, "help-xml") == 0) {
- ret = glusterd_volset_help (dict, &op_errstr);
- help = _gf_true;
- goto out;
- }
-
- ret = dict_get_strn (dict, "key1", SLEN ("key1"), &key);
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Failed to get key while"
- " handling volume set for %s", volname);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "%s", err_str);
- goto out;
- }
-
- ret = dict_get_strn (dict, "value1", SLEN ("value1"), &value);
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Failed to get value while"
- " handling volume set for %s", volname);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "%s", err_str);
- goto out;
- }
- gf_msg_debug (this->name, 0, "Received volume set request for "
- "volume %s", volname);
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{
+ 0,
+ }};
+ dict_t *dict = NULL;
+ glusterd_op_t cli_op = GD_OP_SET_VOLUME;
+ char *key = NULL;
+ char *value = NULL;
+ char *volname = NULL;
+ char *op_errstr = NULL;
+ gf_boolean_t help = _gf_false;
+ char err_str[2048] = {
+ 0,
+ };
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(req);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ snprintf(err_str, sizeof(err_str),
+ "Failed to decode "
+ "request received from cli");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",
+ err_str);
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new();
- ret = glusterd_op_begin_synctask (req, GD_OP_SET_VOLUME, dict);
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf(err_str, sizeof(err_str),
+ "Unable to decode "
+ "the command");
+ goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
+ }
+ }
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Failed to get volume "
+ "name while handling volume set command");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ err_str);
+ goto out;
+ }
+
+ if (strcmp(volname, "help") == 0 || strcmp(volname, "help-xml") == 0) {
+ ret = glusterd_volset_help(dict, &op_errstr);
+ help = _gf_true;
+ goto out;
+ }
+
+ ret = dict_get_strn(dict, "key1", SLEN("key1"), &key);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Failed to get key while"
+ " handling volume set for %s",
+ volname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ err_str);
+ goto out;
+ }
+
+ ret = dict_get_strn(dict, "value1", SLEN("value1"), &value);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Failed to get value while"
+ " handling volume set for %s",
+ volname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ err_str);
+ goto out;
+ }
+ gf_msg_debug(this->name, 0,
+ "Received volume set request for "
+ "volume %s",
+ volname);
+
+ ret = glusterd_op_begin_synctask(req, GD_OP_SET_VOLUME, dict);
out:
- if (help)
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req, dict,
- (op_errstr)? op_errstr:"");
- else if (ret) {
- if (err_str[0] == '\0')
- snprintf (err_str, sizeof (err_str),
- "Operation failed");
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- dict, err_str);
- }
- if (op_errstr)
- GF_FREE (op_errstr);
-
- return ret;
+ if (help)
+ ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict,
+ (op_errstr) ? op_errstr : "");
+ else if (ret) {
+ if (err_str[0] == '\0')
+ snprintf(err_str, sizeof(err_str), "Operation failed");
+ ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);
+ }
+ if (op_errstr)
+ GF_FREE(op_errstr);
+
+ return ret;
}
int
-glusterd_handle_set_volume (rpcsvc_request_t *req)
+glusterd_handle_set_volume(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __glusterd_handle_set_volume);
+ return glusterd_big_locked_handler(req, __glusterd_handle_set_volume);
}
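
Note that the set-volume handler validates only the first key1/value1 pair before handing the whole dict to the transaction. A hedged sketch of the packing side as the CLI is assumed to do it — numbered pairs plus a count key, with printf standing in for the dict_set calls:

    #include <stdio.h>

    /* print in place of dict_set_strn/dict_set_int32n */
    static void
    pack_options(const char *kv[][2], int n)
    {
        for (int i = 0; i < n; i++)
            printf("key%d=%s value%d=%s\n", i + 1, kv[i][0], i + 1, kv[i][1]);
        printf("count=%d\n", n);
    }

    int main(void)
    {
        const char *opts[][2] = { { "performance.cache-size", "256MB" },
                                  { "nfs.disable", "on" } };
        pack_options(opts, 2);
        return 0;
    }
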
int
-__glusterd_handle_sync_volume (rpcsvc_request_t *req)
+__glusterd_handle_sync_volume(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf_cli_req cli_req = {{0,}};
- dict_t *dict = NULL;
- gf_cli_rsp cli_rsp = {0.};
- char msg[2048] = {0,};
- char *volname = NULL;
- gf1_cli_sync_volume flags = 0;
- char *hostname = NULL;
- xlator_t *this = NULL;
-
- GF_ASSERT (req);
- this = THIS;
- GF_ASSERT (this);
-
- ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
- if (ret < 0) {
- //failed to decode msg;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "%s", "Failed to decode "
- "request received from cli");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{
+ 0,
+ }};
+ dict_t *dict = NULL;
+ gf_cli_rsp cli_rsp = {0,};
+ char msg[2048] = {
+ 0,
+ };
+ char *volname = NULL;
+ gf1_cli_sync_volume flags = 0;
+ char *hostname = NULL;
+ xlator_t *this = NULL;
+
+ GF_ASSERT(req);
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",
+ "Failed to decode "
+ "request received from cli");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new();
- if (cli_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new ();
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to "
- "unserialize req-buffer to dictionary");
- snprintf (msg, sizeof (msg), "Unable to decode the "
- "command");
- goto out;
- } else {
- dict->extra_stdfree = cli_req.dict.dict_val;
- }
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf(msg, sizeof(msg),
+ "Unable to decode the "
+ "command");
+ goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
}
+ }
- ret = dict_get_strn (dict, "hostname", SLEN ("hostname"), &hostname);
- if (ret) {
- snprintf (msg, sizeof (msg), "Failed to get hostname");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_HOSTNAME_NOTFOUND_IN_DICT, "%s", msg);
- goto out;
- }
+ ret = dict_get_strn(dict, "hostname", SLEN("hostname"), &hostname);
+ if (ret) {
+ snprintf(msg, sizeof(msg), "Failed to get hostname");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_NOTFOUND_IN_DICT,
+ "%s", msg);
+ goto out;
+ }
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ ret = dict_get_int32n(dict, "flags", SLEN("flags"), (int32_t *)&flags);
if (ret) {
- ret = dict_get_int32n (dict, "flags", SLEN ("flags"),
- (int32_t*)&flags);
- if (ret) {
- snprintf (msg, sizeof (msg),
- "Failed to get volume name or flags");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_FLAGS_NOTFOUND_IN_DICT, "%s", msg);
- goto out;
- }
+ snprintf(msg, sizeof(msg), "Failed to get volume name or flags");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FLAGS_NOTFOUND_IN_DICT,
+ "%s", msg);
+ goto out;
}
+ }
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_VOL_SYNC_REQ_RCVD, "Received volume sync req "
- "for volume %s", (flags & GF_CLI_SYNC_ALL) ? "all" : volname);
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_VOL_SYNC_REQ_RCVD,
+ "Received volume sync req "
+ "for volume %s",
+ (flags & GF_CLI_SYNC_ALL) ? "all" : volname);
- if (gf_is_local_addr (hostname)) {
- ret = -1;
- snprintf (msg, sizeof (msg), "sync from localhost"
- " not allowed");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SYNC_FROM_LOCALHOST_UNALLOWED, "%s", msg);
- goto out;
- }
+ if (gf_is_local_addr(hostname)) {
+ ret = -1;
+ snprintf(msg, sizeof(msg),
+ "sync from localhost"
+ " not allowed");
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SYNC_FROM_LOCALHOST_UNALLOWED, "%s", msg);
+ goto out;
+ }
- ret = glusterd_op_begin_synctask (req, GD_OP_SYNC_VOLUME, dict);
+ ret = glusterd_op_begin_synctask(req, GD_OP_SYNC_VOLUME, dict);
out:
- if (ret) {
- cli_rsp.op_ret = -1;
- cli_rsp.op_errstr = msg;
- if (msg[0] == '\0')
- snprintf (msg, sizeof (msg), "Operation failed");
- glusterd_to_cli (req, &cli_rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf_cli_rsp, dict);
-
- ret = 0; //sent error to cli, prevent second reply
- }
-
- return ret;
+ if (ret) {
+ cli_rsp.op_ret = -1;
+ cli_rsp.op_errstr = msg;
+ if (msg[0] == '\0')
+ snprintf(msg, sizeof(msg), "Operation failed");
+ glusterd_to_cli(req, &cli_rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp,
+ dict);
+
+ ret = 0; // sent error to cli, prevent second reply
+ }
+
+ return ret;
}
int
-glusterd_handle_sync_volume (rpcsvc_request_t *req)
+glusterd_handle_sync_volume(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __glusterd_handle_sync_volume);
+ return glusterd_big_locked_handler(req, __glusterd_handle_sync_volume);
}
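
The "ret = 0" after sending an error response is deliberate: once a reply has gone out on the request, returning non-zero would make the RPC layer generate a second reply. The convention in miniature, with stubbed helpers:

    /* stubs for illustration */
    static int  do_work(void *req)                 { (void)req; return -1; }
    static void send_error_reply(void *req, int e) { (void)req; (void)e;  }

    static int
    handle(void *req)
    {
        int ret = do_work(req);

        if (ret) {
            send_error_reply(req, ret);
            ret = 0;   /* reply already sent; suppress the RPC layer's own */
        }
        return ret;
    }
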
int
-glusterd_fsm_log_send_resp (rpcsvc_request_t *req, int op_ret,
- char *op_errstr, dict_t *dict)
+glusterd_fsm_log_send_resp(rpcsvc_request_t *req, int op_ret, char *op_errstr,
+ dict_t *dict)
{
+ int ret = -1;
+ gf1_cli_fsm_log_rsp rsp = {0};
- int ret = -1;
- gf1_cli_fsm_log_rsp rsp = {0};
-
- GF_ASSERT (req);
- GF_ASSERT (op_errstr);
+ GF_ASSERT(req);
+ GF_ASSERT(op_errstr);
- rsp.op_ret = op_ret;
- rsp.op_errstr = op_errstr;
- if (rsp.op_ret == 0) {
- ret = dict_allocate_and_serialize (dict, &rsp.fsm_log.fsm_log_val,
- &rsp.fsm_log.fsm_log_len);
- if (ret < 0) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "failed to get serialized length of dict");
- return ret;
- }
+ rsp.op_ret = op_ret;
+ rsp.op_errstr = op_errstr;
+ if (rsp.op_ret == 0) {
+ ret = dict_allocate_and_serialize(dict, &rsp.fsm_log.fsm_log_val,
+ &rsp.fsm_log.fsm_log_len);
+ if (ret < 0) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
+ "failed to get serialized length of dict");
+ return ret;
}
+ }
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf1_cli_fsm_log_rsp);
- GF_FREE (rsp.fsm_log.fsm_log_val);
+ ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf1_cli_fsm_log_rsp);
+ GF_FREE(rsp.fsm_log.fsm_log_val);
- gf_msg_debug ("glusterd", 0, "Responded, ret: %d", ret);
+ gf_msg_debug("glusterd", 0, "Responded, ret: %d", ret);
- return 0;
+ return 0;
}
int
-__glusterd_handle_fsm_log (rpcsvc_request_t *req)
+__glusterd_handle_fsm_log(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf1_cli_fsm_log_req cli_req = {0,};
- dict_t *dict = NULL;
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- char msg[2048] = {0};
- glusterd_peerinfo_t *peerinfo = NULL;
-
- GF_ASSERT (req);
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("xlator", (this != NULL), out);
-
- ret = xdr_to_generic (req->msg[0], &cli_req,
- (xdrproc_t)xdr_gf1_cli_fsm_log_req);
- if (ret < 0) {
- //failed to decode msg;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
- "request received from client.");
- req->rpc_err = GARBAGE_ARGS;
- snprintf (msg, sizeof (msg), "Garbage request");
- goto out;
- }
+ int32_t ret = -1;
+ gf1_cli_fsm_log_req cli_req = {
+ 0,
+ };
+ dict_t *dict = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ char msg[2048] = {0};
+ glusterd_peerinfo_t *peerinfo = NULL;
+
+ GF_ASSERT(req);
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("xlator", (this != NULL), out);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req,
+ (xdrproc_t)xdr_gf1_cli_fsm_log_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode "
+ "request received from client.");
+ req->rpc_err = GARBAGE_ARGS;
+ snprintf(msg, sizeof(msg), "Garbage request");
+ goto out;
+ }
+
+ dict = dict_new();
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
- dict = dict_new ();
- if (!dict) {
- ret = -1;
- goto out;
- }
+ if (strcmp("", cli_req.name) == 0) {
+ conf = this->private;
+ ret = glusterd_sm_tr_log_add_to_dict(dict, &conf->op_sm_log);
+ } else {
+ rcu_read_lock();
- if (strcmp ("", cli_req.name) == 0) {
- conf = this->private;
- ret = glusterd_sm_tr_log_add_to_dict (dict, &conf->op_sm_log);
+ peerinfo = glusterd_peerinfo_find_by_hostname(cli_req.name);
+ if (!peerinfo) {
+ ret = -1;
+ snprintf(msg, sizeof(msg), "%s is not a peer", cli_req.name);
} else {
- rcu_read_lock ();
-
- peerinfo = glusterd_peerinfo_find_by_hostname (cli_req.name);
- if (!peerinfo) {
- ret = -1;
- snprintf (msg, sizeof (msg), "%s is not a peer",
- cli_req.name);
- } else {
- ret = glusterd_sm_tr_log_add_to_dict
- (dict, &peerinfo->sm_log);
- }
-
- rcu_read_unlock ();
+ ret = glusterd_sm_tr_log_add_to_dict(dict, &peerinfo->sm_log);
}
+ rcu_read_unlock();
+ }
+
out:
- (void)glusterd_fsm_log_send_resp (req, ret, msg, dict);
- free (cli_req.name);//malloced by xdr
- if (dict)
- dict_unref (dict);
+ (void)glusterd_fsm_log_send_resp(req, ret, msg, dict);
+ free(cli_req.name); // malloced by xdr
+ if (dict)
+ dict_unref(dict);
- glusterd_friend_sm ();
- glusterd_op_sm ();
+ glusterd_friend_sm();
+ glusterd_op_sm();
- return 0;//send 0 to avoid double reply
+ return 0; // send 0 to avoid double reply
}
int
-glusterd_handle_fsm_log (rpcsvc_request_t *req)
+glusterd_handle_fsm_log(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __glusterd_handle_fsm_log);
+ return glusterd_big_locked_handler(req, __glusterd_handle_fsm_log);
}
int
-glusterd_op_lock_send_resp (rpcsvc_request_t *req, int32_t status)
+glusterd_op_lock_send_resp(rpcsvc_request_t *req, int32_t status)
{
+ gd1_mgmt_cluster_lock_rsp rsp = {
+ {0},
+ };
+ int ret = -1;
- gd1_mgmt_cluster_lock_rsp rsp = {{0},};
- int ret = -1;
+ GF_ASSERT(req);
+ glusterd_get_uuid(&rsp.uuid);
+ rsp.op_ret = status;
- GF_ASSERT (req);
- glusterd_get_uuid (&rsp.uuid);
- rsp.op_ret = status;
+ ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_lock_rsp);
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_cluster_lock_rsp);
+ gf_msg_debug(THIS->name, 0, "Responded to lock, ret: %d", ret);
- gf_msg_debug (THIS->name, 0, "Responded to lock, ret: %d", ret);
-
- return 0;
+ return 0;
}
int
-glusterd_op_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
+glusterd_op_unlock_send_resp(rpcsvc_request_t *req, int32_t status)
{
+ gd1_mgmt_cluster_unlock_rsp rsp = {
+ {0},
+ };
+ int ret = -1;
- gd1_mgmt_cluster_unlock_rsp rsp = {{0},};
- int ret = -1;
-
- GF_ASSERT (req);
- rsp.op_ret = status;
- glusterd_get_uuid (&rsp.uuid);
+ GF_ASSERT(req);
+ rsp.op_ret = status;
+ glusterd_get_uuid(&rsp.uuid);
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_rsp);
+ ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_rsp);
- gf_msg_debug (THIS->name, 0, "Responded to unlock, ret: %d", ret);
+ gf_msg_debug(THIS->name, 0, "Responded to unlock, ret: %d", ret);
- return ret;
+ return ret;
}
int
-glusterd_op_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
+glusterd_op_mgmt_v3_lock_send_resp(rpcsvc_request_t *req, uuid_t *txn_id,
int32_t status)
{
+ gd1_mgmt_v3_lock_rsp rsp = {
+ {0},
+ };
+ int ret = -1;
- gd1_mgmt_v3_lock_rsp rsp = {{0},};
- int ret = -1;
-
- GF_ASSERT (req);
- GF_ASSERT (txn_id);
- glusterd_get_uuid (&rsp.uuid);
- rsp.op_ret = status;
- if (rsp.op_ret)
- rsp.op_errno = errno;
- gf_uuid_copy (rsp.txn_id, *txn_id);
+ GF_ASSERT(req);
+ GF_ASSERT(txn_id);
+ glusterd_get_uuid(&rsp.uuid);
+ rsp.op_ret = status;
+ if (rsp.op_ret)
+ rsp.op_errno = errno;
+ gf_uuid_copy(rsp.txn_id, *txn_id);
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
+ ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
- gf_msg_debug (THIS->name, 0, "Responded to mgmt_v3 lock, ret: %d",
- ret);
+ gf_msg_debug(THIS->name, 0, "Responded to mgmt_v3 lock, ret: %d", ret);
- return ret;
+ return ret;
}
int
-glusterd_op_mgmt_v3_unlock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
+glusterd_op_mgmt_v3_unlock_send_resp(rpcsvc_request_t *req, uuid_t *txn_id,
int32_t status)
{
+ gd1_mgmt_v3_unlock_rsp rsp = {
+ {0},
+ };
+ int ret = -1;
- gd1_mgmt_v3_unlock_rsp rsp = {{0},};
- int ret = -1;
+ GF_ASSERT(req);
+ GF_ASSERT(txn_id);
+ rsp.op_ret = status;
+ if (rsp.op_ret)
+ rsp.op_errno = errno;
+ glusterd_get_uuid(&rsp.uuid);
+ gf_uuid_copy(rsp.txn_id, *txn_id);
- GF_ASSERT (req);
- GF_ASSERT (txn_id);
- rsp.op_ret = status;
- if (rsp.op_ret)
- rsp.op_errno = errno;
- glusterd_get_uuid (&rsp.uuid);
- gf_uuid_copy (rsp.txn_id, *txn_id);
+ ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
+ gf_msg_debug(THIS->name, 0, "Responded to mgmt_v3 unlock, ret: %d", ret);
- gf_msg_debug (THIS->name, 0, "Responded to mgmt_v3 unlock, ret: %d",
- ret);
-
- return ret;
+ return ret;
}
int
-__glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
+__glusterd_handle_cluster_unlock(rpcsvc_request_t *req)
{
- gd1_mgmt_cluster_unlock_req unlock_req = {{0}, };
- int32_t ret = -1;
- glusterd_op_lock_ctx_t *ctx = NULL;
- xlator_t *this = NULL;
- uuid_t *txn_id = NULL;
- glusterd_conf_t *priv = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
- GF_ASSERT (req);
-
- txn_id = &priv->global_txn_id;
-
- ret = xdr_to_generic (req->msg[0], &unlock_req,
- (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_req);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode unlock "
- "request received from peer");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
-
- gf_msg_debug (this->name, 0,
- "Received UNLOCK from uuid: %s", uuid_utoa (unlock_req.uuid));
-
- rcu_read_lock ();
- ret = (glusterd_peerinfo_find_by_uuid (unlock_req.uuid) == NULL);
- rcu_read_unlock ();
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_PEER_NOT_FOUND, "%s doesn't "
- "belong to the cluster. Ignoring request.",
- uuid_utoa (unlock_req.uuid));
- ret = -1;
- goto out;
- }
+ gd1_mgmt_cluster_unlock_req unlock_req = {
+ {0},
+ };
+ int32_t ret = -1;
+ glusterd_op_lock_ctx_t *ctx = NULL;
+ xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+ GF_ASSERT(req);
+
+ txn_id = &priv->global_txn_id;
+
+ ret = xdr_to_generic(req->msg[0], &unlock_req,
+ (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_req);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode unlock "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_msg_debug(this->name, 0, "Received UNLOCK from uuid: %s",
+ uuid_utoa(unlock_req.uuid));
+
+ rcu_read_lock();
+ ret = (glusterd_peerinfo_find_by_uuid(unlock_req.uuid) == NULL);
+ rcu_read_unlock();
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
+ "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa(unlock_req.uuid));
+ ret = -1;
+ goto out;
+ }
- ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
+ ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_op_lock_ctx_t);
- if (!ctx) {
- //respond here
- gf_msg (this->name, GF_LOG_ERROR, ENOMEM,
- GD_MSG_NO_MEMORY, "No memory.");
- return -1;
- }
- gf_uuid_copy (ctx->uuid, unlock_req.uuid);
- ctx->req = req;
- ctx->dict = NULL;
+ if (!ctx) {
+ // respond here
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "No memory.");
+ return -1;
+ }
+ gf_uuid_copy(ctx->uuid, unlock_req.uuid);
+ ctx->req = req;
+ ctx->dict = NULL;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_UNLOCK, txn_id, ctx);
+ ret = glusterd_op_sm_inject_event(GD_OP_EVENT_UNLOCK, txn_id, ctx);
out:
- glusterd_friend_sm ();
- glusterd_op_sm ();
+ glusterd_friend_sm();
+ glusterd_op_sm();
- return ret;
+ return ret;
}
int
-glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
+glusterd_handle_cluster_unlock(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_handle_cluster_unlock);
+ return glusterd_big_locked_handler(req, __glusterd_handle_cluster_unlock);
}
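
Every actor in this file goes through glusterd_big_locked_handler(), which serializes RPC handlers under glusterd's single "big lock". Roughly, and as a sketch only (synclock_* is the libglusterfs syncop lock API; the real wrapper lives elsewhere in this same file):

    int
    glusterd_big_locked_handler(rpcsvc_request_t *req, rpcsvc_actor actor_fn)
    {
        glusterd_conf_t *priv = THIS->private;
        int ret = -1;

        /* one handler at a time: the actor runs under the big lock */
        synclock_lock(&priv->big_lock);
        ret = actor_fn(req);
        synclock_unlock(&priv->big_lock);

        return ret;
    }

Handlers that may block (see __glusterd_handle_mount() below) have to drop and re-take this lock around the blocking call themselves.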
int
-glusterd_op_stage_send_resp (rpcsvc_request_t *req,
- int32_t op, int32_t status,
- char *op_errstr, dict_t *rsp_dict)
+glusterd_op_stage_send_resp(rpcsvc_request_t *req, int32_t op, int32_t status,
+ char *op_errstr, dict_t *rsp_dict)
{
- gd1_mgmt_stage_op_rsp rsp = {{0},};
- int ret = -1;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
-
- rsp.op_ret = status;
- glusterd_get_uuid (&rsp.uuid);
- rsp.op = op;
- if (op_errstr)
- rsp.op_errstr = op_errstr;
- else
- rsp.op_errstr = "";
-
- ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
- &rsp.dict.dict_len);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "failed to get serialized length of dict");
- return ret;
- }
+ gd1_mgmt_stage_op_rsp rsp = {
+ {0},
+ };
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+
+ rsp.op_ret = status;
+ glusterd_get_uuid(&rsp.uuid);
+ rsp.op = op;
+ if (op_errstr)
+ rsp.op_errstr = op_errstr;
+ else
+ rsp.op_errstr = "";
+
+ ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
+ "failed to get serialized length of dict");
+ return ret;
+ }
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_stage_op_rsp);
+ ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_stage_op_rsp);
- gf_msg_debug (this->name, 0, "Responded to stage, ret: %d", ret);
- GF_FREE (rsp.dict.dict_val);
+ gf_msg_debug(this->name, 0, "Responded to stage, ret: %d", ret);
+ GF_FREE(rsp.dict.dict_val);
- return ret;
+ return ret;
}
int
-glusterd_op_commit_send_resp (rpcsvc_request_t *req,
- int32_t op, int32_t status, char *op_errstr,
- dict_t *rsp_dict)
+glusterd_op_commit_send_resp(rpcsvc_request_t *req, int32_t op, int32_t status,
+ char *op_errstr, dict_t *rsp_dict)
{
- gd1_mgmt_commit_op_rsp rsp = {{0}, };
- int ret = -1;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
- rsp.op_ret = status;
- glusterd_get_uuid (&rsp.uuid);
- rsp.op = op;
-
- if (op_errstr)
- rsp.op_errstr = op_errstr;
- else
- rsp.op_errstr = "";
-
- if (rsp_dict) {
- ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
- &rsp.dict.dict_len);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "failed to get serialized length of dict");
- goto out;
- }
- }
+ gd1_mgmt_commit_op_rsp rsp = {
+ {0},
+ };
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+ rsp.op_ret = status;
+ glusterd_get_uuid(&rsp.uuid);
+ rsp.op = op;
+
+ if (op_errstr)
+ rsp.op_errstr = op_errstr;
+ else
+ rsp.op_errstr = "";
+ if (rsp_dict) {
+ ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
+ "failed to get serialized length of dict");
+ goto out;
+ }
+ }
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_commit_op_rsp);
+ ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_commit_op_rsp);
- gf_msg_debug (this->name, 0, "Responded to commit, ret: %d", ret);
+ gf_msg_debug(this->name, 0, "Responded to commit, ret: %d", ret);
out:
- GF_FREE (rsp.dict.dict_val);
- return ret;
+ GF_FREE(rsp.dict.dict_val);
+ return ret;
}
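
The stage and commit responders share one ownership rule: dict_allocate_and_serialize() allocates rsp.dict.dict_val, glusterd_submit_reply() only copies it onto the wire, and the responder frees it afterwards. Reduced to the bare pattern (a sketch with error paths trimmed, using the commit names above):

    /* serialize the reply dict into the XDR buffer */
    ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
                                      &rsp.dict.dict_len);
    if (ret == 0)
        ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
                                    (xdrproc_t)xdr_gd1_mgmt_commit_op_rsp);

    GF_FREE(rsp.dict.dict_val); /* buffer remains owned by the sender */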
int
-__glusterd_handle_incoming_friend_req (rpcsvc_request_t *req)
+__glusterd_handle_incoming_friend_req(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gd1_mgmt_friend_req friend_req = {{0},};
- gf_boolean_t run_fsm = _gf_true;
-
- GF_ASSERT (req);
- ret = xdr_to_generic (req->msg[0], &friend_req,
- (xdrproc_t)xdr_gd1_mgmt_friend_req);
- if (ret < 0) {
- //failed to decode msg;
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
- "request received from friend");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_PROBE_RCVD,
- "Received probe from uuid: %s", uuid_utoa (friend_req.uuid));
- ret = glusterd_handle_friend_req (req, friend_req.uuid,
- friend_req.hostname, friend_req.port,
- &friend_req);
-
- if (ret == GLUSTERD_CONNECTION_AWAITED) {
- //fsm should be run after connection establishes
- run_fsm = _gf_false;
- ret = 0;
- }
+ int32_t ret = -1;
+ gd1_mgmt_friend_req friend_req = {
+ {0},
+ };
+ gf_boolean_t run_fsm = _gf_true;
+
+ GF_ASSERT(req);
+ ret = xdr_to_generic(req->msg[0], &friend_req,
+ (xdrproc_t)xdr_gd1_mgmt_friend_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode "
+ "request received from friend");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_PROBE_RCVD,
+ "Received probe from uuid: %s", uuid_utoa(friend_req.uuid));
+ ret = glusterd_handle_friend_req(req, friend_req.uuid, friend_req.hostname,
+ friend_req.port, &friend_req);
+
+ if (ret == GLUSTERD_CONNECTION_AWAITED) {
+ // the fsm should be run after the connection is established
+ run_fsm = _gf_false;
+ ret = 0;
+ }
out:
- free (friend_req.hostname);//malloced by xdr
+ free(friend_req.hostname); // malloced by xdr
- if (run_fsm) {
- glusterd_friend_sm ();
- glusterd_op_sm ();
- }
+ if (run_fsm) {
+ glusterd_friend_sm();
+ glusterd_op_sm();
+ }
- return ret;
+ return ret;
}
int
-glusterd_handle_incoming_friend_req (rpcsvc_request_t *req)
+glusterd_handle_incoming_friend_req(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_handle_incoming_friend_req);
+ return glusterd_big_locked_handler(req,
+ __glusterd_handle_incoming_friend_req);
}
int
-__glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req)
+__glusterd_handle_incoming_unfriend_req(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gd1_mgmt_friend_req friend_req = {{0},};
- char remote_hostname[UNIX_PATH_MAX + 1] = {0,};
-
- GF_ASSERT (req);
- ret = xdr_to_generic (req->msg[0], &friend_req,
- (xdrproc_t)xdr_gd1_mgmt_friend_req);
- if (ret < 0) {
- //failed to decode msg;
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
- "request received.");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_UNFRIEND_REQ_RCVD,
- "Received unfriend from uuid: %s", uuid_utoa (friend_req.uuid));
-
- ret = glusterd_remote_hostname_get (req, remote_hostname,
- sizeof (remote_hostname));
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_HOSTNAME_RESOLVE_FAIL,
- "Unable to get the remote hostname");
- goto out;
- }
- ret = glusterd_handle_unfriend_req (req, friend_req.uuid,
- remote_hostname, friend_req.port);
+ int32_t ret = -1;
+ gd1_mgmt_friend_req friend_req = {
+ {0},
+ };
+ char remote_hostname[UNIX_PATH_MAX + 1] = {
+ 0,
+ };
+
+ GF_ASSERT(req);
+ ret = xdr_to_generic(req->msg[0], &friend_req,
+ (xdrproc_t)xdr_gd1_mgmt_friend_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode "
+ "request received.");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_UNFRIEND_REQ_RCVD,
+ "Received unfriend from uuid: %s", uuid_utoa(friend_req.uuid));
+
+ ret = glusterd_remote_hostname_get(req, remote_hostname,
+ sizeof(remote_hostname));
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_RESOLVE_FAIL,
+ "Unable to get the remote hostname");
+ goto out;
+ }
+ ret = glusterd_handle_unfriend_req(req, friend_req.uuid, remote_hostname,
+ friend_req.port);
out:
- free (friend_req.hostname);//malloced by xdr
- free (friend_req.vols.vols_val);//malloced by xdr
+ free(friend_req.hostname); // malloced by xdr
+ free(friend_req.vols.vols_val); // malloced by xdr
- glusterd_friend_sm ();
- glusterd_op_sm ();
+ glusterd_friend_sm();
+ glusterd_op_sm();
- return ret;
+ return ret;
}
int
-glusterd_handle_incoming_unfriend_req (rpcsvc_request_t *req)
+glusterd_handle_incoming_unfriend_req(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_handle_incoming_unfriend_req);
-
+ return glusterd_big_locked_handler(req,
+ __glusterd_handle_incoming_unfriend_req);
}
int
-glusterd_handle_friend_update_delete (dict_t *dict)
+glusterd_handle_friend_update_delete(dict_t *dict)
{
- char *hostname = NULL;
- int32_t ret = -1;
+ char *hostname = NULL;
+ int32_t ret = -1;
- GF_ASSERT (dict);
+ GF_ASSERT(dict);
- ret = dict_get_strn (dict, "hostname", SLEN ("hostname"), &hostname);
- if (ret)
- goto out;
+ ret = dict_get_strn(dict, "hostname", SLEN("hostname"), &hostname);
+ if (ret)
+ goto out;
- ret = glusterd_friend_remove (NULL, hostname);
+ ret = glusterd_friend_remove(NULL, hostname);
out:
- gf_msg_debug ("glusterd", 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug("glusterd", 0, "Returning %d", ret);
+ return ret;
}
int
-glusterd_peer_hostname_update (glusterd_peerinfo_t *peerinfo,
- const char *hostname, gf_boolean_t store_update)
+glusterd_peer_hostname_update(glusterd_peerinfo_t *peerinfo,
+ const char *hostname, gf_boolean_t store_update)
{
- int ret = 0;
+ int ret = 0;
- GF_ASSERT (peerinfo);
- GF_ASSERT (hostname);
+ GF_ASSERT(peerinfo);
+ GF_ASSERT(hostname);
- ret = gd_add_address_to_peer (peerinfo, hostname);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_HOSTNAME_ADD_TO_PEERLIST_FAIL,
- "Couldn't add address to the peer info");
- goto out;
- }
+ ret = gd_add_address_to_peer(peerinfo, hostname);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0,
+ GD_MSG_HOSTNAME_ADD_TO_PEERLIST_FAIL,
+ "Couldn't add address to the peer info");
+ goto out;
+ }
- if (store_update)
- ret = glusterd_store_peerinfo (peerinfo);
+ if (store_update)
+ ret = glusterd_store_peerinfo(peerinfo);
out:
- gf_msg_debug (THIS->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(THIS->name, 0, "Returning %d", ret);
+ return ret;
}
int
-__glusterd_handle_friend_update (rpcsvc_request_t *req)
+__glusterd_handle_friend_update(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gd1_mgmt_friend_update friend_req = {{0},};
- glusterd_peerinfo_t *peerinfo = NULL;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
- gd1_mgmt_friend_update_rsp rsp = {{0},};
- dict_t *dict = NULL;
- char key[100] = {0,};
- int keylen;
- char *uuid_buf = NULL;
- int i = 1;
- int count = 0;
- uuid_t uuid = {0,};
- glusterd_peerctx_args_t args = {0};
- int32_t op = 0;
-
- GF_ASSERT (req);
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- ret = xdr_to_generic (req->msg[0], &friend_req,
- (xdrproc_t)xdr_gd1_mgmt_friend_update);
+ int32_t ret = -1;
+ gd1_mgmt_friend_update friend_req = {
+ {0},
+ };
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ gd1_mgmt_friend_update_rsp rsp = {
+ {0},
+ };
+ dict_t *dict = NULL;
+ char key[100] = {
+ 0,
+ };
+ int keylen;
+ char *uuid_buf = NULL;
+ int i = 1;
+ int count = 0;
+ uuid_t uuid = {
+ 0,
+ };
+ glusterd_peerctx_args_t args = {0};
+ int32_t op = 0;
+
+ GF_ASSERT(req);
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = xdr_to_generic(req->msg[0], &friend_req,
+ (xdrproc_t)xdr_gd1_mgmt_friend_update);
+ if (ret < 0) {
+ // failed to decode msg;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode "
+ "request received");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ ret = 0;
+ rcu_read_lock();
+ if (glusterd_peerinfo_find(friend_req.uuid, NULL) == NULL) {
+ ret = -1;
+ }
+ rcu_read_unlock();
+ if (ret) {
+ gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_REQ_FROM_UNKNOWN_PEER,
+ "Received friend update request "
+ "from unknown peer %s",
+ uuid_utoa(friend_req.uuid));
+ gf_event(EVENT_UNKNOWN_PEER, "peer=%s", uuid_utoa(friend_req.uuid));
+ goto out;
+ }
+
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_FRIEND_UPDATE_RCVD,
+ "Received friend update from uuid: %s", uuid_utoa(friend_req.uuid));
+
+ if (friend_req.friends.friends_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new();
+
+ ret = dict_unserialize(friend_req.friends.friends_val,
+ friend_req.friends.friends_len, &dict);
if (ret < 0) {
- //failed to decode msg;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
- "request received");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ goto out;
+ } else {
+ dict->extra_stdfree = friend_req.friends.friends_val;
}
+ }
- ret = 0;
- rcu_read_lock ();
- if (glusterd_peerinfo_find (friend_req.uuid, NULL) == NULL) {
- ret = -1;
- }
- rcu_read_unlock ();
- if (ret) {
- gf_msg (this->name, GF_LOG_CRITICAL, 0,
- GD_MSG_REQ_FROM_UNKNOWN_PEER,
- "Received friend update request "
- "from unknown peer %s", uuid_utoa (friend_req.uuid));
- gf_event (EVENT_UNKNOWN_PEER, "peer=%s",
- uuid_utoa (friend_req.uuid));
- goto out;
- }
+ ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
+ if (ret)
+ goto out;
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_FRIEND_UPDATE_RCVD,
- "Received friend update from uuid: %s", uuid_utoa (friend_req.uuid));
-
- if (friend_req.friends.friends_len) {
- /* Unserialize the dictionary */
- dict = dict_new ();
-
- ret = dict_unserialize (friend_req.friends.friends_val,
- friend_req.friends.friends_len,
- &dict);
- if (ret < 0) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to "
- "unserialize req-buffer to dictionary");
- goto out;
- } else {
- dict->extra_stdfree = friend_req.friends.friends_val;
- }
- }
+ ret = dict_get_int32n(dict, "op", SLEN("op"), &op);
+ if (ret)
+ goto out;
- ret = dict_get_int32n (dict, "count", SLEN ("count"), &count);
- if (ret)
- goto out;
+ if (GD_FRIEND_UPDATE_DEL == op) {
+ (void)glusterd_handle_friend_update_delete(dict);
+ goto out;
+ }
- ret = dict_get_int32n (dict, "op", SLEN ("op"), &op);
+ args.mode = GD_MODE_ON;
+ while (i <= count) {
+ keylen = snprintf(key, sizeof(key), "friend%d.uuid", i);
+ ret = dict_get_strn(dict, key, keylen, &uuid_buf);
if (ret)
- goto out;
+ goto out;
+ gf_uuid_parse(uuid_buf, uuid);
- if (GD_FRIEND_UPDATE_DEL == op) {
- (void) glusterd_handle_friend_update_delete (dict);
- goto out;
+ if (!gf_uuid_compare(uuid, MY_UUID)) {
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_UUID_RECEIVED,
+ "Received my uuid as Friend");
+ i++;
+ continue;
}
- args.mode = GD_MODE_ON;
- while ( i <= count) {
- keylen = snprintf (key, sizeof (key), "friend%d.uuid", i);
- ret = dict_get_strn (dict, key, keylen, &uuid_buf);
- if (ret)
- goto out;
- gf_uuid_parse (uuid_buf, uuid);
-
- if (!gf_uuid_compare (uuid, MY_UUID)) {
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_UUID_RECEIVED,
- "Received my uuid as Friend");
- i++;
- continue;
- }
-
- snprintf (key, sizeof (key), "friend%d", i);
-
- rcu_read_lock ();
- peerinfo = glusterd_peerinfo_find (uuid, NULL);
- if (peerinfo == NULL) {
- /* Create a new peer and add it to the list as there is
- * no existing peer with the uuid
- */
- peerinfo = gd_peerinfo_from_dict (dict, key);
- if (peerinfo == NULL) {
- ret = -1;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PEERINFO_CREATE_FAIL,
- "Could not create peerinfo from dict "
- "for prefix %s", key);
- goto unlock;
- }
-
- /* As this is a new peer, it should be added as a
- * friend. The friend state machine will take care of
- * correcting the state as required
- */
- peerinfo->state.state = GD_FRIEND_STATE_BEFRIENDED;
+ snprintf(key, sizeof(key), "friend%d", i);
- ret = glusterd_friend_add_from_peerinfo (peerinfo, 0,
- &args);
- } else {
- /* As an existing peer was found, update it with the new
- * information
- */
- ret = gd_update_peerinfo_from_dict (peerinfo, dict,
- key);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PEER_INFO_UPDATE_FAIL,
- "Failed to "
- "update peer %s", peerinfo->hostname);
- goto unlock;
- }
- ret = glusterd_store_peerinfo (peerinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PEERINFO_CREATE_FAIL,
- "Failed to store peerinfo");
- gf_event (EVENT_PEER_STORE_FAILURE, "peer=%s",
- peerinfo->hostname);
- }
- }
-unlock:
- rcu_read_unlock ();
- if (ret)
- break;
-
- peerinfo = NULL;
- i++;
- }
-
-out:
- gf_uuid_copy (rsp.uuid, MY_UUID);
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_friend_update_rsp);
- if (dict) {
- if (!dict->extra_stdfree && friend_req.friends.friends_val)
- free (friend_req.friends.friends_val);//malloced by xdr
- dict_unref (dict);
+ rcu_read_lock();
+ peerinfo = glusterd_peerinfo_find(uuid, NULL);
+ if (peerinfo == NULL) {
+ /* Create a new peer and add it to the list as there is
+ * no existing peer with the uuid
+ */
+ peerinfo = gd_peerinfo_from_dict(dict, key);
+ if (peerinfo == NULL) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEERINFO_CREATE_FAIL,
+ "Could not create peerinfo from dict "
+ "for prefix %s",
+ key);
+ goto unlock;
+ }
+
+ /* As this is a new peer, it should be added as a
+ * friend. The friend state machine will take care of
+ * correcting the state as required
+ */
+ peerinfo->state.state = GD_FRIEND_STATE_BEFRIENDED;
+
+ ret = glusterd_friend_add_from_peerinfo(peerinfo, 0, &args);
} else {
- free (friend_req.friends.friends_val);//malloced by xdr
+ /* As an existing peer was found, update it with the new
+ * information
+ */
+ ret = gd_update_peerinfo_from_dict(peerinfo, dict, key);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_PEER_INFO_UPDATE_FAIL,
+ "Failed to "
+ "update peer %s",
+ peerinfo->hostname);
+ goto unlock;
+ }
+ ret = glusterd_store_peerinfo(peerinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEERINFO_CREATE_FAIL,
+ "Failed to store peerinfo");
+ gf_event(EVENT_PEER_STORE_FAILURE, "peer=%s",
+ peerinfo->hostname);
+ }
}
+ unlock:
+ rcu_read_unlock();
+ if (ret)
+ break;
- if (peerinfo)
- glusterd_peerinfo_cleanup (peerinfo);
-
- glusterd_friend_sm ();
- glusterd_op_sm ();
+ peerinfo = NULL;
+ i++;
+ }
- return ret;
+out:
+ gf_uuid_copy(rsp.uuid, MY_UUID);
+ ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_friend_update_rsp);
+ if (dict) {
+ if (!dict->extra_stdfree && friend_req.friends.friends_val)
+ free(friend_req.friends.friends_val); // malloced by xdr
+ dict_unref(dict);
+ } else {
+ free(friend_req.friends.friends_val); // malloced by xdr
+ }
+
+ if (peerinfo)
+ glusterd_peerinfo_cleanup(peerinfo);
+
+ glusterd_friend_sm();
+ glusterd_op_sm();
+
+ return ret;
}
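
The friend-update payload unpacked above is a flat dict: a "count", an "op", and one "friend<i>.*" namespace per peer, which the while loop walks. A hypothetical sender would assemble it roughly like this; the "friend%d.uuid" key is taken from the reader above, while GD_FRIEND_UPDATE_ADD and the "friend%d.hostname" subkey are assumptions (return-value checks omitted):

    dict_t *dict = dict_new();
    char key[100] = {
        0,
    };

    dict_set_int32n(dict, "op", SLEN("op"), GD_FRIEND_UPDATE_ADD);
    dict_set_int32n(dict, "count", SLEN("count"), 1);

    /* one namespace per peer, matching the "friend%d.uuid" lookups above */
    snprintf(key, sizeof(key), "friend%d.uuid", 1);
    dict_set_dynstr_with_alloc(dict, key, uuid_utoa(peerinfo->uuid));
    snprintf(key, sizeof(key), "friend%d.hostname", 1); /* assumed subkey */
    dict_set_dynstr_with_alloc(dict, key, peerinfo->hostname);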
int
-glusterd_handle_friend_update (rpcsvc_request_t *req)
+glusterd_handle_friend_update(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_handle_friend_update);
+ return glusterd_big_locked_handler(req, __glusterd_handle_friend_update);
}
int
-__glusterd_handle_probe_query (rpcsvc_request_t *req)
+__glusterd_handle_probe_query(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- gd1_mgmt_probe_req probe_req = {{0},};
- gd1_mgmt_probe_rsp rsp = {{0},};
- glusterd_peerinfo_t *peerinfo = NULL;
- glusterd_peerctx_args_t args = {0};
- int port = 0;
- char remote_hostname[UNIX_PATH_MAX + 1] = {0,};
-
- GF_ASSERT (req);
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("xlator", (this != NULL), out);
-
- ret = xdr_to_generic (req->msg[0], &probe_req,
- (xdrproc_t)xdr_gd1_mgmt_probe_req);
- if (ret < 0) {
- //failed to decode msg;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode probe "
- "request");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- conf = this->private;
- if (probe_req.port)
- port = probe_req.port;
- else
- port = GF_DEFAULT_BASE_PORT;
-
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_PROBE_RCVD,
- "Received probe from uuid: %s", uuid_utoa (probe_req.uuid));
-
- /* Check for uuid collision and handle it in a user friendly way by
- * sending the error.
- */
- if (!gf_uuid_compare (probe_req.uuid, MY_UUID)) {
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_UUIDS_SAME_RETRY, "Peer uuid %s is same as "
- "local uuid. Please check the uuid of both the peers "
- "from %s/%s", uuid_utoa (probe_req.uuid),
- GLUSTERD_DEFAULT_WORKDIR, GLUSTERD_INFO_FILE);
- rsp.op_ret = -1;
- rsp.op_errno = GF_PROBE_SAME_UUID;
- rsp.port = port;
- goto respond;
- }
-
- ret = glusterd_remote_hostname_get (req, remote_hostname,
- sizeof (remote_hostname));
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ gd1_mgmt_probe_req probe_req = {
+ {0},
+ };
+ gd1_mgmt_probe_rsp rsp = {
+ {0},
+ };
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_peerctx_args_t args = {0};
+ int port = 0;
+ char remote_hostname[UNIX_PATH_MAX + 1] = {
+ 0,
+ };
+
+ GF_ASSERT(req);
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("xlator", (this != NULL), out);
+
+ ret = xdr_to_generic(req->msg[0], &probe_req,
+ (xdrproc_t)xdr_gd1_mgmt_probe_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode probe "
+ "request");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ conf = this->private;
+ if (probe_req.port)
+ port = probe_req.port;
+ else
+ port = GF_DEFAULT_BASE_PORT;
+
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_PROBE_RCVD,
+ "Received probe from uuid: %s", uuid_utoa(probe_req.uuid));
+
+ /* Check for uuid collision and handle it in a user-friendly way by
+ * sending the error.
+ */
+ if (!gf_uuid_compare(probe_req.uuid, MY_UUID)) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_UUIDS_SAME_RETRY,
+ "Peer uuid %s is same as "
+ "local uuid. Please check the uuid of both the peers "
+ "from %s/%s",
+ uuid_utoa(probe_req.uuid), GLUSTERD_DEFAULT_WORKDIR,
+ GLUSTERD_INFO_FILE);
+ rsp.op_ret = -1;
+ rsp.op_errno = GF_PROBE_SAME_UUID;
+ rsp.port = port;
+ goto respond;
+ }
+
+ ret = glusterd_remote_hostname_get(req, remote_hostname,
+ sizeof(remote_hostname));
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_RESOLVE_FAIL,
+ "Unable to get the remote hostname");
+ goto out;
+ }
+
+ rcu_read_lock();
+ peerinfo = glusterd_peerinfo_find(probe_req.uuid, remote_hostname);
+ if ((peerinfo == NULL) && (!cds_list_empty(&conf->peers))) {
+ rsp.op_ret = -1;
+ rsp.op_errno = GF_PROBE_ANOTHER_CLUSTER;
+ } else if (peerinfo == NULL) {
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_PEER_NOT_FOUND,
+ "Unable to find peerinfo"
+ " for host: %s (%d)",
+ remote_hostname, port);
+ args.mode = GD_MODE_ON;
+ ret = glusterd_friend_add(remote_hostname, port,
+ GD_FRIEND_STATE_PROBE_RCVD, NULL, &peerinfo,
+ 0, &args);
if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_HOSTNAME_RESOLVE_FAIL,
- "Unable to get the remote hostname");
- goto out;
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_PEER_ADD_FAIL,
+ "Failed to add peer %s", remote_hostname);
+ rsp.op_errno = GF_PROBE_ADD_FAILED;
}
-
- rcu_read_lock ();
- peerinfo = glusterd_peerinfo_find (probe_req.uuid, remote_hostname);
- if ((peerinfo == NULL) && (!cds_list_empty (&conf->peers))) {
- rsp.op_ret = -1;
- rsp.op_errno = GF_PROBE_ANOTHER_CLUSTER;
- } else if (peerinfo == NULL) {
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_PEER_NOT_FOUND,
- "Unable to find peerinfo"
- " for host: %s (%d)", remote_hostname, port);
- args.mode = GD_MODE_ON;
- ret = glusterd_friend_add (remote_hostname, port,
- GD_FRIEND_STATE_PROBE_RCVD,
- NULL, &peerinfo, 0, &args);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_PEER_ADD_FAIL,
- "Failed to add peer %s",
- remote_hostname);
- rsp.op_errno = GF_PROBE_ADD_FAILED;
- }
- }
- rcu_read_unlock ();
+ }
+ rcu_read_unlock();
respond:
- gf_uuid_copy (rsp.uuid, MY_UUID);
+ gf_uuid_copy(rsp.uuid, MY_UUID);
- rsp.hostname = probe_req.hostname;
- rsp.op_errstr = "";
+ rsp.hostname = probe_req.hostname;
+ rsp.op_errstr = "";
- glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_probe_rsp);
- ret = 0;
+ glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_probe_rsp);
+ ret = 0;
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_RESPONSE_INFO, "Responded to %s, op_ret: %d, "
- "op_errno: %d, ret: %d", remote_hostname,
- rsp.op_ret, rsp.op_errno, ret);
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_RESPONSE_INFO,
+ "Responded to %s, op_ret: %d, "
+ "op_errno: %d, ret: %d",
+ remote_hostname, rsp.op_ret, rsp.op_errno, ret);
out:
- free (probe_req.hostname);//malloced by xdr
+ free(probe_req.hostname); // malloced by xdr
- glusterd_friend_sm ();
- glusterd_op_sm ();
+ glusterd_friend_sm();
+ glusterd_op_sm();
- return ret;
+ return ret;
}
-int glusterd_handle_probe_query (rpcsvc_request_t *req)
+int
+glusterd_handle_probe_query(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __glusterd_handle_probe_query);
+ return glusterd_big_locked_handler(req, __glusterd_handle_probe_query);
}
int
-__glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
+__glusterd_handle_cli_profile_volume(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf_cli_req cli_req = {{0,}};
- dict_t *dict = NULL;
- glusterd_op_t cli_op = GD_OP_PROFILE_VOLUME;
- char *volname = NULL;
- int32_t op = 0;
- char err_str[64] = {0,};
- xlator_t *this = NULL;
-
- GF_ASSERT (req);
- this = THIS;
- GF_ASSERT (this);
-
- ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
- if (ret < 0) {
- //failed to decode msg;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
- "request received from cli");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- if (cli_req.dict.dict_len > 0) {
- dict = dict_new();
- if (!dict)
- goto out;
- dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len, &dict);
- }
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Unable to get volume "
- "name");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLNAME_NOTFOUND_IN_DICT, "%s", err_str);
- goto out;
- }
-
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_VOL_PROFILE_REQ_RCVD,
- "Received volume profile req "
- "for volume %s", volname);
- ret = dict_get_int32n (dict, "op", SLEN ("op"), &op);
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Unable to get operation");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "%s", err_str);
- goto out;
- }
-
- ret = glusterd_op_begin (req, cli_op, dict, err_str, sizeof (err_str));
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{
+ 0,
+ }};
+ dict_t *dict = NULL;
+ glusterd_op_t cli_op = GD_OP_PROFILE_VOLUME;
+ char *volname = NULL;
+ int32_t op = 0;
+ char err_str[64] = {
+ 0,
+ };
+ xlator_t *this = NULL;
+
+ GF_ASSERT(req);
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode "
+ "request received from cli");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len > 0) {
+ dict = dict_new();
+ if (!dict)
+ goto out;
+ dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len, &dict);
+ }
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Unable to get volume "
+ "name");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLNAME_NOTFOUND_IN_DICT,
+ "%s", err_str);
+ goto out;
+ }
+
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_VOL_PROFILE_REQ_RCVD,
+ "Received volume profile req "
+ "for volume %s",
+ volname);
+ ret = dict_get_int32n(dict, "op", SLEN("op"), &op);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str), "Unable to get operation");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ err_str);
+ goto out;
+ }
+
+ ret = glusterd_op_begin(req, cli_op, dict, err_str, sizeof(err_str));
out:
- glusterd_friend_sm ();
- glusterd_op_sm ();
+ glusterd_friend_sm();
+ glusterd_op_sm();
- free (cli_req.dict.dict_val);
+ free(cli_req.dict.dict_val);
- if (ret) {
- if (err_str[0] == '\0')
- snprintf (err_str, sizeof (err_str),
- "Operation failed");
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- dict, err_str);
- }
+ if (ret) {
+ if (err_str[0] == '\0')
+ snprintf(err_str, sizeof(err_str), "Operation failed");
+ ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);
+ }
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
int
-glusterd_handle_cli_profile_volume (rpcsvc_request_t *req)
+glusterd_handle_cli_profile_volume(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_handle_cli_profile_volume);
+ return glusterd_big_locked_handler(req,
+ __glusterd_handle_cli_profile_volume);
}
int
-__glusterd_handle_getwd (rpcsvc_request_t *req)
+__glusterd_handle_getwd(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf1_cli_getwd_rsp rsp = {0,};
- glusterd_conf_t *priv = NULL;
+ int32_t ret = -1;
+ gf1_cli_getwd_rsp rsp = {
+ 0,
+ };
+ glusterd_conf_t *priv = NULL;
- GF_ASSERT (req);
+ GF_ASSERT(req);
- priv = THIS->private;
- GF_ASSERT (priv);
+ priv = THIS->private;
+ GF_ASSERT(priv);
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_GETWD_REQ_RCVD, "Received getwd req");
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_GETWD_REQ_RCVD,
+ "Received getwd req");
- rsp.wd = priv->workdir;
+ rsp.wd = priv->workdir;
- glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf1_cli_getwd_rsp);
- ret = 0;
+ glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf1_cli_getwd_rsp);
+ ret = 0;
- glusterd_friend_sm ();
- glusterd_op_sm ();
+ glusterd_friend_sm();
+ glusterd_op_sm();
- return ret;
+ return ret;
}
int
-glusterd_handle_getwd (rpcsvc_request_t *req)
+glusterd_handle_getwd(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __glusterd_handle_getwd);
+ return glusterd_big_locked_handler(req, __glusterd_handle_getwd);
}
int
-__glusterd_handle_mount (rpcsvc_request_t *req)
+__glusterd_handle_mount(rpcsvc_request_t *req)
{
- gf1_cli_mount_req mnt_req = {0,};
- gf1_cli_mount_rsp rsp = {0,};
- dict_t *dict = NULL;
- int ret = 0;
- glusterd_conf_t *priv = NULL;
-
- GF_ASSERT (req);
- priv = THIS->private;
+ gf1_cli_mount_req mnt_req = {
+ 0,
+ };
+ gf1_cli_mount_rsp rsp = {
+ 0,
+ };
+ dict_t *dict = NULL;
+ int ret = 0;
+ glusterd_conf_t *priv = NULL;
+
+ GF_ASSERT(req);
+ priv = THIS->private;
+
+ ret = xdr_to_generic(req->msg[0], &mnt_req,
+ (xdrproc_t)xdr_gf1_cli_mount_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode mount "
+ "request received");
+ req->rpc_err = GARBAGE_ARGS;
+ rsp.op_ret = -1;
+ rsp.op_errno = EINVAL;
+ goto out;
+ }
+
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_MOUNT_REQ_RCVD,
+ "Received mount req");
+
+ if (mnt_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new();
- ret = xdr_to_generic (req->msg[0], &mnt_req,
- (xdrproc_t)xdr_gf1_cli_mount_req);
+ ret = dict_unserialize(mnt_req.dict.dict_val, mnt_req.dict.dict_len,
+ &dict);
if (ret < 0) {
- //failed to decode msg;
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode mount "
- "request received");
- req->rpc_err = GARBAGE_ARGS;
- rsp.op_ret = -1;
- rsp.op_errno = EINVAL;
- goto out;
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ rsp.op_ret = -1;
+ rsp.op_errno = -EINVAL;
+ goto out;
+ } else {
+ dict->extra_stdfree = mnt_req.dict.dict_val;
}
+ }
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_MOUNT_REQ_RCVD,
- "Received mount req");
-
- if (mnt_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new ();
-
- ret = dict_unserialize (mnt_req.dict.dict_val,
- mnt_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to "
- "unserialize req-buffer to dictionary");
- rsp.op_ret = -1;
- rsp.op_errno = -EINVAL;
- goto out;
- } else {
- dict->extra_stdfree = mnt_req.dict.dict_val;
- }
- }
+ synclock_unlock(&priv->big_lock);
+ rsp.op_ret = glusterd_do_mount(mnt_req.label, dict, &rsp.path,
+ &rsp.op_errno);
+ synclock_lock(&priv->big_lock);
- synclock_unlock (&priv->big_lock);
- rsp.op_ret = glusterd_do_mount (mnt_req.label, dict,
- &rsp.path, &rsp.op_errno);
- synclock_lock (&priv->big_lock);
+out:
+ if (!rsp.path)
+ rsp.path = gf_strdup("");
- out:
- if (!rsp.path)
- rsp.path = gf_strdup("");
+ glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf1_cli_mount_rsp);
+ ret = 0;
- glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf1_cli_mount_rsp);
- ret = 0;
+ if (dict)
+ dict_unref(dict);
- if (dict)
- dict_unref (dict);
-
- GF_FREE (rsp.path);
+ GF_FREE(rsp.path);
- glusterd_friend_sm ();
- glusterd_op_sm ();
+ glusterd_friend_sm();
+ glusterd_op_sm();
- return ret;
+ return ret;
}
int
-glusterd_handle_mount (rpcsvc_request_t *req)
+glusterd_handle_mount(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __glusterd_handle_mount);
+ return glusterd_big_locked_handler(req, __glusterd_handle_mount);
}
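
__glusterd_handle_mount() is the one handler here that deliberately steps outside the big lock: glusterd_do_mount() can block on the mount itself, and holding priv->big_lock across it would stall every other handler. The bracketing idiom, isolated from the function above:

    /* drop the big lock around a potentially blocking call ... */
    synclock_unlock(&priv->big_lock);
    rsp.op_ret = glusterd_do_mount(mnt_req.label, dict, &rsp.path,
                                   &rsp.op_errno);
    /* ... and re-acquire it before touching shared glusterd state again */
    synclock_lock(&priv->big_lock);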
int
-__glusterd_handle_umount (rpcsvc_request_t *req)
+__glusterd_handle_umount(rpcsvc_request_t *req)
{
- gf1_cli_umount_req umnt_req = {0,};
- gf1_cli_umount_rsp rsp = {0,};
- char *mountbroker_root = NULL;
- char mntp[PATH_MAX] = {0,};
- char *path = NULL;
- runner_t runner = {0,};
- int ret = 0;
- xlator_t *this = THIS;
- gf_boolean_t dir_ok = _gf_false;
- char *pdir = NULL;
- char *t = NULL;
- glusterd_conf_t *priv = NULL;
-
- GF_ASSERT (req);
- GF_ASSERT (this);
- priv = this->private;
-
- ret = xdr_to_generic (req->msg[0], &umnt_req,
- (xdrproc_t)xdr_gf1_cli_umount_req);
- if (ret < 0) {
- //failed to decode msg;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode umount"
- "request");
- req->rpc_err = GARBAGE_ARGS;
- rsp.op_ret = -1;
- goto out;
- }
-
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_UMOUNT_REQ_RCVD,
- "Received umount req");
-
- if (dict_get_strn (this->options, "mountbroker-root",
- SLEN ("mountbroker-root"),
- &mountbroker_root) != 0) {
- rsp.op_errno = ENOENT;
- goto out;
- }
-
- /* check if it is allowed to umount path */
- path = gf_strdup (umnt_req.path);
- if (!path) {
- rsp.op_errno = ENOMEM;
- goto out;
- }
- dir_ok = _gf_false;
- pdir = dirname (path);
- t = strtail (pdir, mountbroker_root);
- if (t && *t == '/') {
- t = strtail(++t, MB_HIVE);
- if (t && !*t)
- dir_ok = _gf_true;
- }
- GF_FREE (path);
- if (!dir_ok) {
- rsp.op_errno = EACCES;
- goto out;
- }
-
- synclock_unlock (&priv->big_lock);
-
- if (umnt_req.lazy) {
- rsp.op_ret = gf_umount_lazy (this->name, umnt_req.path, 0);
- } else {
- runinit (&runner);
- runner_add_args (&runner, _PATH_UMOUNT, umnt_req.path, NULL);
- rsp.op_ret = runner_run (&runner);
+ gf1_cli_umount_req umnt_req = {
+ 0,
+ };
+ gf1_cli_umount_rsp rsp = {
+ 0,
+ };
+ char *mountbroker_root = NULL;
+ char mntp[PATH_MAX] = {
+ 0,
+ };
+ char *path = NULL;
+ runner_t runner = {
+ 0,
+ };
+ int ret = 0;
+ xlator_t *this = THIS;
+ gf_boolean_t dir_ok = _gf_false;
+ char *pdir = NULL;
+ char *t = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ GF_ASSERT(req);
+ GF_ASSERT(this);
+ priv = this->private;
+
+ ret = xdr_to_generic(req->msg[0], &umnt_req,
+ (xdrproc_t)xdr_gf1_cli_umount_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode umount"
+ "request");
+ req->rpc_err = GARBAGE_ARGS;
+ rsp.op_ret = -1;
+ goto out;
+ }
+
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_UMOUNT_REQ_RCVD,
+ "Received umount req");
+
+ if (dict_get_strn(this->options, "mountbroker-root",
+ SLEN("mountbroker-root"), &mountbroker_root) != 0) {
+ rsp.op_errno = ENOENT;
+ goto out;
+ }
+
+ /* check if it is allowed to umount path */
+ path = gf_strdup(umnt_req.path);
+ if (!path) {
+ rsp.op_errno = ENOMEM;
+ goto out;
+ }
+ dir_ok = _gf_false;
+ pdir = dirname(path);
+ t = strtail(pdir, mountbroker_root);
+ if (t && *t == '/') {
+ t = strtail(++t, MB_HIVE);
+ if (t && !*t)
+ dir_ok = _gf_true;
+ }
+ GF_FREE(path);
+ if (!dir_ok) {
+ rsp.op_errno = EACCES;
+ goto out;
+ }
+
+ synclock_unlock(&priv->big_lock);
+
+ if (umnt_req.lazy) {
+ rsp.op_ret = gf_umount_lazy(this->name, umnt_req.path, 0);
+ } else {
+ runinit(&runner);
+ runner_add_args(&runner, _PATH_UMOUNT, umnt_req.path, NULL);
+ rsp.op_ret = runner_run(&runner);
+ }
+
+ synclock_lock(&priv->big_lock);
+ if (rsp.op_ret == 0) {
+ if (realpath(umnt_req.path, mntp))
+ sys_rmdir(mntp);
+ else {
+ rsp.op_ret = -1;
+ rsp.op_errno = errno;
}
-
- synclock_lock (&priv->big_lock);
- if (rsp.op_ret == 0) {
- if (realpath (umnt_req.path, mntp))
- sys_rmdir (mntp);
- else {
- rsp.op_ret = -1;
- rsp.op_errno = errno;
- }
- if (sys_unlink (umnt_req.path) != 0) {
- rsp.op_ret = -1;
- rsp.op_errno = errno;
- }
+ if (sys_unlink(umnt_req.path) != 0) {
+ rsp.op_ret = -1;
+ rsp.op_errno = errno;
}
+ }
- out:
- if (rsp.op_errno)
- rsp.op_ret = -1;
+out:
+ if (rsp.op_errno)
+ rsp.op_ret = -1;
- glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf1_cli_umount_rsp);
- ret = 0;
+ glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf1_cli_umount_rsp);
+ ret = 0;
- glusterd_friend_sm ();
- glusterd_op_sm ();
+ glusterd_friend_sm();
+ glusterd_op_sm();
- return ret;
+ return ret;
}
int
-glusterd_handle_umount (rpcsvc_request_t *req)
+glusterd_handle_umount(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __glusterd_handle_umount);
+ return glusterd_big_locked_handler(req, __glusterd_handle_umount);
}
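
The umount path check above accepts only paths whose parent directory is exactly <mountbroker-root>/<MB_HIVE>. strtail() returns the remainder of its first argument if it starts with the given prefix, and NULL otherwise. The same test pulled out into a standalone sketch (MB_HIVE is whatever glusterd-mountbroker.h defines; dirname() is the libgen.h one and modifies its argument):

    /* true iff dirname(path) == <mountbroker_root>/<MB_HIVE> */
    static gf_boolean_t
    umount_path_allowed(char *path, char *mountbroker_root)
    {
        char *pdir = dirname(path); /* clobbers path in place */
        char *t = strtail(pdir, mountbroker_root);

        if (t && *t == '/') {
            t = strtail(++t, MB_HIVE);
            if (t && !*t) /* nothing may follow the hive component */
                return _gf_true;
        }
        return _gf_false;
    }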
int
-glusterd_friend_remove (uuid_t uuid, char *hostname)
+glusterd_friend_remove(uuid_t uuid, char *hostname)
{
- int ret = -1;
- glusterd_peerinfo_t *peerinfo = NULL;
-
- rcu_read_lock ();
-
- peerinfo = glusterd_peerinfo_find (uuid, hostname);
- if (peerinfo == NULL) {
- rcu_read_unlock ();
- goto out;
- }
-
- ret = glusterd_friend_remove_cleanup_vols (peerinfo->uuid);
- if (ret)
- gf_msg (THIS->name, GF_LOG_WARNING, 0,
- GD_MSG_VOL_CLEANUP_FAIL, "Volumes cleanup failed");
- rcu_read_unlock ();
- /* Giving up the critical section here as glusterd_peerinfo_cleanup must
- * be called from outside a critical section
- */
- ret = glusterd_peerinfo_cleanup (peerinfo);
+ int ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+
+ rcu_read_lock();
+
+ peerinfo = glusterd_peerinfo_find(uuid, hostname);
+ if (peerinfo == NULL) {
+ rcu_read_unlock();
+ goto out;
+ }
+
+ ret = glusterd_friend_remove_cleanup_vols(peerinfo->uuid);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_CLEANUP_FAIL,
+ "Volumes cleanup failed");
+ rcu_read_unlock();
+ /* Giving up the critical section here as glusterd_peerinfo_cleanup must
+ * be called from outside a critical section
+ */
+ ret = glusterd_peerinfo_cleanup(peerinfo);
out:
- gf_msg_debug (THIS->name, 0, "returning %d", ret);
- return ret;
+ gf_msg_debug(THIS->name, 0, "returning %d", ret);
+ return ret;
}
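
glusterd_friend_remove() shows the RCU discipline used throughout this file: lookups and field accesses happen between rcu_read_lock() and rcu_read_unlock(), while glusterd_peerinfo_cleanup(), which may wait for a grace period and free the object, must run outside the read-side critical section. Condensed:

    rcu_read_lock();
    peerinfo = glusterd_peerinfo_find(uuid, hostname);
    /* ... dereference peerinfo only while the read lock is held ... */
    rcu_read_unlock();

    /* teardown can synchronize and free, so it happens outside */
    if (peerinfo)
        ret = glusterd_peerinfo_cleanup(peerinfo);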
int
-glusterd_rpc_create (struct rpc_clnt **rpc,
- dict_t *options,
- rpc_clnt_notify_t notify_fn,
- void *notify_data,
- gf_boolean_t force)
+glusterd_rpc_create(struct rpc_clnt **rpc, dict_t *options,
+ rpc_clnt_notify_t notify_fn, void *notify_data,
+ gf_boolean_t force)
{
- struct rpc_clnt *new_rpc = NULL;
- int ret = -1;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (options);
-
- if (force && rpc && *rpc) {
- (void) rpc_clnt_unref (*rpc);
- *rpc = NULL;
- }
-
- /* TODO: is 32 enough? or more ? */
- new_rpc = rpc_clnt_new (options, this, this->name, 16);
- if (!new_rpc)
- goto out;
-
- ret = rpc_clnt_register_notify (new_rpc, notify_fn, notify_data);
- *rpc = new_rpc;
- if (ret)
- goto out;
- ret = rpc_clnt_start (new_rpc);
+ struct rpc_clnt *new_rpc = NULL;
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(options);
+
+ if (force && rpc && *rpc) {
+ (void)rpc_clnt_unref(*rpc);
+ *rpc = NULL;
+ }
+
+ /* TODO: is 16 enough? or more? */
+ new_rpc = rpc_clnt_new(options, this, this->name, 16);
+ if (!new_rpc)
+ goto out;
+
+ ret = rpc_clnt_register_notify(new_rpc, notify_fn, notify_data);
+ *rpc = new_rpc;
+ if (ret)
+ goto out;
+ ret = rpc_clnt_start(new_rpc);
out:
- if (ret) {
- if (new_rpc) {
- (void) rpc_clnt_unref (new_rpc);
- }
+ if (ret) {
+ if (new_rpc) {
+ (void)rpc_clnt_unref(new_rpc);
}
+ }
- gf_msg_debug (this->name, 0, "returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "returning %d", ret);
+ return ret;
}
int
-glusterd_transport_inet_options_build (dict_t **options, const char *hostname,
- int port)
+glusterd_transport_inet_options_build(dict_t **options, const char *hostname,
+ int port)
{
- xlator_t *this = NULL;
- dict_t *dict = NULL;
- int32_t interval = -1;
- int32_t time = -1;
- int32_t timeout = -1;
- int ret = 0;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (options);
- GF_ASSERT (hostname);
-
- if (!port)
- port = GLUSTERD_DEFAULT_PORT;
-
- /* Build default transport options */
- ret = rpc_transport_inet_options_build (&dict, hostname, port);
- if (ret)
- goto out;
-
- /* Set frame-timeout to 10mins. Default timeout of 30 mins is too long
- * when compared to 2 mins for cli timeout. This ensures users don't
- * wait too long after cli timesout before being able to resume normal
- * operations
- */
- ret = dict_set_int32n (dict, "frame-timeout", SLEN ("frame-timeout"),
- 600);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set frame-timeout");
- goto out;
- }
-
- /* Set keepalive options */
- ret = dict_get_int32n (this->options,
- "transport.socket.keepalive-interval",
- SLEN ("transport.socket.keepalive-interval"),
- &interval);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_WARNING, 0,
- GD_MSG_DICT_GET_FAILED,
- "Failed to get socket keepalive-interval");
- }
- ret = dict_get_int32n (this->options,
- "transport.socket.keepalive-time",
- SLEN ("transport.socket.keepalive-time"),
- &time);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_WARNING, 0,
- GD_MSG_DICT_GET_FAILED,
- "Failed to get socket keepalive-time");
- }
- ret = dict_get_int32n (this->options,
- "transport.tcp-user-timeout",
- SLEN ("transport.tcp-user-timeout"),
- &timeout);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_WARNING, 0,
- GD_MSG_DICT_GET_FAILED,
- "Failed to get tcp-user-timeout");
- }
-
- if ((interval > 0) || (time > 0))
- ret = rpc_transport_keepalive_options_set (dict, interval,
- time, timeout);
- *options = dict;
+ xlator_t *this = NULL;
+ dict_t *dict = NULL;
+ int32_t interval = -1;
+ int32_t time = -1;
+ int32_t timeout = -1;
+ int ret = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(options);
+ GF_ASSERT(hostname);
+
+ if (!port)
+ port = GLUSTERD_DEFAULT_PORT;
+
+ /* Build default transport options */
+ ret = rpc_transport_inet_options_build(&dict, hostname, port);
+ if (ret)
+ goto out;
+
+ /* Set frame-timeout to 10 mins. The default timeout of 30 mins is too long
+ * when compared to the 2 min cli timeout. This ensures users don't
+ * wait too long after the cli times out before being able to resume normal
+ * operations
+ */
+ ret = dict_set_int32n(dict, "frame-timeout", SLEN("frame-timeout"), 600);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set frame-timeout");
+ goto out;
+ }
+
+ /* Set keepalive options */
+ ret = dict_get_int32n(this->options, "transport.socket.keepalive-interval",
+ SLEN("transport.socket.keepalive-interval"),
+ &interval);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
+ "Failed to get socket keepalive-interval");
+ }
+ ret = dict_get_int32n(this->options, "transport.socket.keepalive-time",
+ SLEN("transport.socket.keepalive-time"), &time);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
+ "Failed to get socket keepalive-time");
+ }
+ ret = dict_get_int32n(this->options, "transport.tcp-user-timeout",
+ SLEN("transport.tcp-user-timeout"), &timeout);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
+ "Failed to get tcp-user-timeout");
+ }
+
+ if ((interval > 0) || (time > 0))
+ ret = rpc_transport_keepalive_options_set(dict, interval, time,
+ timeout);
+ *options = dict;
out:
- gf_msg_debug ("glusterd", 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug("glusterd", 0, "Returning %d", ret);
+ return ret;
}
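
A typical caller pairs the two helpers above: build the inet options for a peer, then hand them to glusterd_rpc_create() along with a notify callback, exactly as glusterd_friend_rpc_create() does next. A hedged sketch; the hostname is a placeholder, and passing 0 for the port falls back to GLUSTERD_DEFAULT_PORT:

    dict_t *options = NULL;
    int ret = -1;

    ret = glusterd_transport_inet_options_build(&options, "peer1.example.com",
                                                0);
    if (ret == 0)
        ret = glusterd_rpc_create(&peerinfo->rpc, options,
                                  glusterd_peer_rpc_notify, peerctx,
                                  _gf_false);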
int
-glusterd_friend_rpc_create (xlator_t *this, glusterd_peerinfo_t *peerinfo,
- glusterd_peerctx_args_t *args)
+glusterd_friend_rpc_create(xlator_t *this, glusterd_peerinfo_t *peerinfo,
+ glusterd_peerctx_args_t *args)
{
- dict_t *options = NULL;
- int ret = -1;
- glusterd_peerctx_t *peerctx = NULL;
- data_t *data = NULL;
-
- peerctx = GF_CALLOC (1, sizeof (*peerctx), gf_gld_mt_peerctx_t);
- if (!peerctx)
- goto out;
-
- if (args)
- peerctx->args = *args;
-
- gf_uuid_copy (peerctx->peerid, peerinfo->uuid);
- peerctx->peername = gf_strdup (peerinfo->hostname);
- peerctx->peerinfo_gen = peerinfo->generation; /* A peerinfos generation
- number can be used to
- uniquely identify a
- peerinfo */
-
- ret = glusterd_transport_inet_options_build (&options,
- peerinfo->hostname,
- peerinfo->port);
- if (ret)
- goto out;
-
- /*
- * For simulated multi-node testing, we need to make sure that we
- * create our RPC endpoint with the same address that the peer would
- * use to reach us.
- */
- if (this->options) {
- data = dict_getn (this->options,
- "transport.socket.bind-address",
- SLEN ("transport.socket.bind-address"));
- if (data) {
- ret = dict_setn (options,
- "transport.socket.source-addr",
- SLEN ("transport.socket.source-addr"),
- data);
- }
- data = dict_getn (this->options, "ping-timeout",
- SLEN ("ping-timeout"));
- if (data) {
- ret = dict_setn (options,
- "ping-timeout",
- SLEN ("ping-timeout"), data);
- }
- }
-
- /* Enable encryption for the client connection if management encryption
- * is enabled
- */
- if (this->ctx->secure_mgmt) {
- ret = dict_set_nstrn (options, "transport.socket.ssl-enabled",
- SLEN ("transport.socket.ssl-enabled"),
- "on", SLEN ("on"));
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "failed to set ssl-enabled in dict");
- goto out;
- }
-
- this->ctx->ssl_cert_depth = glusterfs_read_secure_access_file ();
-
- }
-
- ret = glusterd_rpc_create (&peerinfo->rpc, options,
- glusterd_peer_rpc_notify, peerctx,
- _gf_false);
+ dict_t *options = NULL;
+ int ret = -1;
+ glusterd_peerctx_t *peerctx = NULL;
+ data_t *data = NULL;
+
+ peerctx = GF_CALLOC(1, sizeof(*peerctx), gf_gld_mt_peerctx_t);
+ if (!peerctx)
+ goto out;
+
+ if (args)
+ peerctx->args = *args;
+
+ gf_uuid_copy(peerctx->peerid, peerinfo->uuid);
+ peerctx->peername = gf_strdup(peerinfo->hostname);
+ peerctx->peerinfo_gen = peerinfo->generation; /* A peerinfos generation
+ number can be used to
+ uniquely identify a
+ peerinfo */
+
+ ret = glusterd_transport_inet_options_build(&options, peerinfo->hostname,
+ peerinfo->port);
+ if (ret)
+ goto out;
+
+ /*
+ * For simulated multi-node testing, we need to make sure that we
+ * create our RPC endpoint with the same address that the peer would
+ * use to reach us.
+ */
+ if (this->options) {
+ data = dict_getn(this->options, "transport.socket.bind-address",
+ SLEN("transport.socket.bind-address"));
+ if (data) {
+ ret = dict_setn(options, "transport.socket.source-addr",
+ SLEN("transport.socket.source-addr"), data);
+ }
+ data = dict_getn(this->options, "ping-timeout", SLEN("ping-timeout"));
+ if (data) {
+ ret = dict_setn(options, "ping-timeout", SLEN("ping-timeout"),
+ data);
+ }
+ }
+
+ /* Enable encryption for the client connection if management encryption
+ * is enabled
+ */
+ if (this->ctx->secure_mgmt) {
+ ret = dict_set_nstrn(options, "transport.socket.ssl-enabled",
+ SLEN("transport.socket.ssl-enabled"), "on",
+ SLEN("on"));
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_RPC_CREATE_FAIL,
- "failed to create rpc for"
- " peer %s", peerinfo->hostname);
- gf_event (EVENT_PEER_RPC_CREATE_FAILED, "peer=%s",
- peerinfo->hostname);
- goto out;
- }
- peerctx = NULL;
- ret = 0;
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "failed to set ssl-enabled in dict");
+ goto out;
+ }
+
+ this->ctx->ssl_cert_depth = glusterfs_read_secure_access_file();
+ }
+
+ ret = glusterd_rpc_create(&peerinfo->rpc, options, glusterd_peer_rpc_notify,
+ peerctx, _gf_false);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_CREATE_FAIL,
+ "failed to create rpc for"
+ " peer %s",
+ peerinfo->hostname);
+ gf_event(EVENT_PEER_RPC_CREATE_FAILED, "peer=%s", peerinfo->hostname);
+ goto out;
+ }
+ peerctx = NULL;
+ ret = 0;
out:
- GF_FREE (peerctx);
- return ret;
+ GF_FREE(peerctx);
+ return ret;
}
int
-glusterd_friend_add (const char *hoststr, int port,
- glusterd_friend_sm_state_t state,
- uuid_t *uuid,
- glusterd_peerinfo_t **friend,
- gf_boolean_t restore,
- glusterd_peerctx_args_t *args)
+glusterd_friend_add(const char *hoststr, int port,
+ glusterd_friend_sm_state_t state, uuid_t *uuid,
+ glusterd_peerinfo_t **friend, gf_boolean_t restore,
+ glusterd_peerctx_args_t *args)
{
- int ret = 0;
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
-
- this = THIS;
- conf = this->private;
- GF_ASSERT (conf);
- GF_ASSERT (hoststr);
- GF_ASSERT (friend);
-
- *friend = glusterd_peerinfo_new (state, uuid, hoststr, port);
- if (*friend == NULL) {
- ret = -1;
- goto out;
- }
-
- /*
- * We can't add to the list after calling glusterd_friend_rpc_create,
- * even if it succeeds, because by then the callback to take it back
- * off and free might have happened already (notably in the case of an
- * invalid peer name). That would mean we're adding something that had
- * just been free, and we're likely to crash later.
- */
- cds_list_add_tail_rcu (&(*friend)->uuid_list, &conf->peers);
-
- //restore needs to first create the list of peers, then create rpcs
- //to keep track of quorum in race-free manner. In restore for each peer
- //rpc-create calls rpc_notify when the friend-list is partially
- //constructed, leading to wrong quorum calculations.
- if (!restore) {
- ret = glusterd_store_peerinfo (*friend);
- if (ret == 0) {
- ret = glusterd_friend_rpc_create (this, *friend, args);
- }
- else {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PEERINFO_CREATE_FAIL,
- "Failed to store peerinfo");
- gf_event (EVENT_PEER_STORE_FAILURE, "peer=%s",
- (*friend)->hostname);
- }
+ int ret = 0;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ this = THIS;
+ conf = this->private;
+ GF_ASSERT(conf);
+ GF_ASSERT(hoststr);
+ GF_ASSERT(friend);
+
+ *friend = glusterd_peerinfo_new(state, uuid, hoststr, port);
+ if (*friend == NULL) {
+ ret = -1;
+ goto out;
+ }
+
+ /*
+ * We can't add to the list after calling glusterd_friend_rpc_create,
+ * even if it succeeds, because by then the callback to take it back
+ * off and free might have happened already (notably in the case of an
+ * invalid peer name). That would mean we're adding something that had
+ * just been freed, and we're likely to crash later.
+ */
+ cds_list_add_tail_rcu(&(*friend)->uuid_list, &conf->peers);
+
+ // restore needs to first create the full list of peers and only then
+ // create rpcs, to keep quorum tracking race-free. Otherwise, during
+ // restore, rpc-create calls rpc_notify while the friend-list is still
+ // partially constructed, leading to wrong quorum calculations.
+ if (!restore) {
+ ret = glusterd_store_peerinfo(*friend);
+ if (ret == 0) {
+ ret = glusterd_friend_rpc_create(this, *friend, args);
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEERINFO_CREATE_FAIL,
+ "Failed to store peerinfo");
+ gf_event(EVENT_PEER_STORE_FAILURE, "peer=%s", (*friend)->hostname);
}
+ }
- if (ret) {
- (void) glusterd_peerinfo_cleanup (*friend);
- *friend = NULL;
- }
+ if (ret) {
+ (void)glusterd_peerinfo_cleanup(*friend);
+ *friend = NULL;
+ }
out:
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_CONNECT_RETURNED, "connect returned %d", ret);
- return ret;
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_CONNECT_RETURNED,
+ "connect returned %d", ret);
+ return ret;
}
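
Both glusterd_friend_add() and the peerinfo-based variant below enforce the same ordering: publish the peer on conf->peers first, then store and connect, because the RPC notify callback can fire (and tear the peer down) as soon as the connection is attempted. Reduced to the invariant:

    /* 1. publish first: the notify callback must already find the peer */
    cds_list_add_tail_rcu(&peerinfo->uuid_list, &conf->peers);

    /* 2. persist, then connect; skipped on restore, where RPCs are created
     *    only after the whole peer list has been rebuilt */
    ret = glusterd_store_peerinfo(peerinfo);
    if (ret == 0)
        ret = glusterd_friend_rpc_create(this, peerinfo, args);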
/* glusterd_friend_add_from_peerinfo() adds a new peer into the local friends
@@ -3616,2997 +3604,3075 @@ out:
* glusterd_friend_add()
*/
int
-glusterd_friend_add_from_peerinfo (glusterd_peerinfo_t *friend,
- gf_boolean_t restore,
- glusterd_peerctx_args_t *args)
+glusterd_friend_add_from_peerinfo(glusterd_peerinfo_t *friend,
+ gf_boolean_t restore,
+ glusterd_peerctx_args_t *args)
{
- int ret = 0;
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
-
- this = THIS;
- conf = this->private;
- GF_ASSERT (conf);
-
- GF_VALIDATE_OR_GOTO (this->name, (friend != NULL), out);
-
- /*
- * We can't add to the list after calling glusterd_friend_rpc_create,
- * even if it succeeds, because by then the callback to take it back
- * off and free might have happened already (notably in the case of an
- * invalid peer name). That would mean we're adding something that had
- * just been free, and we're likely to crash later.
- */
- cds_list_add_tail_rcu (&friend->uuid_list, &conf->peers);
-
- //restore needs to first create the list of peers, then create rpcs
- //to keep track of quorum in race-free manner. In restore for each peer
- //rpc-create calls rpc_notify when the friend-list is partially
- //constructed, leading to wrong quorum calculations.
- if (!restore) {
- ret = glusterd_store_peerinfo (friend);
- if (ret == 0) {
- ret = glusterd_friend_rpc_create (this, friend, args);
- }
- else {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PEERINFO_CREATE_FAIL,
- "Failed to store peerinfo");
- gf_event (EVENT_PEER_STORE_FAILURE, "peer=%s",
- friend->hostname);
- }
+ int ret = 0;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ this = THIS;
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ GF_VALIDATE_OR_GOTO(this->name, (friend != NULL), out);
+
+ /*
+ * We can't add to the list after calling glusterd_friend_rpc_create,
+     * even if it succeeds, because by then the callback that takes it back
+     * off the list and frees it might already have run (notably in the case
+     * of an invalid peer name). That would mean we're adding something that
+     * had just been freed, and we're likely to crash later.
+ */
+ cds_list_add_tail_rcu(&friend->uuid_list, &conf->peers);
+
+    // restore needs to first create the list of peers, and only then create
+    // rpcs, to keep track of quorum in a race-free manner. Otherwise, during
+    // restore, rpc-create for each peer would call rpc_notify while the
+    // friend-list is still partially constructed, leading to wrong quorum
+    // calculations.
+ if (!restore) {
+ ret = glusterd_store_peerinfo(friend);
+ if (ret == 0) {
+ ret = glusterd_friend_rpc_create(this, friend, args);
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEERINFO_CREATE_FAIL,
+ "Failed to store peerinfo");
+ gf_event(EVENT_PEER_STORE_FAILURE, "peer=%s", friend->hostname);
}
+ }
out:
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_CONNECT_RETURNED,
- "connect returned %d", ret);
- return ret;
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_CONNECT_RETURNED,
+ "connect returned %d", ret);
+ return ret;
}
int
-glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
- dict_t *dict, int *op_errno)
+glusterd_probe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
+ dict_t *dict, int *op_errno)
{
- int ret = -1;
- glusterd_peerinfo_t *peerinfo = NULL;
- glusterd_peerctx_args_t args = {0};
- glusterd_friend_sm_event_t *event = NULL;
+ int ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_peerctx_args_t args = {0};
+ glusterd_friend_sm_event_t *event = NULL;
- GF_ASSERT (hoststr);
+ GF_ASSERT(hoststr);
- rcu_read_lock ();
- peerinfo = glusterd_peerinfo_find (NULL, hoststr);
+ rcu_read_lock();
+ peerinfo = glusterd_peerinfo_find(NULL, hoststr);
- if (peerinfo == NULL) {
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_PEER_NOT_FOUND, "Unable to find peerinfo"
- " for host: %s (%d)", hoststr, port);
- args.mode = GD_MODE_ON;
- args.req = req;
- args.dict = dict;
- ret = glusterd_friend_add (hoststr, port,
- GD_FRIEND_STATE_DEFAULT,
- NULL, &peerinfo, 0, &args);
- if ((!ret) && (!peerinfo->connected)) {
- ret = GLUSTERD_CONNECTION_AWAITED;
- }
+ if (peerinfo == NULL) {
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_PEER_NOT_FOUND,
+ "Unable to find peerinfo"
+ " for host: %s (%d)",
+ hoststr, port);
+ args.mode = GD_MODE_ON;
+ args.req = req;
+ args.dict = dict;
+ ret = glusterd_friend_add(hoststr, port, GD_FRIEND_STATE_DEFAULT, NULL,
+ &peerinfo, 0, &args);
+ if ((!ret) && (!peerinfo->connected)) {
+ ret = GLUSTERD_CONNECTION_AWAITED;
+ }
- } else if (peerinfo->connected &&
- (GD_FRIEND_STATE_BEFRIENDED == peerinfo->state.state)) {
- if (peerinfo->detaching) {
- ret = -1;
- if (op_errno)
- *op_errno = GF_PROBE_FRIEND_DETACHING;
- goto out;
- }
- ret = glusterd_peer_hostname_update (peerinfo, hoststr,
- _gf_false);
- if (ret)
- goto out;
- // Injecting a NEW_NAME event to update cluster
- ret = glusterd_friend_sm_new_event (GD_FRIEND_EVENT_NEW_NAME,
- &event);
- if (!ret) {
- event->peername = gf_strdup (peerinfo->hostname);
- gf_uuid_copy (event->peerid, peerinfo->uuid);
+ } else if (peerinfo->connected &&
+ (GD_FRIEND_STATE_BEFRIENDED == peerinfo->state.state)) {
+ if (peerinfo->detaching) {
+ ret = -1;
+ if (op_errno)
+ *op_errno = GF_PROBE_FRIEND_DETACHING;
+ goto out;
+ }
+ ret = glusterd_peer_hostname_update(peerinfo, hoststr, _gf_false);
+ if (ret)
+ goto out;
+ // Injecting a NEW_NAME event to update cluster
+ ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_NEW_NAME, &event);
+ if (!ret) {
+ event->peername = gf_strdup(peerinfo->hostname);
+ gf_uuid_copy(event->peerid, peerinfo->uuid);
- ret = glusterd_friend_sm_inject_event (event);
- glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_SUCCESS,
- NULL, (char*)hoststr,
- port, dict);
- }
- } else {
- glusterd_xfer_cli_probe_resp (req, 0, GF_PROBE_FRIEND, NULL,
- (char*)hoststr, port, dict);
- ret = 0;
+ ret = glusterd_friend_sm_inject_event(event);
+ glusterd_xfer_cli_probe_resp(req, 0, GF_PROBE_SUCCESS, NULL,
+ (char *)hoststr, port, dict);
}
+ } else {
+ glusterd_xfer_cli_probe_resp(req, 0, GF_PROBE_FRIEND, NULL,
+ (char *)hoststr, port, dict);
+ ret = 0;
+ }
out:
- rcu_read_unlock ();
- gf_msg_debug ("glusterd", 0, "returning %d", ret);
- return ret;
+ rcu_read_unlock();
+ gf_msg_debug("glusterd", 0, "returning %d", ret);
+ return ret;
}
int
-glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
- uuid_t uuid, dict_t *dict, int *op_errno)
+glusterd_deprobe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
+ uuid_t uuid, dict_t *dict, int *op_errno)
{
- int ret = -1;
- glusterd_peerinfo_t *peerinfo = NULL;
- glusterd_friend_sm_event_t *event = NULL;
- glusterd_probe_ctx_t *ctx = NULL;
+ int ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_friend_sm_event_t *event = NULL;
+ glusterd_probe_ctx_t *ctx = NULL;
- GF_ASSERT (hoststr);
- GF_ASSERT (req);
+ GF_ASSERT(hoststr);
+ GF_ASSERT(req);
- rcu_read_lock ();
+ rcu_read_lock();
- peerinfo = glusterd_peerinfo_find (uuid, hoststr);
- if (peerinfo == NULL) {
- ret = -1;
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_PEER_NOT_FOUND, "Unable to find peerinfo"
- " for host: %s %d", hoststr, port);
- goto out;
- }
-
- if (!peerinfo->rpc) {
- //handle this case
- goto out;
- }
-
- if (peerinfo->detaching) {
- ret = -1;
- if (op_errno)
- *op_errno = GF_DEPROBE_FRIEND_DETACHING;
- goto out;
- }
+ peerinfo = glusterd_peerinfo_find(uuid, hoststr);
+ if (peerinfo == NULL) {
+ ret = -1;
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_PEER_NOT_FOUND,
+ "Unable to find peerinfo"
+ " for host: %s %d",
+ hoststr, port);
+ goto out;
+ }
+
+ if (!peerinfo->rpc) {
+        // TODO: handle the case of a peer without an rpc connection
+ goto out;
+ }
+
+ if (peerinfo->detaching) {
+ ret = -1;
+ if (op_errno)
+ *op_errno = GF_DEPROBE_FRIEND_DETACHING;
+ goto out;
+ }
- ret = glusterd_friend_sm_new_event
- (GD_FRIEND_EVENT_INIT_REMOVE_FRIEND, &event);
+ ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_INIT_REMOVE_FRIEND,
+ &event);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_EVENT_NEW_GET_FAIL,
- "Unable to get new event");
- goto out;
- }
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
+ "Unable to get new event");
+ goto out;
+ }
- ctx = GF_CALLOC (1, sizeof(*ctx), gf_gld_mt_probe_ctx_t);
+ ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_probe_ctx_t);
- if (!ctx) {
- goto out;
- }
+ if (!ctx) {
+ goto out;
+ }
- ctx->hostname = gf_strdup (hoststr);
- ctx->port = port;
- ctx->req = req;
- ctx->dict = dict;
+ ctx->hostname = gf_strdup(hoststr);
+ ctx->port = port;
+ ctx->req = req;
+ ctx->dict = dict;
- event->ctx = ctx;
+ event->ctx = ctx;
- event->peername = gf_strdup (hoststr);
- gf_uuid_copy (event->peerid, uuid);
+ event->peername = gf_strdup(hoststr);
+ gf_uuid_copy(event->peerid, uuid);
- ret = glusterd_friend_sm_inject_event (event);
+ ret = glusterd_friend_sm_inject_event(event);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_EVENT_INJECT_FAIL, "Unable to inject event %d, "
- "ret = %d", event->event, ret);
- goto out;
- }
- peerinfo->detaching = _gf_true;
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
+ "Unable to inject event %d, "
+ "ret = %d",
+ event->event, ret);
+ goto out;
+ }
+ peerinfo->detaching = _gf_true;
out:
- rcu_read_unlock ();
- return ret;
+ rcu_read_unlock();
+ return ret;
}
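
glusterd_deprobe_begin() above follows a fixed shape: allocate a state-machine
event, allocate and fill a probe context (hostname, port, req, dict), attach
the context, inject the event, and only then mark the peer as detaching. A
toy sketch of that allocate/attach/enqueue handoff; inject_event() and the
types here are hypothetical stand-ins for the friend state machine, not
glusterd's API.

    /* Toy sketch of the event + context handoff above; inject_event() is
     * a hypothetical stand-in for glusterd_friend_sm_inject_event(). */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct probe_ctx { char *hostname; int port; };
    struct sm_event  { int type; void *ctx; };

    static int inject_event(struct sm_event *ev)
    {
        /* on success the state machine owns ev and ev->ctx */
        struct probe_ctx *ctx = ev->ctx;
        printf("queued event %d for %s:%d\n", ev->type, ctx->hostname,
               ctx->port);
        return 0;
    }

    int main(void)
    {
        struct sm_event *ev = calloc(1, sizeof(*ev));
        struct probe_ctx *ctx = calloc(1, sizeof(*ctx));

        if (!ev || !ctx)
            return 1;

        ctx->hostname = strdup("peer-a"); /* hypothetical peer */
        ctx->port = 24007;                /* glusterd's well-known port */
        ev->type = 1;                     /* stand-in for INIT_REMOVE_FRIEND */
        ev->ctx = ctx;

        if (inject_event(ev) == 0) {
            /* only now would the caller flip peerinfo->detaching, so a
             * failed injection leaves the peer's state untouched */
        }

        free(ctx->hostname); /* demo-only cleanup; the real owner is the SM */
        free(ctx);
        free(ev);
        return 0;
    }
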
-
int
-glusterd_xfer_friend_remove_resp (rpcsvc_request_t *req, char *hostname, int port)
+glusterd_xfer_friend_remove_resp(rpcsvc_request_t *req, char *hostname,
+ int port)
{
- gd1_mgmt_friend_rsp rsp = {{0}, };
- int32_t ret = -1;
- xlator_t *this = NULL;
-
- GF_ASSERT (hostname);
+ gd1_mgmt_friend_rsp rsp = {
+ {0},
+ };
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ GF_ASSERT(hostname);
+
+ rsp.op_ret = 0;
+ this = THIS;
+ GF_ASSERT(this);
+
+ gf_uuid_copy(rsp.uuid, MY_UUID);
+ rsp.hostname = hostname;
+ rsp.port = port;
+ ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
+
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_RESPONSE_INFO,
+ "Responded to %s (%d), ret: %d", hostname, port, ret);
+ return ret;
+}
- rsp.op_ret = 0;
- this = THIS;
- GF_ASSERT (this);
+int
+glusterd_xfer_friend_add_resp(rpcsvc_request_t *req, char *myhostname,
+ char *remote_hostname, int port, int32_t op_ret,
+ int32_t op_errno)
+{
+ gd1_mgmt_friend_rsp rsp = {
+ {0},
+ };
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ GF_ASSERT(myhostname);
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ gf_uuid_copy(rsp.uuid, MY_UUID);
+ rsp.op_ret = op_ret;
+ rsp.op_errno = op_errno;
+ rsp.hostname = gf_strdup(myhostname);
+ rsp.port = port;
+
+ ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
+
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_RESPONSE_INFO,
+ "Responded to %s (%d), ret: %d, op_ret: %d", remote_hostname, port,
+ ret, op_ret);
+ GF_FREE(rsp.hostname);
+ return ret;
+}
- gf_uuid_copy (rsp.uuid, MY_UUID);
- rsp.hostname = hostname;
- rsp.port = port;
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
+static void
+set_probe_error_str(int op_ret, int op_errno, char *op_errstr, char *errstr,
+ size_t len, char *hostname, int port)
+{
+ if ((op_errstr) && (strcmp(op_errstr, ""))) {
+ snprintf(errstr, len, "%s", op_errstr);
+ return;
+ }
+
+ if (!op_ret) {
+ switch (op_errno) {
+ case GF_PROBE_LOCALHOST:
+ snprintf(errstr, len,
+ "Probe on localhost not "
+ "needed");
+ break;
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_RESPONSE_INFO,
- "Responded to %s (%d), ret: %d", hostname, port, ret);
- return ret;
-}
+ case GF_PROBE_FRIEND:
+ snprintf(errstr, len,
+ "Host %s port %d already"
+ " in peer list",
+ hostname, port);
+ break;
+ case GF_PROBE_FRIEND_DETACHING:
+ snprintf(errstr, len,
+ "Peer is already being "
+ "detached from cluster.\n"
+ "Check peer status by running "
+ "gluster peer status");
+ break;
+ default:
+ if (op_errno != 0)
+ snprintf(errstr, len,
+ "Probe returned "
+ "with %s",
+ strerror(op_errno));
+ break;
+ }
+ } else {
+ switch (op_errno) {
+ case GF_PROBE_ANOTHER_CLUSTER:
+ snprintf(errstr, len,
+ "%s is either already "
+ "part of another cluster or having "
+ "volumes configured",
+ hostname);
+ break;
-int
-glusterd_xfer_friend_add_resp (rpcsvc_request_t *req, char *myhostname,
- char *remote_hostname, int port, int32_t op_ret,
- int32_t op_errno)
-{
- gd1_mgmt_friend_rsp rsp = {{0}, };
- int32_t ret = -1;
- xlator_t *this = NULL;
+ case GF_PROBE_VOLUME_CONFLICT:
+ snprintf(errstr, len,
+ "At least one volume on "
+ "%s conflicts with existing volumes "
+ "in the cluster",
+ hostname);
+ break;
- GF_ASSERT (myhostname);
+ case GF_PROBE_UNKNOWN_PEER:
+ snprintf(errstr, len,
+ "%s responded with "
+ "'unknown peer' error, this could "
+ "happen if %s doesn't have localhost "
+ "in its peer database",
+ hostname, hostname);
+ break;
- this = THIS;
- GF_ASSERT (this);
+ case GF_PROBE_ADD_FAILED:
+ snprintf(errstr, len,
+ "Failed to add peer "
+ "information on %s",
+ hostname);
+ break;
- gf_uuid_copy (rsp.uuid, MY_UUID);
- rsp.op_ret = op_ret;
- rsp.op_errno = op_errno;
- rsp.hostname = gf_strdup (myhostname);
- rsp.port = port;
+ case GF_PROBE_SAME_UUID:
+ snprintf(errstr, len,
+ "Peer uuid (host %s) is "
+ "same as local uuid",
+ hostname);
+ break;
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
+ case GF_PROBE_QUORUM_NOT_MET:
+ snprintf(errstr, len,
+ "Cluster quorum is not "
+ "met. Changing peers is not allowed "
+ "in this state");
+ break;
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_RESPONSE_INFO,
- "Responded to %s (%d), ret: %d, op_ret: %d", remote_hostname,
- port, ret, op_ret);
- GF_FREE (rsp.hostname);
- return ret;
-}
+ case GF_PROBE_MISSED_SNAP_CONFLICT:
+ snprintf(errstr, len,
+ "Failed to update "
+ "list of missed snapshots from "
+ "peer %s",
+ hostname);
+ break;
-static void
-set_probe_error_str (int op_ret, int op_errno, char *op_errstr, char *errstr,
- size_t len, char *hostname, int port)
-{
- if ((op_errstr) && (strcmp (op_errstr, ""))) {
- snprintf (errstr, len, "%s", op_errstr);
- return;
- }
+ case GF_PROBE_SNAP_CONFLICT:
+ snprintf(errstr, len,
+ "Conflict in comparing "
+ "list of snapshots from "
+ "peer %s",
+ hostname);
+ break;
- if (!op_ret) {
- switch (op_errno) {
- case GF_PROBE_LOCALHOST:
- snprintf (errstr, len, "Probe on localhost not "
- "needed");
- break;
-
- case GF_PROBE_FRIEND:
- snprintf (errstr, len, "Host %s port %d already"
- " in peer list", hostname, port);
- break;
-
- case GF_PROBE_FRIEND_DETACHING:
- snprintf (errstr, len, "Peer is already being "
- "detached from cluster.\n"
- "Check peer status by running "
- "gluster peer status");
- break;
- default:
- if (op_errno != 0)
- snprintf (errstr, len, "Probe returned "
- "with %s",
- strerror (op_errno));
- break;
- }
- } else {
- switch (op_errno) {
- case GF_PROBE_ANOTHER_CLUSTER:
- snprintf (errstr, len, "%s is either already "
- "part of another cluster or having "
- "volumes configured", hostname);
- break;
-
- case GF_PROBE_VOLUME_CONFLICT:
- snprintf (errstr, len, "At least one volume on "
- "%s conflicts with existing volumes "
- "in the cluster", hostname);
- break;
-
- case GF_PROBE_UNKNOWN_PEER:
- snprintf (errstr, len, "%s responded with "
- "'unknown peer' error, this could "
- "happen if %s doesn't have localhost "
- "in its peer database", hostname,
- hostname);
- break;
-
- case GF_PROBE_ADD_FAILED:
- snprintf (errstr, len, "Failed to add peer "
- "information on %s", hostname);
- break;
-
- case GF_PROBE_SAME_UUID:
- snprintf (errstr, len, "Peer uuid (host %s) is "
- "same as local uuid", hostname);
- break;
-
- case GF_PROBE_QUORUM_NOT_MET:
- snprintf (errstr, len, "Cluster quorum is not "
- "met. Changing peers is not allowed "
- "in this state");
- break;
-
- case GF_PROBE_MISSED_SNAP_CONFLICT:
- snprintf (errstr, len, "Failed to update "
- "list of missed snapshots from "
- "peer %s", hostname);
- break;
-
- case GF_PROBE_SNAP_CONFLICT:
- snprintf (errstr, len, "Conflict in comparing "
- "list of snapshots from "
- "peer %s", hostname);
- break;
-
- default:
- snprintf (errstr, len, "Probe returned with "
- "%s", strerror (op_errno));
- break;
- }
+ default:
+ snprintf(errstr, len,
+ "Probe returned with "
+ "%s",
+ strerror(op_errno));
+ break;
}
+ }
}
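
A side note on set_probe_error_str(): the switch maps each probe errno to a
user-facing string. The same mapping can be written as a lookup table, which
keeps every message greppable in one array. A minimal sketch with assumed
enum values (the real GF_PROBE_* constants live in the GlusterFS protocol
headers):

    /* Hedged sketch: table-driven variant of the switch above. The enum
     * values are hypothetical, not the real GF_PROBE_* values. */
    #include <stdio.h>

    enum { PROBE_LOCALHOST = 1, PROBE_FRIEND, PROBE_FRIEND_DETACHING };

    static const struct {
        int err;
        const char *msg;
    } probe_msgs[] = {
        {PROBE_LOCALHOST, "Probe on localhost not needed"},
        {PROBE_FRIEND, "Host already in peer list"},
        {PROBE_FRIEND_DETACHING, "Peer is already being detached"},
    };

    static const char *
    probe_msg(int err)
    {
        size_t i;

        for (i = 0; i < sizeof(probe_msgs) / sizeof(probe_msgs[0]); i++)
            if (probe_msgs[i].err == err)
                return probe_msgs[i].msg;
        return "unknown probe error";
    }

    int main(void)
    {
        puts(probe_msg(PROBE_FRIEND));
        return 0;
    }

The switch form does keep one advantage over a table: cases that interpolate
hostname and port need per-case snprintf arguments, which is presumably why
the code is written that way.
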
int
-glusterd_xfer_cli_probe_resp (rpcsvc_request_t *req, int32_t op_ret,
- int32_t op_errno, char *op_errstr, char *hostname,
- int port, dict_t *dict)
+glusterd_xfer_cli_probe_resp(rpcsvc_request_t *req, int32_t op_ret,
+ int32_t op_errno, char *op_errstr, char *hostname,
+ int port, dict_t *dict)
{
- gf_cli_rsp rsp = {0,};
- int32_t ret = -1;
- char errstr[2048] = {0,};
- char *cmd_str = NULL;
- xlator_t *this = THIS;
-
- GF_ASSERT (req);
- GF_ASSERT (this);
-
- (void) set_probe_error_str (op_ret, op_errno, op_errstr, errstr,
- sizeof (errstr), hostname, port);
-
- if (dict) {
- ret = dict_get_strn (dict, "cmd-str", SLEN ("cmd-str"),
- &cmd_str);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_CMDSTR_NOTFOUND_IN_DICT, "Failed to get "
- "command string");
- }
+ gf_cli_rsp rsp = {
+ 0,
+ };
+ int32_t ret = -1;
+ char errstr[2048] = {
+ 0,
+ };
+ char *cmd_str = NULL;
+ xlator_t *this = THIS;
+
+ GF_ASSERT(req);
+ GF_ASSERT(this);
+
+ (void)set_probe_error_str(op_ret, op_errno, op_errstr, errstr,
+ sizeof(errstr), hostname, port);
+
+ if (dict) {
+ ret = dict_get_strn(dict, "cmd-str", SLEN("cmd-str"), &cmd_str);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CMDSTR_NOTFOUND_IN_DICT,
+ "Failed to get "
+ "command string");
+ }
- rsp.op_ret = op_ret;
- rsp.op_errno = op_errno;
- rsp.op_errstr = (errstr[0] != '\0') ? errstr : "";
+ rsp.op_ret = op_ret;
+ rsp.op_errno = op_errno;
+ rsp.op_errstr = (errstr[0] != '\0') ? errstr : "";
- gf_cmd_log ("", "%s : %s %s %s", cmd_str,
- (op_ret) ? "FAILED" : "SUCCESS",
- (errstr[0] != '\0') ? ":" : " ",
- (errstr[0] != '\0') ? errstr : " ");
+ gf_cmd_log("", "%s : %s %s %s", cmd_str, (op_ret) ? "FAILED" : "SUCCESS",
+ (errstr[0] != '\0') ? ":" : " ",
+ (errstr[0] != '\0') ? errstr : " ");
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf_cli_rsp);
+ ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf_cli_rsp);
- if (dict)
- dict_unref (dict);
- gf_msg_debug (this->name, 0, "Responded to CLI, ret: %d", ret);
+ if (dict)
+ dict_unref(dict);
+ gf_msg_debug(this->name, 0, "Responded to CLI, ret: %d", ret);
- return ret;
+ return ret;
}
static void
-set_deprobe_error_str (int op_ret, int op_errno, char *op_errstr, char *errstr,
- size_t len, char *hostname)
+set_deprobe_error_str(int op_ret, int op_errno, char *op_errstr, char *errstr,
+ size_t len, char *hostname)
{
- if ((op_errstr) && (strcmp (op_errstr, ""))) {
- snprintf (errstr, len, "%s", op_errstr);
- return;
- }
-
- if (op_ret) {
- switch (op_errno) {
- case GF_DEPROBE_LOCALHOST:
- snprintf (errstr, len, "%s is localhost",
- hostname);
- break;
-
- case GF_DEPROBE_NOT_FRIEND:
- snprintf (errstr, len, "%s is not part of "
- "cluster", hostname);
- break;
-
- case GF_DEPROBE_BRICK_EXIST:
- snprintf (errstr, len, "Brick(s) with the peer "
- "%s exist in cluster", hostname);
- break;
-
- case GF_DEPROBE_SNAP_BRICK_EXIST:
- snprintf (errstr, len, "%s is part of existing "
- "snapshot. Remove those snapshots "
- "before proceeding ", hostname);
- break;
-
- case GF_DEPROBE_FRIEND_DOWN:
- snprintf (errstr, len, "One of the peers is "
- "probably down. Check with "
- "'peer status'");
- break;
-
- case GF_DEPROBE_QUORUM_NOT_MET:
- snprintf (errstr, len, "Cluster quorum is not "
- "met. Changing peers is not allowed "
- "in this state");
- break;
-
- case GF_DEPROBE_FRIEND_DETACHING:
- snprintf (errstr, len, "Peer is already being "
- "detached from cluster.\n"
- "Check peer status by running "
- "gluster peer status");
- break;
- default:
- snprintf (errstr, len, "Detach returned with "
- "%s", strerror (op_errno));
- break;
+ if ((op_errstr) && (strcmp(op_errstr, ""))) {
+ snprintf(errstr, len, "%s", op_errstr);
+ return;
+ }
+
+ if (op_ret) {
+ switch (op_errno) {
+ case GF_DEPROBE_LOCALHOST:
+ snprintf(errstr, len, "%s is localhost", hostname);
+ break;
- }
- }
-}
+ case GF_DEPROBE_NOT_FRIEND:
+ snprintf(errstr, len,
+ "%s is not part of "
+ "cluster",
+ hostname);
+ break;
+ case GF_DEPROBE_BRICK_EXIST:
+ snprintf(errstr, len,
+ "Brick(s) with the peer "
+ "%s exist in cluster",
+ hostname);
+ break;
-int
-glusterd_xfer_cli_deprobe_resp (rpcsvc_request_t *req, int32_t op_ret,
- int32_t op_errno, char *op_errstr,
- char *hostname, dict_t *dict)
-{
- gf_cli_rsp rsp = {0,};
- int32_t ret = -1;
- char *cmd_str = NULL;
- char errstr[2048] = {0,};
+ case GF_DEPROBE_SNAP_BRICK_EXIST:
+ snprintf(errstr, len,
+ "%s is part of existing "
+ "snapshot. Remove those snapshots "
+ "before proceeding ",
+ hostname);
+ break;
- GF_ASSERT (req);
+ case GF_DEPROBE_FRIEND_DOWN:
+ snprintf(errstr, len,
+ "One of the peers is "
+ "probably down. Check with "
+ "'peer status'");
+ break;
- (void) set_deprobe_error_str (op_ret, op_errno, op_errstr, errstr,
- sizeof (errstr), hostname);
+ case GF_DEPROBE_QUORUM_NOT_MET:
+ snprintf(errstr, len,
+ "Cluster quorum is not "
+ "met. Changing peers is not allowed "
+ "in this state");
+ break;
- if (dict) {
- ret = dict_get_strn (dict, "cmd-str", SLEN ("cmd-str"),
- &cmd_str);
- if (ret)
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_CMDSTR_NOTFOUND_IN_DICT, "Failed to get "
- "command string");
+ case GF_DEPROBE_FRIEND_DETACHING:
+ snprintf(errstr, len,
+ "Peer is already being "
+ "detached from cluster.\n"
+ "Check peer status by running "
+ "gluster peer status");
+ break;
+ default:
+ snprintf(errstr, len,
+ "Detach returned with "
+ "%s",
+ strerror(op_errno));
+ break;
}
+ }
+}
- rsp.op_ret = op_ret;
- rsp.op_errno = op_errno;
- rsp.op_errstr = (errstr[0] != '\0') ? errstr : "";
+int
+glusterd_xfer_cli_deprobe_resp(rpcsvc_request_t *req, int32_t op_ret,
+ int32_t op_errno, char *op_errstr,
+ char *hostname, dict_t *dict)
+{
+ gf_cli_rsp rsp = {
+ 0,
+ };
+ int32_t ret = -1;
+ char *cmd_str = NULL;
+ char errstr[2048] = {
+ 0,
+ };
+
+ GF_ASSERT(req);
+
+ (void)set_deprobe_error_str(op_ret, op_errno, op_errstr, errstr,
+ sizeof(errstr), hostname);
+
+ if (dict) {
+ ret = dict_get_strn(dict, "cmd-str", SLEN("cmd-str"), &cmd_str);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_CMDSTR_NOTFOUND_IN_DICT,
+ "Failed to get "
+ "command string");
+ }
- gf_cmd_log ("", "%s : %s %s %s", cmd_str,
- (op_ret) ? "FAILED" : "SUCCESS",
- (errstr[0] != '\0') ? ":" : " ",
- (errstr[0] != '\0') ? errstr : " ");
+ rsp.op_ret = op_ret;
+ rsp.op_errno = op_errno;
+ rsp.op_errstr = (errstr[0] != '\0') ? errstr : "";
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf_cli_rsp);
+ gf_cmd_log("", "%s : %s %s %s", cmd_str, (op_ret) ? "FAILED" : "SUCCESS",
+ (errstr[0] != '\0') ? ":" : " ",
+ (errstr[0] != '\0') ? errstr : " ");
- gf_msg_debug (THIS->name, 0, "Responded to CLI, ret: %d", ret);
+ ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf_cli_rsp);
- return ret;
+ gf_msg_debug(THIS->name, 0, "Responded to CLI, ret: %d", ret);
+
+ return ret;
}
int32_t
-glusterd_list_friends (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
+glusterd_list_friends(rpcsvc_request_t *req, dict_t *dict, int32_t flags)
{
- int32_t ret = -1;
- glusterd_conf_t *priv = NULL;
- glusterd_peerinfo_t *entry = NULL;
- int32_t count = 0;
- dict_t *friends = NULL;
- gf1_cli_peer_list_rsp rsp = {0,};
- char my_uuid_str[64] = {0,};
- char key[64] = {0,};
- int keylen;
-
- priv = THIS->private;
- GF_ASSERT (priv);
-
- friends = dict_new ();
- if (!friends) {
- gf_msg (THIS->name, GF_LOG_ERROR, ENOMEM,
- GD_MSG_NO_MEMORY, "Out of Memory");
- goto out;
- }
-
- /* Reset ret to 0, needed to prevent failure in case no peers exist */
- ret = 0;
- rcu_read_lock ();
- if (!cds_list_empty (&priv->peers)) {
- cds_list_for_each_entry_rcu (entry, &priv->peers, uuid_list) {
- count++;
- ret = gd_add_peer_detail_to_dict (entry,
- friends, count);
- if (ret)
- goto unlock;
- }
+ int32_t ret = -1;
+ glusterd_conf_t *priv = NULL;
+ glusterd_peerinfo_t *entry = NULL;
+ int32_t count = 0;
+ dict_t *friends = NULL;
+ gf1_cli_peer_list_rsp rsp = {
+ 0,
+ };
+ char my_uuid_str[64] = {
+ 0,
+ };
+ char key[64] = {
+ 0,
+ };
+ int keylen;
+
+ priv = THIS->private;
+ GF_ASSERT(priv);
+
+ friends = dict_new();
+ if (!friends) {
+ gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "Out of Memory");
+ goto out;
+ }
+
+ /* Reset ret to 0, needed to prevent failure in case no peers exist */
+ ret = 0;
+ rcu_read_lock();
+ if (!cds_list_empty(&priv->peers)) {
+ cds_list_for_each_entry_rcu(entry, &priv->peers, uuid_list)
+ {
+ count++;
+ ret = gd_add_peer_detail_to_dict(entry, friends, count);
+ if (ret)
+ goto unlock;
}
+ }
unlock:
- rcu_read_unlock ();
+ rcu_read_unlock();
+ if (ret)
+ goto out;
+
+ if (flags == GF_CLI_LIST_POOL_NODES) {
+ count++;
+ keylen = snprintf(key, sizeof(key), "friend%d.uuid", count);
+ uuid_utoa_r(MY_UUID, my_uuid_str);
+ ret = dict_set_strn(friends, key, keylen, my_uuid_str);
if (ret)
- goto out;
-
- if (flags == GF_CLI_LIST_POOL_NODES) {
- count++;
- keylen = snprintf (key, sizeof (key), "friend%d.uuid", count);
- uuid_utoa_r (MY_UUID, my_uuid_str);
- ret = dict_set_strn (friends, key, keylen, my_uuid_str);
- if (ret)
- goto out;
+ goto out;
- keylen = snprintf (key, sizeof (key), "friend%d.hostname",
- count);
- ret = dict_set_nstrn (friends, key, keylen,
- "localhost", SLEN ("localhost"));
- if (ret)
- goto out;
-
- keylen = snprintf (key, sizeof (key), "friend%d.connected",
- count);
- ret = dict_set_int32n (friends, key, keylen, 1);
- if (ret)
- goto out;
- }
+ keylen = snprintf(key, sizeof(key), "friend%d.hostname", count);
+ ret = dict_set_nstrn(friends, key, keylen, "localhost",
+ SLEN("localhost"));
+ if (ret)
+ goto out;
- ret = dict_set_int32n (friends, "count", SLEN ("count"), count);
+ keylen = snprintf(key, sizeof(key), "friend%d.connected", count);
+ ret = dict_set_int32n(friends, key, keylen, 1);
if (ret)
- goto out;
+ goto out;
+ }
- ret = dict_allocate_and_serialize (friends, &rsp.friends.friends_val,
- &rsp.friends.friends_len);
+ ret = dict_set_int32n(friends, "count", SLEN("count"), count);
+ if (ret)
+ goto out;
- if (ret)
- goto out;
+ ret = dict_allocate_and_serialize(friends, &rsp.friends.friends_val,
+ &rsp.friends.friends_len);
- ret = 0;
+ if (ret)
+ goto out;
+
+ ret = 0;
out:
- if (friends)
- dict_unref (friends);
+ if (friends)
+ dict_unref(friends);
- rsp.op_ret = ret;
+ rsp.op_ret = ret;
- glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf1_cli_peer_list_rsp);
- ret = 0;
- GF_FREE (rsp.friends.friends_val);
+ glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf1_cli_peer_list_rsp);
+ ret = 0;
+ GF_FREE(rsp.friends.friends_val);
- return ret;
+ return ret;
}
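
glusterd_list_friends() flattens the peer list into numbered dict keys
(friend1.uuid, friend1.hostname, friend1.connected, plus a count) before
serializing the dict back to the CLI. A toy sketch of that key scheme;
printf stands in for the dict_set_*() calls and the peer data is made up.

    /* Toy sketch of the numbered-key flattening above; printf stands in
     * for dict_set_strn()/dict_set_int32n(), peers are hypothetical. */
    #include <stdio.h>

    int main(void)
    {
        const char *uuids[] = {"uuid-aaaa", "uuid-bbbb"};
        const char *hosts[] = {"node1", "node2"};
        char key[64];
        int i, count = 0;

        for (i = 0; i < 2; i++) {
            count++;
            snprintf(key, sizeof(key), "friend%d.uuid", count);
            printf("%s = %s\n", key, uuids[i]);
            snprintf(key, sizeof(key), "friend%d.hostname", count);
            printf("%s = %s\n", key, hosts[i]);
            snprintf(key, sizeof(key), "friend%d.connected", count);
            printf("%s = %d\n", key, 1);
        }
        /* the receiver iterates keys friend1..friendN using this count */
        printf("count = %d\n", count);
        return 0;
    }
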
int32_t
-glusterd_get_volumes (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
+glusterd_get_volumes(rpcsvc_request_t *req, dict_t *dict, int32_t flags)
{
- int32_t ret = -1;
- int32_t ret_bkp = 0;
- glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *entry = NULL;
- int32_t count = 0;
- dict_t *volumes = NULL;
- gf_cli_rsp rsp = {0,};
- char *volname = NULL;
-
- priv = THIS->private;
- GF_ASSERT (priv);
- volumes = dict_new ();
- if (!volumes) {
- gf_msg ("glusterd", GF_LOG_ERROR, ENOMEM,
- GD_MSG_NO_MEMORY, "Out of Memory");
- goto out;
- }
-
- if (cds_list_empty (&priv->volumes)) {
- if (flags == GF_CLI_GET_VOLUME)
- ret_bkp = -1;
- ret = 0;
+ int32_t ret = -1;
+ int32_t ret_bkp = 0;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *entry = NULL;
+ int32_t count = 0;
+ dict_t *volumes = NULL;
+ gf_cli_rsp rsp = {
+ 0,
+ };
+ char *volname = NULL;
+
+ priv = THIS->private;
+ GF_ASSERT(priv);
+ volumes = dict_new();
+ if (!volumes) {
+ gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "Out of Memory");
+ goto out;
+ }
+
+ if (cds_list_empty(&priv->volumes)) {
+ if (flags == GF_CLI_GET_VOLUME)
+ ret_bkp = -1;
+ ret = 0;
+ goto respond;
+ }
+ if (flags == GF_CLI_GET_VOLUME_ALL) {
+ cds_list_for_each_entry(entry, &priv->volumes, vol_list)
+ {
+ ret = glusterd_add_volume_detail_to_dict(entry, volumes, count);
+ if (ret)
goto respond;
- }
- if (flags == GF_CLI_GET_VOLUME_ALL) {
- cds_list_for_each_entry (entry, &priv->volumes, vol_list) {
- ret = glusterd_add_volume_detail_to_dict (entry,
- volumes, count);
- if (ret)
- goto respond;
-
- count++;
- }
-
- } else if (flags == GF_CLI_GET_NEXT_VOLUME) {
- ret = dict_get_strn (dict, "volname", SLEN ("volname"),
- &volname);
-
- if (ret) {
- if (priv->volumes.next) {
- entry = cds_list_entry (priv->volumes.next,
- typeof (*entry),
- vol_list);
- }
- } else {
- ret = glusterd_volinfo_find (volname, &entry);
- if (ret)
- goto respond;
- entry = cds_list_entry (entry->vol_list.next,
- typeof (*entry),
- vol_list);
- }
-
- if (&entry->vol_list == &priv->volumes) {
- goto respond;
- } else {
- ret = glusterd_add_volume_detail_to_dict (entry,
- volumes, count);
- if (ret)
- goto respond;
-
- count++;
- }
- } else if (flags == GF_CLI_GET_VOLUME) {
- ret = dict_get_strn (dict, "volname", SLEN ("volname"),
- &volname);
-
- if (ret)
- goto respond;
-
- ret = glusterd_volinfo_find (volname, &entry);
- if (ret) {
- ret_bkp = ret;
- goto respond;
- }
-
- ret = glusterd_add_volume_detail_to_dict (entry,
- volumes, count);
- if (ret)
- goto respond;
-
- count++;
+ count++;
}
-respond:
- ret = dict_set_int32n (volumes, "count", SLEN ("count"), count);
- if (ret)
- goto out;
- ret = dict_allocate_and_serialize (volumes, &rsp.dict.dict_val,
- &rsp.dict.dict_len);
+ } else if (flags == GF_CLI_GET_NEXT_VOLUME) {
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
- if (ret)
- goto out;
-
- ret = 0;
-out:
- if (ret_bkp == -1) {
- rsp.op_ret = ret_bkp;
- rsp.op_errstr = "Volume does not exist";
- rsp.op_errno = EG_NOVOL;
+ if (ret) {
+ if (priv->volumes.next) {
+ entry = cds_list_entry(priv->volumes.next, typeof(*entry),
+ vol_list);
+ }
} else {
- rsp.op_ret = ret;
- rsp.op_errstr = "";
+ ret = glusterd_volinfo_find(volname, &entry);
+ if (ret)
+ goto respond;
+ entry = cds_list_entry(entry->vol_list.next, typeof(*entry),
+ vol_list);
}
- glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf_cli_rsp);
- ret = 0;
- if (volumes)
- dict_unref (volumes);
-
- GF_FREE (rsp.dict.dict_val);
- return ret;
-}
-
-int
-__glusterd_handle_status_volume (rpcsvc_request_t *req)
-{
- int32_t ret = -1;
- uint32_t cmd = 0;
- dict_t *dict = NULL;
- char *volname = 0;
- gf_cli_req cli_req = {{0,}};
- glusterd_op_t cli_op = GD_OP_STATUS_VOLUME;
- char err_str[256] = {0,};
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
-
- GF_ASSERT (req);
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
- GF_ASSERT (conf);
+ if (&entry->vol_list == &priv->volumes) {
+ goto respond;
+ } else {
+ ret = glusterd_add_volume_detail_to_dict(entry, volumes, count);
+ if (ret)
+ goto respond;
- ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
- if (ret < 0) {
- //failed to decode msg;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
- "request received from cli");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
+ count++;
}
+ } else if (flags == GF_CLI_GET_VOLUME) {
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
- if (cli_req.dict.dict_len > 0) {
- dict = dict_new();
- if (!dict)
- goto out;
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len, &dict);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL, "failed to "
- "unserialize buffer");
- snprintf (err_str, sizeof (err_str), "Unable to decode "
- "the command");
- goto out;
- }
+ if (ret)
+ goto respond;
+ ret = glusterd_volinfo_find(volname, &entry);
+ if (ret) {
+ ret_bkp = ret;
+ goto respond;
}
- ret = dict_get_uint32 (dict, "cmd", &cmd);
+ ret = glusterd_add_volume_detail_to_dict(entry, volumes, count);
if (ret)
- goto out;
+ goto respond;
- if (!(cmd & GF_CLI_STATUS_ALL)) {
- ret = dict_get_strn (dict, "volname", SLEN ("volname"),
- &volname);
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Unable to get "
- "volume name");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, "%s", err_str);
- goto out;
- }
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_STATUS_VOL_REQ_RCVD,
- "Received status volume req for volume %s", volname);
+ count++;
+ }
- }
- if ((cmd & GF_CLI_STATUS_CLIENT_LIST) &&
- (conf->op_version < GD_OP_VERSION_3_13_0)) {
- snprintf (err_str, sizeof (err_str), "The cluster is operating "
- "at version less than %d. Getting the client-list "
- "is not allowed in this state.",
- GD_OP_VERSION_3_13_0);
- ret = -1;
- goto out;
- }
+respond:
+ ret = dict_set_int32n(volumes, "count", SLEN("count"), count);
+ if (ret)
+ goto out;
+ ret = dict_allocate_and_serialize(volumes, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
- if ((cmd & GF_CLI_STATUS_QUOTAD) &&
- (conf->op_version == GD_OP_VERSION_MIN)) {
- snprintf (err_str, sizeof (err_str), "The cluster is operating "
- "at version 1. Getting the status of quotad is not "
- "allowed in this state.");
- ret = -1;
- goto out;
- }
+ if (ret)
+ goto out;
- if ((cmd & GF_CLI_STATUS_SNAPD) &&
- (conf->op_version < GD_OP_VERSION_3_6_0)) {
- snprintf (err_str, sizeof (err_str), "The cluster is operating "
- "at a lesser version than %d. Getting the status of "
- "snapd is not allowed in this state",
- GD_OP_VERSION_3_6_0);
- ret = -1;
- goto out;
- }
+ ret = 0;
+out:
+ if (ret_bkp == -1) {
+ rsp.op_ret = ret_bkp;
+ rsp.op_errstr = "Volume does not exist";
+ rsp.op_errno = EG_NOVOL;
+ } else {
+ rsp.op_ret = ret;
+ rsp.op_errstr = "";
+ }
+ glusterd_submit_reply(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp);
+ ret = 0;
- if ((cmd & GF_CLI_STATUS_BITD) &&
- (conf->op_version < GD_OP_VERSION_3_7_0)) {
- snprintf (err_str, sizeof (err_str), "The cluster is operating "
- "at a lesser version than %d. Getting the status of "
- "bitd is not allowed in this state",
- GD_OP_VERSION_3_7_0);
- ret = -1;
- goto out;
- }
+ if (volumes)
+ dict_unref(volumes);
- if ((cmd & GF_CLI_STATUS_TIERD) &&
- (conf->op_version < GD_OP_VERSION_3_10_0)) {
- snprintf (err_str, sizeof (err_str), "The cluster is operating "
- "at a lesser version than %d. Getting the status of "
- "tierd is not allowed in this state",
- GD_OP_VERSION_3_6_0);
- ret = -1;
- goto out;
- }
+ GF_FREE(rsp.dict.dict_val);
+ return ret;
+}
- if ((cmd & GF_CLI_STATUS_SCRUB) &&
- (conf->op_version < GD_OP_VERSION_3_7_0)) {
- snprintf (err_str, sizeof (err_str), "The cluster is operating "
- "at a lesser version than %d. Getting the status of "
- "scrub is not allowed in this state",
- GD_OP_VERSION_3_7_0);
- ret = -1;
- goto out;
- }
+int
+__glusterd_handle_status_volume(rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ uint32_t cmd = 0;
+ dict_t *dict = NULL;
+ char *volname = 0;
+ gf_cli_req cli_req = {{
+ 0,
+ }};
+ glusterd_op_t cli_op = GD_OP_STATUS_VOLUME;
+ char err_str[256] = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ GF_ASSERT(req);
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode "
+ "request received from cli");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len > 0) {
+ dict = dict_new();
+ if (!dict)
+ goto out;
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize buffer");
+ snprintf(err_str, sizeof(err_str),
+ "Unable to decode "
+ "the command");
+ goto out;
+ }
+ }
+
+ ret = dict_get_uint32(dict, "cmd", &cmd);
+ if (ret)
+ goto out;
+
+ if (!(cmd & GF_CLI_STATUS_ALL)) {
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Unable to get "
+ "volume name");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s",
+ err_str);
+ goto out;
+ }
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_STATUS_VOL_REQ_RCVD,
+ "Received status volume req for volume %s", volname);
+ }
+ if ((cmd & GF_CLI_STATUS_CLIENT_LIST) &&
+ (conf->op_version < GD_OP_VERSION_3_13_0)) {
+ snprintf(err_str, sizeof(err_str),
+ "The cluster is operating "
+ "at version less than %d. Getting the client-list "
+ "is not allowed in this state.",
+ GD_OP_VERSION_3_13_0);
+ ret = -1;
+ goto out;
+ }
+
+ if ((cmd & GF_CLI_STATUS_QUOTAD) &&
+ (conf->op_version == GD_OP_VERSION_MIN)) {
+ snprintf(err_str, sizeof(err_str),
+ "The cluster is operating "
+ "at version 1. Getting the status of quotad is not "
+ "allowed in this state.");
+ ret = -1;
+ goto out;
+ }
+
+ if ((cmd & GF_CLI_STATUS_SNAPD) &&
+ (conf->op_version < GD_OP_VERSION_3_6_0)) {
+ snprintf(err_str, sizeof(err_str),
+ "The cluster is operating "
+ "at a lesser version than %d. Getting the status of "
+ "snapd is not allowed in this state",
+ GD_OP_VERSION_3_6_0);
+ ret = -1;
+ goto out;
+ }
+
+ if ((cmd & GF_CLI_STATUS_BITD) &&
+ (conf->op_version < GD_OP_VERSION_3_7_0)) {
+ snprintf(err_str, sizeof(err_str),
+ "The cluster is operating "
+ "at a lesser version than %d. Getting the status of "
+ "bitd is not allowed in this state",
+ GD_OP_VERSION_3_7_0);
+ ret = -1;
+ goto out;
+ }
+
+ if ((cmd & GF_CLI_STATUS_TIERD) &&
+ (conf->op_version < GD_OP_VERSION_3_10_0)) {
+        snprintf(err_str, sizeof(err_str),
+                 "The cluster is operating "
+                 "at a lesser version than %d. Getting the status of "
+                 "tierd is not allowed in this state",
+                 GD_OP_VERSION_3_10_0);
+ ret = -1;
+ goto out;
+ }
+
+ if ((cmd & GF_CLI_STATUS_SCRUB) &&
+ (conf->op_version < GD_OP_VERSION_3_7_0)) {
+ snprintf(err_str, sizeof(err_str),
+ "The cluster is operating "
+ "at a lesser version than %d. Getting the status of "
+ "scrub is not allowed in this state",
+ GD_OP_VERSION_3_7_0);
+ ret = -1;
+ goto out;
+ }
- ret = glusterd_op_begin_synctask (req, GD_OP_STATUS_VOLUME, dict);
+ ret = glusterd_op_begin_synctask(req, GD_OP_STATUS_VOLUME, dict);
out:
- if (ret) {
- if (err_str[0] == '\0')
- snprintf (err_str, sizeof (err_str),
- "Operation failed");
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- dict, err_str);
- }
- free (cli_req.dict.dict_val);
+ if (ret) {
+ if (err_str[0] == '\0')
+ snprintf(err_str, sizeof(err_str), "Operation failed");
+ ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);
+ }
+ free(cli_req.dict.dict_val);
- return ret;
+ return ret;
}
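
__glusterd_handle_status_volume() gates each status sub-command on the
cluster op-version: a bit in cmd selects the daemon, and the request is
refused when conf->op_version predates the release that introduced it. A
compact sketch of that flag-plus-version check; the constants below are
hypothetical stand-ins for the GF_CLI_STATUS_* and GD_OP_VERSION_* values.

    /* Hedged sketch of the op-version gating above; flag and version
     * values are made up, only the check's shape matches the handler. */
    #include <stdio.h>

    #define STATUS_TIERD (1 << 0) /* hypothetical sub-command bit */
    #define STATUS_BITD  (1 << 1)

    #define OP_VERSION_CLUSTER 30600 /* what the cluster currently runs */
    #define OP_VERSION_TIERD   31000 /* hypothetical feature cut-in */

    static int
    status_allowed(unsigned int cmd, int op_version)
    {
        if ((cmd & STATUS_TIERD) && op_version < OP_VERSION_TIERD)
            return 0; /* cluster too old for this sub-command */
        return 1;
    }

    int main(void)
    {
        printf("tierd status allowed: %d\n",
               status_allowed(STATUS_TIERD, OP_VERSION_CLUSTER)); /* 0 */
        return 0;
    }
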
int
-glusterd_handle_status_volume (rpcsvc_request_t *req)
+glusterd_handle_status_volume(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_handle_status_volume);
+ return glusterd_big_locked_handler(req, __glusterd_handle_status_volume);
}
int
-__glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req)
+__glusterd_handle_cli_clearlocks_volume(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf_cli_req cli_req = {{0,}};
- glusterd_op_t cli_op = GD_OP_CLEARLOCKS_VOLUME;
- char *volname = NULL;
- dict_t *dict = NULL;
- char err_str[64] = {0,};
- xlator_t *this = NULL;
-
- GF_ASSERT (req);
- this = THIS;
- GF_ASSERT (this);
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{
+ 0,
+ }};
+ glusterd_op_t cli_op = GD_OP_CLEARLOCKS_VOLUME;
+ char *volname = NULL;
+ dict_t *dict = NULL;
+ char err_str[64] = {
+ 0,
+ };
+ xlator_t *this = NULL;
+
+ GF_ASSERT(req);
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = -1;
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode "
+ "request received from cli");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len) {
+ dict = dict_new();
- ret = -1;
- ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
- "request received from cli");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to unserialize req-buffer to"
+ " dictionary");
+ snprintf(err_str, sizeof(err_str),
+ "unable to decode "
+ "the command");
+ goto out;
}
- if (cli_req.dict.dict_len) {
- dict = dict_new ();
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to unserialize req-buffer to"
- " dictionary");
- snprintf (err_str, sizeof (err_str), "unable to decode "
- "the command");
- goto out;
- }
-
- } else {
- ret = -1;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_CLI_REQ_EMPTY, "Empty cli request.");
- goto out;
- }
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Unable to get volume "
- "name");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLNAME_NOTFOUND_IN_DICT, "%s", err_str);
- goto out;
- }
-
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_CLRCLK_VOL_REQ_RCVD, "Received clear-locks volume req "
- "for volume %s", volname);
-
- ret = glusterd_op_begin_synctask (req, GD_OP_CLEARLOCKS_VOLUME, dict);
+ } else {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLI_REQ_EMPTY,
+ "Empty cli request.");
+ goto out;
+ }
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Unable to get volume "
+ "name");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLNAME_NOTFOUND_IN_DICT,
+ "%s", err_str);
+ goto out;
+ }
+
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_CLRCLK_VOL_REQ_RCVD,
+ "Received clear-locks volume req "
+ "for volume %s",
+ volname);
+
+ ret = glusterd_op_begin_synctask(req, GD_OP_CLEARLOCKS_VOLUME, dict);
out:
- if (ret) {
- if (err_str[0] == '\0')
- snprintf (err_str, sizeof (err_str),
- "Operation failed");
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- dict, err_str);
- }
- free (cli_req.dict.dict_val);
-
- return ret;
+ if (ret) {
+ if (err_str[0] == '\0')
+ snprintf(err_str, sizeof(err_str), "Operation failed");
+ ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);
+ }
+ free(cli_req.dict.dict_val);
+
+ return ret;
}
int
-glusterd_handle_cli_clearlocks_volume (rpcsvc_request_t *req)
+glusterd_handle_cli_clearlocks_volume(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_handle_cli_clearlocks_volume);
+ return glusterd_big_locked_handler(req,
+ __glusterd_handle_cli_clearlocks_volume);
}
static int
-get_volinfo_from_brickid (char *brickid, glusterd_volinfo_t **volinfo)
+get_volinfo_from_brickid(char *brickid, glusterd_volinfo_t **volinfo)
{
- int ret = -1;
- char *volid_str = NULL;
- char *brick = NULL;
- char *brickid_dup = NULL;
- uuid_t volid = {0};
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (brickid);
-
- brickid_dup = gf_strdup (brickid);
- if (!brickid_dup)
- goto out;
-
- volid_str = brickid_dup;
- brick = strchr (brickid_dup, ':');
- if (!brick) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_NOT_FOUND,
- "Invalid brickid");
- goto out;
- }
-
- *brick = '\0';
- brick++;
- gf_uuid_parse (volid_str, volid);
- ret = glusterd_volinfo_find_by_volume_id (volid, volinfo);
+ int ret = -1;
+ char *volid_str = NULL;
+ char *brick = NULL;
+ char *brickid_dup = NULL;
+ uuid_t volid = {0};
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(brickid);
+
+ brickid_dup = gf_strdup(brickid);
+ if (!brickid_dup)
+ goto out;
+
+ volid_str = brickid_dup;
+ brick = strchr(brickid_dup, ':');
+ if (!brick) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_NOT_FOUND,
+ "Invalid brickid");
+ goto out;
+ }
+
+ *brick = '\0';
+ brick++;
+ gf_uuid_parse(volid_str, volid);
+ ret = glusterd_volinfo_find_by_volume_id(volid, volinfo);
+ if (ret) {
+ /* Check if it is a snapshot volume */
+ ret = glusterd_snap_volinfo_find_by_volume_id(volid, volinfo);
if (ret) {
- /* Check if it is a snapshot volume */
- ret = glusterd_snap_volinfo_find_by_volume_id (volid, volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_VOLINFO_GET_FAIL,
- "Failed to find volinfo");
- goto out;
- }
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLINFO_GET_FAIL,
+ "Failed to find volinfo");
+ goto out;
}
+ }
- ret = 0;
+ ret = 0;
out:
- GF_FREE (brickid_dup);
- return ret;
+ GF_FREE(brickid_dup);
+ return ret;
}
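
get_volinfo_from_brickid() relies on the brickid having the form
"<volume-uuid>:<brick-path>" and splits it on the first colon (the uuid
itself never contains one). A standalone sketch of just that parsing step;
the brickid value is hypothetical and printf replaces the volinfo lookup.

    /* Standalone sketch of the brickid split above; the value is made up
     * and printf replaces glusterd_volinfo_find_by_volume_id(). */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        /* hypothetical brickid: "<volume-uuid>:<brick-path>" */
        const char *brickid =
            "3f1d9a52-0000-4000-8000-000000000000:/bricks/brick1";
        char *dup = strdup(brickid);
        char *brick;

        if (!dup)
            return 1;

        brick = strchr(dup, ':'); /* first ':' ends the uuid */
        if (!brick) {
            fprintf(stderr, "invalid brickid\n");
            free(dup);
            return 1;
        }
        *brick++ = '\0'; /* terminate the uuid, step onto the path */

        printf("volume id: %s\nbrick path: %s\n", dup, brick);
        free(dup);
        return 0;
    }
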
static int
-__glusterd_handle_barrier (rpcsvc_request_t *req)
+__glusterd_handle_barrier(rpcsvc_request_t *req)
{
- int ret = -1;
- xlator_t *this = NULL;
- gf_cli_req cli_req = {{0,}};
- dict_t *dict = NULL;
- char *volname = NULL;
-
- GF_ASSERT (req);
- this = THIS;
- GF_ASSERT(this);
-
- ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode "
- "request received from cli");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- if (!cli_req.dict.dict_len) {
- ret = -1;
- goto out;
- }
-
- dict = dict_new();
- if (!dict) {
- ret = -1;
- goto out;
- }
- ret = dict_unserialize (cli_req.dict.dict_val, cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL, "Failed to unserialize "
- "request dictionary.");
- goto out;
- }
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLNAME_NOTFOUND_IN_DICT,
- "Volname not present in "
- "dict");
- goto out;
- }
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_BARRIER_VOL_REQ_RCVD,
- "Received barrier volume request for "
- "volume %s", volname);
+ int ret = -1;
+ xlator_t *this = NULL;
+ gf_cli_req cli_req = {{
+ 0,
+ }};
+ dict_t *dict = NULL;
+ char *volname = NULL;
+
+ GF_ASSERT(req);
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode "
+ "request received from cli");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (!cli_req.dict.dict_len) {
+ ret = -1;
+ goto out;
+ }
- ret = glusterd_op_begin_synctask (req, GD_OP_BARRIER, dict);
+ dict = dict_new();
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len, &dict);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "Failed to unserialize "
+ "request dictionary.");
+ goto out;
+ }
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLNAME_NOTFOUND_IN_DICT,
+ "Volname not present in "
+ "dict");
+ goto out;
+ }
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_BARRIER_VOL_REQ_RCVD,
+ "Received barrier volume request for "
+ "volume %s",
+ volname);
+
+ ret = glusterd_op_begin_synctask(req, GD_OP_BARRIER, dict);
out:
- if (ret) {
- ret = glusterd_op_send_cli_response (GD_OP_BARRIER, ret, 0, req,
- dict, "Operation failed");
- }
- free (cli_req.dict.dict_val);
- return ret;
+ if (ret) {
+ ret = glusterd_op_send_cli_response(GD_OP_BARRIER, ret, 0, req, dict,
+ "Operation failed");
+ }
+ free(cli_req.dict.dict_val);
+ return ret;
}
int
-glusterd_handle_barrier (rpcsvc_request_t *req)
+glusterd_handle_barrier(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __glusterd_handle_barrier);
+ return glusterd_big_locked_handler(req, __glusterd_handle_barrier);
}
static gf_boolean_t
-gd_is_global_option (char *opt_key)
+gd_is_global_option(char *opt_key)
{
- GF_VALIDATE_OR_GOTO (THIS->name, opt_key, out);
+ GF_VALIDATE_OR_GOTO(THIS->name, opt_key, out);
- return (strcmp (opt_key, GLUSTERD_SHARED_STORAGE_KEY) == 0 ||
- strcmp (opt_key, GLUSTERD_QUORUM_RATIO_KEY) == 0 ||
- strcmp (opt_key, GLUSTERD_GLOBAL_OP_VERSION_KEY) == 0 ||
- strcmp (opt_key, GLUSTERD_BRICK_MULTIPLEX_KEY) == 0 ||
- strcmp (opt_key, GLUSTERD_LOCALTIME_LOGGING_KEY) == 0 ||
- strcmp (opt_key, GLUSTERD_DAEMON_LOG_LEVEL_KEY) == 0 ||
- strcmp (opt_key, GLUSTERD_MAX_OP_VERSION_KEY) == 0);
+ return (strcmp(opt_key, GLUSTERD_SHARED_STORAGE_KEY) == 0 ||
+ strcmp(opt_key, GLUSTERD_QUORUM_RATIO_KEY) == 0 ||
+ strcmp(opt_key, GLUSTERD_GLOBAL_OP_VERSION_KEY) == 0 ||
+ strcmp(opt_key, GLUSTERD_BRICK_MULTIPLEX_KEY) == 0 ||
+ strcmp(opt_key, GLUSTERD_LOCALTIME_LOGGING_KEY) == 0 ||
+ strcmp(opt_key, GLUSTERD_DAEMON_LOG_LEVEL_KEY) == 0 ||
+ strcmp(opt_key, GLUSTERD_MAX_OP_VERSION_KEY) == 0);
out:
- return _gf_false;
+ return _gf_false;
}
int32_t
-glusterd_get_volume_opts (rpcsvc_request_t *req, dict_t *dict)
+glusterd_get_volume_opts(rpcsvc_request_t *req, dict_t *dict)
{
- int32_t ret = -1;
- int32_t count = 1;
- int exists = 0;
- char *key = NULL;
- char *orig_key = NULL;
- char *key_fixed = NULL;
- char *volname = NULL;
- char *value = NULL;
- char err_str[2048] = {0,};
- char dict_key[50] = {0,};
- int keylen;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- gf_cli_rsp rsp = {0,};
- char op_version_buff[10] = {0,};
-
- this = THIS;
- GF_ASSERT (this);
-
- priv = this->private;
- GF_ASSERT (priv);
-
- GF_ASSERT (req);
- GF_ASSERT (dict);
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Failed to get volume "
- "name while handling get volume option command");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLNAME_NOTFOUND_IN_DICT, "%s", err_str);
- goto out;
- }
-
- if (strcasecmp (volname, "all") == 0) {
- ret = glusterd_get_global_options_for_all_vols (req, dict,
- &rsp.op_errstr);
- goto out;
- }
-
-
- ret = dict_get_strn (dict, "key", SLEN ("key"), &key);
- if (ret) {
- snprintf (err_str, sizeof (err_str), "Failed to get key "
- "while handling get volume option for %s", volname);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "%s", err_str);
- goto out;
- }
- gf_msg_debug (this->name, 0, "Received get volume opt request for "
- "volume %s", volname);
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- snprintf (err_str, sizeof(err_str),
- FMTSTR_CHECK_VOL_EXISTS, volname);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, FMTSTR_CHECK_VOL_EXISTS,
- volname);
+ int32_t ret = -1;
+ int32_t count = 1;
+ int exists = 0;
+ char *key = NULL;
+ char *orig_key = NULL;
+ char *key_fixed = NULL;
+ char *volname = NULL;
+ char *value = NULL;
+ char err_str[2048] = {
+ 0,
+ };
+ char dict_key[50] = {
+ 0,
+ };
+ int keylen;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ gf_cli_rsp rsp = {
+ 0,
+ };
+ char op_version_buff[10] = {
+ 0,
+ };
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ GF_ASSERT(req);
+ GF_ASSERT(dict);
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Failed to get volume "
+ "name while handling get volume option command");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLNAME_NOTFOUND_IN_DICT,
+ "%s", err_str);
+ goto out;
+ }
+
+ if (strcasecmp(volname, "all") == 0) {
+ ret = glusterd_get_global_options_for_all_vols(req, dict,
+ &rsp.op_errstr);
+ goto out;
+ }
+
+ ret = dict_get_strn(dict, "key", SLEN("key"), &key);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Failed to get key "
+ "while handling get volume option for %s",
+ volname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ err_str);
+ goto out;
+ }
+ gf_msg_debug(this->name, 0,
+ "Received get volume opt request for "
+ "volume %s",
+ volname);
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str), FMTSTR_CHECK_VOL_EXISTS, volname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ FMTSTR_CHECK_VOL_EXISTS, volname);
+ goto out;
+ }
+ if (strcmp(key, "all")) {
+ if (fnmatch(GD_HOOKS_SPECIFIC_KEY, key, FNM_NOESCAPE) == 0) {
+ keylen = sprintf(dict_key, "key%d", count);
+ ret = dict_set_strn(dict, dict_key, keylen, key);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to "
+ "set %s in dictionary",
+ key);
+ goto out;
+ }
+ ret = dict_get_str(volinfo->dict, key, &value);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Failed to "
+ "get %s in dictionary",
+ key);
+ goto out;
+ }
+ keylen = sprintf(dict_key, "value%d", count);
+ ret = dict_set_strn(dict, dict_key, keylen, value);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to "
+ "set %s in dictionary",
+ key);
+ goto out;
+ }
+ } else {
+ exists = glusterd_check_option_exists(key, &key_fixed);
+ if (!exists) {
+                ret = snprintf(err_str, sizeof(err_str),
+                               "Option "
+                               "with name: %s does not exist",
+                               key);
+                gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_UNKNOWN_KEY,
+                       "%s", err_str);
+                if (key_fixed)
+                    snprintf(err_str + ret, sizeof(err_str) - ret,
+                             ". Did you mean %s?", key_fixed);
+ ret = -1;
goto out;
- }
- if (strcmp(key, "all")) {
- if (fnmatch (GD_HOOKS_SPECIFIC_KEY, key, FNM_NOESCAPE) == 0) {
- keylen = sprintf (dict_key, "key%d", count);
- ret = dict_set_strn (dict, dict_key, keylen, key);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "Failed to "
- "set %s in dictionary", key);
- goto out;
- }
- ret = dict_get_str (volinfo->dict, key, &value);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Failed to "
- "get %s in dictionary", key);
- goto out;
- }
- keylen = sprintf (dict_key, "value%d", count);
- ret = dict_set_strn (dict, dict_key, keylen, value);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "Failed to "
- "set %s in dictionary", key);
- goto out;
- }
- } else {
- exists = glusterd_check_option_exists (key,
- &key_fixed);
- if (!exists) {
- snprintf (err_str, sizeof (err_str), "Option "
- "with name: %s does not exist", key);
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_UNKNOWN_KEY, "%s",
- err_str);
- if (key_fixed)
- snprintf (err_str + ret,
- sizeof (err_str) - ret,
- "Did you mean %s?",
- key_fixed);
- ret = -1;
- goto out;
- }
- if (key_fixed) {
- orig_key = key;
- key = key_fixed;
- }
+ }
+ if (key_fixed) {
+ orig_key = key;
+ key = key_fixed;
+ }
- if (gd_is_global_option (key)) {
- char warn_str[] = "Warning: support to get \
+ if (gd_is_global_option(key)) {
+ char warn_str[] =
+ "Warning: support to get \
global option value using `volume get \
<volname>` will be deprecated from \
next release. Consider using `volume \
get all` instead for global options";
- ret = dict_set_strn (dict, "warning",
- SLEN ("warning"),
- warn_str);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR,
- 0, GD_MSG_DICT_SET_FAILED,
- "Failed to set warning "
- "message in dictionary");
- goto out;
- }
- }
+ ret = dict_set_strn(dict, "warning", SLEN("warning"), warn_str);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set warning "
+ "message in dictionary");
+ goto out;
+ }
+ }
- if (strcmp (key, GLUSTERD_MAX_OP_VERSION_KEY) == 0) {
- ret = glusterd_get_global_max_op_version (req, dict, 1);
- if (ret)
- goto out;
- } else if (strcmp (key,
- GLUSTERD_GLOBAL_OP_VERSION_KEY) == 0) {
- keylen = sprintf (dict_key, "key%d", count);
- ret = dict_set_strn (dict, dict_key, keylen,
- key);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "Failed"
- "to set %s in dictionary", key);
- goto out;
- }
- keylen = sprintf (dict_key, "value%d", count);
- sprintf (op_version_buff, "%d",
- priv->op_version);
- ret = dict_set_strn (dict, dict_key, keylen,
- op_version_buff);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "Failed"
- " to set value for key %s in "
- "dictionary", key);
- goto out;
- }
- } else if (strcmp (key,
- "config.memory-accounting") == 0) {
- keylen = sprintf (dict_key, "key%d", count);
- ret = dict_set_strn (dict, dict_key, keylen,
- key);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "Failed"
- " to set %s in dictionary",
- key);
- goto out;
- }
- keylen = sprintf (dict_key, "value%d", count);
-
- if (volinfo->memory_accounting)
- ret = dict_set_nstrn (dict, dict_key,
- keylen,
- "Enabled",
- SLEN ("Enabled"));
- else
- ret = dict_set_nstrn (dict, dict_key,
- keylen,
- "Disabled",
- SLEN ("Disabled"));
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "Failed"
- " to set value for key %s in "
- "dictionary", key);
- goto out;
- }
- } else if (strcmp (key, "config.transport") == 0) {
- keylen = sprintf (dict_key, "key%d", count);
- ret = dict_set_strn (dict, dict_key, keylen,
- key);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set %s in "
- "dictionary", key);
- goto out;
- }
- keylen = sprintf (dict_key, "value%d", count);
-
- if (volinfo->transport_type
- == GF_TRANSPORT_RDMA)
- ret = dict_set_nstrn (dict, dict_key,
- keylen, "rdma",
- SLEN ("rdma"));
- else if (volinfo->transport_type
- == GF_TRANSPORT_TCP)
- ret = dict_set_nstrn (dict, dict_key,
- keylen, "tcp",
- SLEN ("tcp"));
- else if (volinfo->transport_type ==
- GF_TRANSPORT_BOTH_TCP_RDMA)
- ret = dict_set_nstrn (dict, dict_key,
- keylen,
- "tcp,rdma",
- SLEN ("tcp,rdma"));
- else
- ret = dict_set_nstrn (dict, dict_key,
- keylen, "none",
- SLEN ("none"));
-
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set value for key "
- "%s in dictionary", key);
- goto out;
- }
- } else {
- keylen = sprintf (dict_key, "key%d", count);
- ret = dict_set_strn (dict, dict_key, keylen,
- key);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set %s in "
- "dictionary", key);
- goto out;
- }
- keylen = sprintf (dict_key, "value%d", count);
- ret = dict_get_str (priv->opts, key, &value);
- if (!ret) {
- ret = dict_set_strn (dict, dict_key,
- keylen, value);
- if (ret) {
- gf_msg (this->name,
- GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set %s in "
- " dictionary", key);
- goto out;
- }
- } else {
- ret = glusterd_get_default_val_for_volopt
- (dict,
- _gf_false,
- key, orig_key,
- volinfo,
- &rsp.op_errstr);
- if (ret && !rsp.op_errstr) {
- snprintf (err_str,
- sizeof(err_str),
- "Failed to fetch the "
- "value of %s, check "
- "log file for more"
- " details", key);
- }
- }
- }
+ if (strcmp(key, GLUSTERD_MAX_OP_VERSION_KEY) == 0) {
+ ret = glusterd_get_global_max_op_version(req, dict, 1);
+ if (ret)
+ goto out;
+ } else if (strcmp(key, GLUSTERD_GLOBAL_OP_VERSION_KEY) == 0) {
+ keylen = sprintf(dict_key, "key%d", count);
+ ret = dict_set_strn(dict, dict_key, keylen, key);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed"
+ "to set %s in dictionary",
+ key);
+ goto out;
}
- /* Request is for a single option, explicitly set count to 1
- * in the dictionary.
- */
- ret = dict_set_int32n (dict, "count", SLEN ("count"), 1);
+ keylen = sprintf(dict_key, "value%d", count);
+ sprintf(op_version_buff, "%d", priv->op_version);
+ ret = dict_set_strn(dict, dict_key, keylen, op_version_buff);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED, "Failed to set count "
- "value in the dictionary");
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed"
+ " to set value for key %s in "
+ "dictionary",
+ key);
+ goto out;
}
- } else {
- /* Handle the "all" volume option request */
- ret = glusterd_get_default_val_for_volopt (dict, _gf_true, NULL,
- NULL, volinfo,
- &rsp.op_errstr);
- if (ret && !rsp.op_errstr) {
- snprintf (err_str, sizeof(err_str),
- "Failed to fetch the value of all volume "
- "options, check log file for more details");
+ } else if (strcmp(key, "config.memory-accounting") == 0) {
+ keylen = sprintf(dict_key, "key%d", count);
+ ret = dict_set_strn(dict, dict_key, keylen, key);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed"
+ " to set %s in dictionary",
+ key);
+ goto out;
+ }
+ keylen = sprintf(dict_key, "value%d", count);
+
+ if (volinfo->memory_accounting)
+ ret = dict_set_nstrn(dict, dict_key, keylen, "Enabled",
+ SLEN("Enabled"));
+ else
+ ret = dict_set_nstrn(dict, dict_key, keylen, "Disabled",
+ SLEN("Disabled"));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed"
+ " to set value for key %s in "
+ "dictionary",
+ key);
+ goto out;
+ }
+ } else if (strcmp(key, "config.transport") == 0) {
+ keylen = sprintf(dict_key, "key%d", count);
+ ret = dict_set_strn(dict, dict_key, keylen, key);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set %s in "
+ "dictionary",
+ key);
+ goto out;
}
+ keylen = sprintf(dict_key, "value%d", count);
+
+ if (volinfo->transport_type == GF_TRANSPORT_RDMA)
+ ret = dict_set_nstrn(dict, dict_key, keylen, "rdma",
+ SLEN("rdma"));
+ else if (volinfo->transport_type == GF_TRANSPORT_TCP)
+ ret = dict_set_nstrn(dict, dict_key, keylen, "tcp",
+ SLEN("tcp"));
+ else if (volinfo->transport_type == GF_TRANSPORT_BOTH_TCP_RDMA)
+ ret = dict_set_nstrn(dict, dict_key, keylen, "tcp,rdma",
+ SLEN("tcp,rdma"));
+ else
+ ret = dict_set_nstrn(dict, dict_key, keylen, "none",
+ SLEN("none"));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set value for key "
+ "%s in dictionary",
+ key);
+ goto out;
+ }
+ } else {
+ keylen = sprintf(dict_key, "key%d", count);
+ ret = dict_set_strn(dict, dict_key, keylen, key);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set %s in "
+ "dictionary",
+ key);
+ goto out;
+ }
+ keylen = sprintf(dict_key, "value%d", count);
+ ret = dict_get_str(priv->opts, key, &value);
+ if (!ret) {
+ ret = dict_set_strn(dict, dict_key, keylen, value);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_SET_FAILED,
+ "Failed to set %s in "
+ " dictionary",
+ key);
+ goto out;
+ }
+ } else {
+ ret = glusterd_get_default_val_for_volopt(
+ dict, _gf_false, key, orig_key, volinfo,
+ &rsp.op_errstr);
+ if (ret && !rsp.op_errstr) {
+ snprintf(err_str, sizeof(err_str),
+ "Failed to fetch the "
+ "value of %s, check "
+ "log file for more"
+ " details",
+ key);
+ }
+ }
+ }
}
+ /* Request is for a single option, explicitly set count to 1
+ * in the dictionary.
+ */
+ ret = dict_set_int32n(dict, "count", SLEN("count"), 1);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Failed to set count "
+ "value in the dictionary");
+ goto out;
+ }
+ } else {
+ /* Handle the "all" volume option request */
+ ret = glusterd_get_default_val_for_volopt(dict, _gf_true, NULL, NULL,
+ volinfo, &rsp.op_errstr);
+ if (ret && !rsp.op_errstr) {
+ snprintf(err_str, sizeof(err_str),
+ "Failed to fetch the value of all volume "
+ "options, check log file for more details");
+ }
+ }
out:
- if (ret) {
- if (!rsp.op_errstr)
- rsp.op_errstr = err_str;
- rsp.op_ret = ret;
- }
- else {
- rsp.op_errstr = "";
- rsp.op_ret = 0;
- }
+ if (ret) {
+ if (!rsp.op_errstr)
+ rsp.op_errstr = err_str;
+ rsp.op_ret = ret;
+ } else {
+ rsp.op_errstr = "";
+ rsp.op_ret = 0;
+ }
- ret = dict_allocate_and_serialize (dict, &rsp.dict.dict_val,
- &rsp.dict.dict_len);
+ ret = dict_allocate_and_serialize(dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
- glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf_cli_rsp);
- return ret;
+ glusterd_submit_reply(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp);
+ return ret;
}
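
For context on the hunk above: glusterd_get_volume_opts() answers the CLI with
flat "key%d"/"value%d" pairs plus an explicit "count" entry in the response
dict. A minimal sketch of that convention using the same dict helpers seen in
this patch (pack_single_opt() is a hypothetical name, not part of the change):

static int
pack_single_opt(dict_t *dict, int n, char *key, char *value)
{
    char dict_key[64] = {
        0,
    };
    int keylen = 0;
    int ret = -1;

    /* One "keyN"/"valueN" pair per option... */
    keylen = snprintf(dict_key, sizeof(dict_key), "key%d", n);
    ret = dict_set_strn(dict, dict_key, keylen, key);
    if (ret)
        return ret;

    keylen = snprintf(dict_key, sizeof(dict_key), "value%d", n);
    ret = dict_set_strn(dict, dict_key, keylen, value);
    if (ret)
        return ret;

    /* ...and the CLI reads back exactly "count" entries. */
    return dict_set_int32n(dict, "count", SLEN("count"), 1);
}
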
int
-__glusterd_handle_get_vol_opt (rpcsvc_request_t *req)
+__glusterd_handle_get_vol_opt(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf_cli_req cli_req = {{0,}};
- dict_t *dict = NULL;
- char err_str[64] = {0,};
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (req);
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{
+ 0,
+ }};
+ dict_t *dict = NULL;
+ char err_str[64] = {
+ 0,
+ };
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(req);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ snprintf(err_str, sizeof(err_str),
+ "Failed to decode "
+ "request received from cli");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",
+ err_str);
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new();
- ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
if (ret < 0) {
- snprintf (err_str, sizeof (err_str), "Failed to decode "
- "request received from cli");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "%s", err_str);
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- if (cli_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new ();
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to "
- "unserialize req-buffer to dictionary");
- snprintf (err_str, sizeof (err_str), "Unable to decode "
- "the command");
- goto out;
- } else {
- dict->extra_stdfree = cli_req.dict.dict_val;
- }
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf(err_str, sizeof(err_str),
+ "Unable to decode "
+ "the command");
+ goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
}
- ret = glusterd_get_volume_opts (req, dict);
+ }
+ ret = glusterd_get_volume_opts(req, dict);
out:
- if (dict)
- dict_unref (dict);
+ if (dict)
+ dict_unref(dict);
- return ret;
+ return ret;
}
int
-glusterd_handle_get_vol_opt (rpcsvc_request_t *req)
+glusterd_handle_get_vol_opt(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __glusterd_handle_get_vol_opt);
+ return glusterd_big_locked_handler(req, __glusterd_handle_get_vol_opt);
}
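
The pair of entry points above shows the handler split used throughout this
file: the registered glusterd_handle_*() symbol defers to
glusterd_big_locked_handler(), which runs the __glusterd_handle_*() worker
under glusterd's single big lock. A sketch of what that wrapper amounts to
(the big_lock field name is assumed from the surrounding code, not shown by
this patch):

int
glusterd_big_locked_handler(rpcsvc_request_t *req, rpcsvc_actor actor_fn)
{
    glusterd_conf_t *priv = THIS->private;
    int ret = -1;

    /* Serialize every CLI/peer handler behind one synctask lock. */
    synclock_lock(&priv->big_lock);
    ret = actor_fn(req);
    synclock_unlock(&priv->big_lock);

    return ret;
}
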
extern struct rpc_clnt_program gd_brick_prog;
static int
-glusterd_print_global_options (dict_t *opts, char *key, data_t *val, void *data)
+glusterd_print_global_options(dict_t *opts, char *key, data_t *val, void *data)
{
- FILE *fp = NULL;
+ FILE *fp = NULL;
- GF_VALIDATE_OR_GOTO (THIS->name, key, out);
- GF_VALIDATE_OR_GOTO (THIS->name, val, out);
- GF_VALIDATE_OR_GOTO (THIS->name, data, out);
+ GF_VALIDATE_OR_GOTO(THIS->name, key, out);
+ GF_VALIDATE_OR_GOTO(THIS->name, val, out);
+ GF_VALIDATE_OR_GOTO(THIS->name, data, out);
- if (strcmp (key, GLUSTERD_GLOBAL_OPT_VERSION) == 0)
- goto out;
+ if (strcmp(key, GLUSTERD_GLOBAL_OPT_VERSION) == 0)
+ goto out;
- fp = (FILE *) data;
- fprintf (fp, "%s: %s\n", key, val->data);
+ fp = (FILE *)data;
+ fprintf(fp, "%s: %s\n", key, val->data);
out:
- return 0;
+ return 0;
}
static int
-glusterd_print_volume_options (dict_t *opts, char *key, data_t *val, void *data)
+glusterd_print_volume_options(dict_t *opts, char *key, data_t *val, void *data)
{
- FILE *fp = NULL;
+ FILE *fp = NULL;
- GF_VALIDATE_OR_GOTO (THIS->name, key, out);
- GF_VALIDATE_OR_GOTO (THIS->name, val, out);
- GF_VALIDATE_OR_GOTO (THIS->name, data, out);
+ GF_VALIDATE_OR_GOTO(THIS->name, key, out);
+ GF_VALIDATE_OR_GOTO(THIS->name, val, out);
+ GF_VALIDATE_OR_GOTO(THIS->name, data, out);
- fp = (FILE *) data;
- fprintf (fp, "Volume%d.options.%s: %s\n", volcount, key, val->data);
+ fp = (FILE *)data;
+ fprintf(fp, "Volume%d.options.%s: %s\n", volcount, key, val->data);
out:
- return 0;
+ return 0;
}
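
Both printers above are dict_foreach() callbacks, with the output FILE *
smuggled through the void *data argument, so dumping an entire options dict
stays a one-liner, exactly as the later hunks of this patch do:

/* Usage sketch: print every key/value pair of an options dict. */
if (priv->opts)
    dict_foreach(priv->opts, glusterd_print_global_options, fp);
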
static int
-glusterd_print_gsync_status (FILE *fp, dict_t *gsync_dict)
+glusterd_print_gsync_status(FILE *fp, dict_t *gsync_dict)
{
- int ret = -1;
- int gsync_count = 0;
- int i = 0;
- gf_gsync_status_t *status_vals = NULL;
- char status_val_name[PATH_MAX] = {0,};
+ int ret = -1;
+ int gsync_count = 0;
+ int i = 0;
+ gf_gsync_status_t *status_vals = NULL;
+ char status_val_name[PATH_MAX] = {
+ 0,
+ };
- GF_VALIDATE_OR_GOTO (THIS->name, fp, out);
- GF_VALIDATE_OR_GOTO (THIS->name, gsync_dict, out);
+ GF_VALIDATE_OR_GOTO(THIS->name, fp, out);
+ GF_VALIDATE_OR_GOTO(THIS->name, gsync_dict, out);
- ret = dict_get_int32n (gsync_dict, "gsync-count",
- SLEN ("gsync-count"), &gsync_count);
+ ret = dict_get_int32n(gsync_dict, "gsync-count", SLEN("gsync-count"),
+ &gsync_count);
- fprintf (fp, "Volume%d.gsync_count: %d\n", volcount, gsync_count);
+ fprintf(fp, "Volume%d.gsync_count: %d\n", volcount, gsync_count);
- if (gsync_count == 0) {
- ret = 0;
- goto out;
- }
-
- for (i = 0; i < gsync_count; i++) {
- snprintf (status_val_name, sizeof(status_val_name),
- "status_value%d", i);
+ if (gsync_count == 0) {
+ ret = 0;
+ goto out;
+ }
- ret = dict_get_bin (gsync_dict, status_val_name,
- (void **)&(status_vals));
- if (ret)
- goto out;
+ for (i = 0; i < gsync_count; i++) {
+ snprintf(status_val_name, sizeof(status_val_name), "status_value%d", i);
- fprintf (fp, "Volume%d.pair%d.session_slave: %s\n", volcount, i+1,
- get_struct_variable(21, status_vals));
- fprintf (fp, "Volume%d.pair%d.master_node: %s\n", volcount, i+1,
- get_struct_variable(0, status_vals));
- fprintf (fp, "Volume%d.pair%d.master_volume: %s\n", volcount, i+1,
- get_struct_variable(1, status_vals));
- fprintf (fp, "Volume%d.pair%d.master_brick: %s\n", volcount, i+1,
- get_struct_variable(2, status_vals));
- fprintf (fp, "Volume%d.pair%d.slave_user: %s\n", volcount, i+1,
- get_struct_variable(3, status_vals));
- fprintf (fp, "Volume%d.pair%d.slave: %s\n", volcount, i+1,
- get_struct_variable(4, status_vals));
- fprintf (fp, "Volume%d.pair%d.slave_node: %s\n", volcount, i+1,
- get_struct_variable(5, status_vals));
- fprintf (fp, "Volume%d.pair%d.status: %s\n", volcount, i+1,
- get_struct_variable(6, status_vals));
- fprintf (fp, "Volume%d.pair%d.crawl_status: %s\n", volcount, i+1,
- get_struct_variable(7, status_vals));
- fprintf (fp, "Volume%d.pair%d.last_synced: %s\n", volcount, i+1,
- get_struct_variable(8, status_vals));
- fprintf (fp, "Volume%d.pair%d.entry: %s\n", volcount, i+1,
- get_struct_variable(9, status_vals));
- fprintf (fp, "Volume%d.pair%d.data: %s\n", volcount, i+1,
- get_struct_variable(10, status_vals));
- fprintf (fp, "Volume%d.pair%d.meta: %s\n", volcount, i+1,
- get_struct_variable(11, status_vals));
- fprintf (fp, "Volume%d.pair%d.failures: %s\n", volcount, i+1,
- get_struct_variable(12, status_vals));
- fprintf (fp, "Volume%d.pair%d.checkpoint_time: %s\n", volcount,
- i+1, get_struct_variable(13, status_vals));
- fprintf (fp, "Volume%d.pair%d.checkpoint_completed: %s\n",
- volcount, i+1, get_struct_variable(14, status_vals));
- fprintf (fp, "Volume%d.pair%d.checkpoint_completion_time: %s\n",
- volcount, i+1, get_struct_variable(15, status_vals));
- }
+ ret = dict_get_bin(gsync_dict, status_val_name,
+ (void **)&(status_vals));
+ if (ret)
+ goto out;
+
+ fprintf(fp, "Volume%d.pair%d.session_slave: %s\n", volcount, i + 1,
+ get_struct_variable(21, status_vals));
+ fprintf(fp, "Volume%d.pair%d.master_node: %s\n", volcount, i + 1,
+ get_struct_variable(0, status_vals));
+ fprintf(fp, "Volume%d.pair%d.master_volume: %s\n", volcount, i + 1,
+ get_struct_variable(1, status_vals));
+ fprintf(fp, "Volume%d.pair%d.master_brick: %s\n", volcount, i + 1,
+ get_struct_variable(2, status_vals));
+ fprintf(fp, "Volume%d.pair%d.slave_user: %s\n", volcount, i + 1,
+ get_struct_variable(3, status_vals));
+ fprintf(fp, "Volume%d.pair%d.slave: %s\n", volcount, i + 1,
+ get_struct_variable(4, status_vals));
+ fprintf(fp, "Volume%d.pair%d.slave_node: %s\n", volcount, i + 1,
+ get_struct_variable(5, status_vals));
+ fprintf(fp, "Volume%d.pair%d.status: %s\n", volcount, i + 1,
+ get_struct_variable(6, status_vals));
+ fprintf(fp, "Volume%d.pair%d.crawl_status: %s\n", volcount, i + 1,
+ get_struct_variable(7, status_vals));
+ fprintf(fp, "Volume%d.pair%d.last_synced: %s\n", volcount, i + 1,
+ get_struct_variable(8, status_vals));
+ fprintf(fp, "Volume%d.pair%d.entry: %s\n", volcount, i + 1,
+ get_struct_variable(9, status_vals));
+ fprintf(fp, "Volume%d.pair%d.data: %s\n", volcount, i + 1,
+ get_struct_variable(10, status_vals));
+ fprintf(fp, "Volume%d.pair%d.meta: %s\n", volcount, i + 1,
+ get_struct_variable(11, status_vals));
+ fprintf(fp, "Volume%d.pair%d.failures: %s\n", volcount, i + 1,
+ get_struct_variable(12, status_vals));
+ fprintf(fp, "Volume%d.pair%d.checkpoint_time: %s\n", volcount, i + 1,
+ get_struct_variable(13, status_vals));
+ fprintf(fp, "Volume%d.pair%d.checkpoint_completed: %s\n", volcount,
+ i + 1, get_struct_variable(14, status_vals));
+ fprintf(fp, "Volume%d.pair%d.checkpoint_completion_time: %s\n",
+ volcount, i + 1, get_struct_variable(15, status_vals));
+ }
out:
- return ret;
+ return ret;
}
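
The dict consumed above carries a "gsync-count" integer and one
gf_gsync_status_t blob per session under "status_value%d";
glusterd_get_gsync_status_mst() fills it in the next function. A hedged
sketch of that producing side (pack_gsync_entry() is a hypothetical helper):

static int
pack_gsync_entry(dict_t *gsync_dict, int idx, gf_gsync_status_t *vals)
{
    char name[PATH_MAX] = {
        0,
    };
    int ret = -1;

    snprintf(name, sizeof(name), "status_value%d", idx);
    /* dict_set_bin() takes ownership of vals on success. */
    ret = dict_set_bin(gsync_dict, name, vals, sizeof(*vals));
    if (ret)
        return ret;

    /* Keep the count in step so the reader's loop terminates. */
    return dict_set_int32n(gsync_dict, "gsync-count", SLEN("gsync-count"),
                           idx + 1);
}
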
static int
-glusterd_print_gsync_status_by_vol (FILE *fp, glusterd_volinfo_t *volinfo)
+glusterd_print_gsync_status_by_vol(FILE *fp, glusterd_volinfo_t *volinfo)
{
- int ret = -1;
- dict_t *gsync_rsp_dict = NULL;
- char my_hostname[256] = {0,};
+ int ret = -1;
+ dict_t *gsync_rsp_dict = NULL;
+ char my_hostname[256] = {
+ 0,
+ };
- GF_VALIDATE_OR_GOTO (THIS->name, volinfo, out);
- GF_VALIDATE_OR_GOTO (THIS->name, fp, out);
+ GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO(THIS->name, fp, out);
- gsync_rsp_dict = dict_new();
- if (!gsync_rsp_dict)
- goto out;
+ gsync_rsp_dict = dict_new();
+ if (!gsync_rsp_dict)
+ goto out;
- ret = gethostname(my_hostname, sizeof (my_hostname));
- if (ret) {
- /* stick to N/A */
- (void) strcpy (my_hostname, "N/A");
- }
+ ret = gethostname(my_hostname, sizeof(my_hostname));
+ if (ret) {
+ /* stick to N/A */
+ (void)strcpy(my_hostname, "N/A");
+ }
- ret = glusterd_get_gsync_status_mst (volinfo, gsync_rsp_dict,
- my_hostname);
- /* Ignoring ret as above function always returns ret = 0 */
+ ret = glusterd_get_gsync_status_mst(volinfo, gsync_rsp_dict, my_hostname);
+ /* Ignoring ret as above function always returns ret = 0 */
- ret = glusterd_print_gsync_status (fp, gsync_rsp_dict);
+ ret = glusterd_print_gsync_status(fp, gsync_rsp_dict);
out:
- if (gsync_rsp_dict)
- dict_unref (gsync_rsp_dict);
- return ret;
+ if (gsync_rsp_dict)
+ dict_unref(gsync_rsp_dict);
+ return ret;
}
static int
-glusterd_print_snapinfo_by_vol (FILE *fp, glusterd_volinfo_t *volinfo, int volcount)
+glusterd_print_snapinfo_by_vol(FILE *fp, glusterd_volinfo_t *volinfo,
+ int volcount)
{
- int ret = -1;
- glusterd_volinfo_t *snap_vol = NULL;
- glusterd_volinfo_t *tmp_vol = NULL;
- glusterd_snap_t *snapinfo = NULL;
- int snapcount = 0;
- char timestr[64] = {0,};
- char snap_status_str[STATUS_STRLEN] = {0,};
-
- GF_VALIDATE_OR_GOTO (THIS->name, volinfo, out);
- GF_VALIDATE_OR_GOTO (THIS->name, fp, out);
-
- cds_list_for_each_entry_safe (snap_vol, tmp_vol, &volinfo->snap_volumes,
- snapvol_list) {
- snapcount++;
- snapinfo = snap_vol->snapshot;
-
- ret = glusterd_get_snap_status_str (snapinfo, snap_status_str);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_STATE_STR_GET_FAILED,
- "Failed to get status for snapshot: %s",
- snapinfo->snapname);
+ int ret = -1;
+ glusterd_volinfo_t *snap_vol = NULL;
+ glusterd_volinfo_t *tmp_vol = NULL;
+ glusterd_snap_t *snapinfo = NULL;
+ int snapcount = 0;
+ char timestr[64] = {
+ 0,
+ };
+ char snap_status_str[STATUS_STRLEN] = {
+ 0,
+ };
+
+ GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO(THIS->name, fp, out);
+
+ cds_list_for_each_entry_safe(snap_vol, tmp_vol, &volinfo->snap_volumes,
+ snapvol_list)
+ {
+ snapcount++;
+ snapinfo = snap_vol->snapshot;
+
+ ret = glusterd_get_snap_status_str(snapinfo, snap_status_str);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
+ "Failed to get status for snapshot: %s", snapinfo->snapname);
- goto out;
- }
- gf_time_fmt (timestr, sizeof timestr, snapinfo->time_stamp,
- gf_timefmt_FT);
-
- fprintf (fp, "Volume%d.snapshot%d.name: %s\n",
- volcount, snapcount, snapinfo->snapname);
- fprintf (fp, "Volume%d.snapshot%d.id: %s\n", volcount, snapcount,
- uuid_utoa (snapinfo->snap_id));
- fprintf (fp, "Volume%d.snapshot%d.time: %s\n",
- volcount, snapcount, timestr);
-
- if (snapinfo->description)
- fprintf (fp, "Volume%d.snapshot%d.description: %s\n",
- volcount, snapcount, snapinfo->description);
- fprintf (fp, "Volume%d.snapshot%d.status: %s\n",
- volcount, snapcount, snap_status_str);
+ goto out;
}
+ gf_time_fmt(timestr, sizeof timestr, snapinfo->time_stamp,
+ gf_timefmt_FT);
- ret = 0;
+ fprintf(fp, "Volume%d.snapshot%d.name: %s\n", volcount, snapcount,
+ snapinfo->snapname);
+ fprintf(fp, "Volume%d.snapshot%d.id: %s\n", volcount, snapcount,
+ uuid_utoa(snapinfo->snap_id));
+ fprintf(fp, "Volume%d.snapshot%d.time: %s\n", volcount, snapcount,
+ timestr);
+
+ if (snapinfo->description)
+ fprintf(fp, "Volume%d.snapshot%d.description: %s\n", volcount,
+ snapcount, snapinfo->description);
+ fprintf(fp, "Volume%d.snapshot%d.status: %s\n", volcount, snapcount,
+ snap_status_str);
+ }
+
+ ret = 0;
out:
- return ret;
+ return ret;
}
static int
-glusterd_print_client_details (FILE *fp, dict_t *dict,
- glusterd_volinfo_t *volinfo, int volcount,
- glusterd_brickinfo_t *brickinfo, int brickcount)
+glusterd_print_client_details(FILE *fp, dict_t *dict,
+ glusterd_volinfo_t *volinfo, int volcount,
+ glusterd_brickinfo_t *brickinfo, int brickcount)
{
- int ret = -1;
- xlator_t *this = NULL;
- int brick_index = -1;
- int client_count = 0;
- char key[64] = {0,};
- int keylen;
- char *clientname = NULL;
- uint64_t bytesread = 0;
- uint64_t byteswrite = 0;
- uint32_t opversion = 0;
-
- glusterd_pending_node_t *pending_node = NULL;
- rpc_clnt_t *rpc = NULL;
- struct syncargs args = {0,};
- gd1_mgmt_brick_op_req *brick_req = NULL;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
-
- GF_VALIDATE_OR_GOTO (this->name, dict, out);
-
- if (gf_uuid_compare (brickinfo->uuid, MY_UUID) ||
- !glusterd_is_brick_started (brickinfo)) {
- ret = 0;
- goto out;
- }
-
- brick_index++;
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- gf_msg (this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
- "Unable to allocate memory");
- goto out;
- }
-
- pending_node->node = brickinfo;
- pending_node->type = GD_NODE_BRICK;
- pending_node->index = brick_index;
-
- rpc = glusterd_pending_node_get_rpc (pending_node);
- if (!rpc) {
- ret = -1;
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE,
- "Failed to retrieve rpc object");
- goto out;
- }
-
- brick_req = GF_CALLOC (1, sizeof (*brick_req),
- gf_gld_mt_mop_brick_req_t);
- if (!brick_req) {
- ret = -1;
- gf_msg (this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
- "Unable to allocate memory");
- goto out;
- }
-
- brick_req->op = GLUSTERD_BRICK_STATUS;
- brick_req->name = "";
-
- ret = dict_set_strn (dict, "brick-name", SLEN ("brick-name"),
- brickinfo->path);
- if (ret)
- goto out;
-
- ret = dict_set_int32n (dict, "cmd", SLEN ("cmd"),
- GF_CLI_STATUS_CLIENTS);
- if (ret)
- goto out;
+ int ret = -1;
+ xlator_t *this = NULL;
+ int brick_index = -1;
+ int client_count = 0;
+ char key[64] = {
+ 0,
+ };
+ int keylen;
+ char *clientname = NULL;
+ uint64_t bytesread = 0;
+ uint64_t byteswrite = 0;
+ uint32_t opversion = 0;
+
+ glusterd_pending_node_t *pending_node = NULL;
+ rpc_clnt_t *rpc = NULL;
+ struct syncargs args = {
+ 0,
+ };
+ gd1_mgmt_brick_op_req *brick_req = NULL;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ GF_VALIDATE_OR_GOTO(this->name, dict, out);
+
+ if (gf_uuid_compare(brickinfo->uuid, MY_UUID) ||
+ !glusterd_is_brick_started(brickinfo)) {
+ ret = 0;
+ goto out;
+ }
- ret = dict_set_strn (dict, "volname", SLEN ("volname"),
- volinfo->volname);
- if (ret)
- goto out;
+ brick_index++;
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "Unable to allocate memory");
+ goto out;
+ }
- ret = dict_allocate_and_serialize (dict, &brick_req->input.input_val,
- &brick_req->input.input_len);
- if (ret)
- goto out;
+ pending_node->node = brickinfo;
+ pending_node->type = GD_NODE_BRICK;
+ pending_node->index = brick_index;
- GD_SYNCOP (rpc, (&args), NULL, gd_syncop_brick_op_cbk, brick_req,
- &gd_brick_prog, brick_req->op, xdr_gd1_mgmt_brick_op_req);
+ rpc = glusterd_pending_node_get_rpc(pending_node);
+ if (!rpc) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE,
+ "Failed to retrieve rpc object");
+ goto out;
+ }
- if (args.op_ret)
- goto out;
+ brick_req = GF_CALLOC(1, sizeof(*brick_req), gf_gld_mt_mop_brick_req_t);
+ if (!brick_req) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "Unable to allocate memory");
+ goto out;
+ }
+
+ brick_req->op = GLUSTERD_BRICK_STATUS;
+ brick_req->name = "";
+
+ ret = dict_set_strn(dict, "brick-name", SLEN("brick-name"),
+ brickinfo->path);
+ if (ret)
+ goto out;
+
+ ret = dict_set_int32n(dict, "cmd", SLEN("cmd"), GF_CLI_STATUS_CLIENTS);
+ if (ret)
+ goto out;
+
+ ret = dict_set_strn(dict, "volname", SLEN("volname"), volinfo->volname);
+ if (ret)
+ goto out;
+
+ ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val,
+ &brick_req->input.input_len);
+ if (ret)
+ goto out;
+
+ GD_SYNCOP(rpc, (&args), NULL, gd_syncop_brick_op_cbk, brick_req,
+ &gd_brick_prog, brick_req->op, xdr_gd1_mgmt_brick_op_req);
+
+ if (args.op_ret)
+ goto out;
+
+ ret = dict_get_int32n(args.dict, "clientcount", SLEN("clientcount"),
+ &client_count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Couldn't get client count");
+ goto out;
+ }
+
+ fprintf(fp, "Volume%d.Brick%d.client_count: %d\n", volcount, brickcount,
+ client_count);
+
+ if (client_count == 0) {
+ ret = 0;
+ goto out;
+ }
- ret = dict_get_int32n (args.dict, "clientcount", SLEN("clientcount"),
- &client_count);
+ int i;
+ for (i = 1; i <= client_count; i++) {
+ keylen = snprintf(key, sizeof(key), "client%d.hostname", i - 1);
+ ret = dict_get_strn(args.dict, key, keylen, &clientname);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
- "Couldn't get client count");
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Failed to get client hostname");
+ goto out;
}
- fprintf (fp, "Volume%d.Brick%d.client_count: %d\n",
- volcount, brickcount, client_count);
+ snprintf(key, sizeof(key), "Client%d.hostname", i);
+ fprintf(fp, "Volume%d.Brick%d.%s: %s\n", volcount, brickcount, key,
+ clientname);
- if (client_count == 0) {
- ret = 0;
- goto out;
+ snprintf(key, sizeof(key), "client%d.bytesread", i - 1);
+ ret = dict_get_uint64(args.dict, key, &bytesread);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Failed to get bytesread from client");
+ goto out;
}
- int i;
- for (i = 1; i <= client_count; i++) {
- keylen = snprintf (key, sizeof (key),
- "client%d.hostname", i-1);
- ret = dict_get_strn (args.dict, key, keylen, &clientname);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Failed to get client hostname");
- goto out;
- }
-
- snprintf (key, sizeof (key), "Client%d.hostname", i);
- fprintf (fp, "Volume%d.Brick%d.%s: %s\n",
- volcount, brickcount, key, clientname);
-
- snprintf (key, sizeof (key), "client%d.bytesread", i-1);
- ret = dict_get_uint64 (args.dict, key, &bytesread);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Failed to get bytesread from client");
- goto out;
- }
-
- snprintf (key, sizeof (key), "Client%d.bytesread", i);
- fprintf (fp, "Volume%d.Brick%d.%s: %"PRIu64"\n",
- volcount, brickcount, key, bytesread);
-
- snprintf (key, sizeof (key), "client%d.byteswrite", i-1);
- ret = dict_get_uint64 (args.dict, key, &byteswrite);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Failed to get byteswrite from client");
- goto out;
- }
-
- snprintf (key, sizeof (key), "Client%d.byteswrite", i);
- fprintf (fp, "Volume%d.Brick%d.%s: %"PRIu64"\n",
- volcount, brickcount, key, byteswrite);
-
- snprintf (key, sizeof (key), "client%d.opversion", i-1);
- ret = dict_get_uint32 (args.dict, key, &opversion);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Failed to get client opversion");
- goto out;
- }
+ snprintf(key, sizeof(key), "Client%d.bytesread", i);
+ fprintf(fp, "Volume%d.Brick%d.%s: %" PRIu64 "\n", volcount, brickcount,
+ key, bytesread);
- snprintf (key, sizeof (key), "Client%d.opversion", i);
- fprintf (fp, "Volume%d.Brick%d.%s: %"PRIu32"\n",
- volcount, brickcount, key, opversion);
+ snprintf(key, sizeof(key), "client%d.byteswrite", i - 1);
+ ret = dict_get_uint64(args.dict, key, &byteswrite);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Failed to get byteswrite from client");
+ goto out;
}
-out:
- if (pending_node)
- GF_FREE (pending_node);
+ snprintf(key, sizeof(key), "Client%d.byteswrite", i);
+ fprintf(fp, "Volume%d.Brick%d.%s: %" PRIu64 "\n", volcount, brickcount,
+ key, byteswrite);
- if (brick_req) {
- if (brick_req->input.input_val)
- GF_FREE (brick_req->input.input_val);
- GF_FREE (brick_req);
+ snprintf(key, sizeof(key), "client%d.opversion", i - 1);
+ ret = dict_get_uint32(args.dict, key, &opversion);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Failed to get client opversion");
+ goto out;
}
- if (args.dict)
- dict_unref (args.dict);
- if (args.errstr)
- GF_FREE (args.errstr);
- return ret;
+ snprintf(key, sizeof(key), "Client%d.opversion", i);
+ fprintf(fp, "Volume%d.Brick%d.%s: %" PRIu32 "\n", volcount, brickcount,
+ key, opversion);
+ }
+
+out:
+ if (pending_node)
+ GF_FREE(pending_node);
+
+ if (brick_req) {
+ if (brick_req->input.input_val)
+ GF_FREE(brick_req->input.input_val);
+ GF_FREE(brick_req);
+ }
+ if (args.dict)
+ dict_unref(args.dict);
+ if (args.errstr)
+ GF_FREE(args.errstr);
+
+ return ret;
}
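
The client loop above expects the brick's reply dict to hold "clientcount"
plus 0-indexed "client%d.*" keys, which it re-emits 1-indexed into the state
file. A sketch of that reply layout from the producing side
(fill_client_entry() is a hypothetical helper; the real values come back in
the brick's GLUSTERD_BRICK_STATUS reply):

static int
fill_client_entry(dict_t *reply, int idx, const char *host, uint64_t rd,
                  uint64_t wr, uint32_t opversion)
{
    char key[64] = {
        0,
    };
    int ret = -1;

    snprintf(key, sizeof(key), "client%d.hostname", idx);
    ret = dict_set_dynstr_with_alloc(reply, key, host);
    if (ret)
        return ret;

    snprintf(key, sizeof(key), "client%d.bytesread", idx);
    ret = dict_set_uint64(reply, key, rd);
    if (ret)
        return ret;

    snprintf(key, sizeof(key), "client%d.byteswrite", idx);
    ret = dict_set_uint64(reply, key, wr);
    if (ret)
        return ret;

    snprintf(key, sizeof(key), "client%d.opversion", idx);
    return dict_set_uint32(reply, key, opversion);
}
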
static int
-glusterd_get_state (rpcsvc_request_t *req, dict_t *dict)
+glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
{
- int32_t ret = -1;
- gf_cli_rsp rsp = {0,};
- FILE *fp = NULL;
- DIR *dp = NULL;
- char err_str[2048] = {0,};
- glusterd_conf_t *priv = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- glusterd_peer_hostname_t *peer_hostname_info = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- xlator_t *this = NULL;
- dict_t *vol_all_opts = NULL;
- struct statvfs brickstat = {0};
- char *odir = NULL;
- char *filename = NULL;
- char *ofilepath = NULL;
- char *tmp_str = NULL;
- int count = 0;
- int count_bkp = 0;
- int odirlen = 0;
- time_t now = 0;
- char timestamp[16] = {0,};
- uint32_t get_state_cmd = 0;
- uint64_t memtotal = 0;
- uint64_t memfree = 0;
- int start_index = 0;
- char id_str[64] = {0,};
-
- char *vol_type_str = NULL;
- char *hot_tier_type_str = NULL;
- char *cold_tier_type_str = NULL;
-
- char transport_type_str[STATUS_STRLEN] = {0,};
- char quorum_status_str[STATUS_STRLEN] = {0,};
- char rebal_status_str[STATUS_STRLEN] = {0,};
- char vol_status_str[STATUS_STRLEN] = {0,};
-
- this = THIS;
- GF_VALIDATE_OR_GOTO (THIS->name, this, out);
-
- priv = THIS->private;
- GF_VALIDATE_OR_GOTO (this->name, priv, out);
-
- GF_VALIDATE_OR_GOTO (this->name, dict, out);
-
- ret = dict_get_strn (dict, "odir", SLEN ("odir"), &tmp_str);
- if (ret) {
- odirlen = gf_asprintf (&odir, "%s", "/var/run/gluster/");
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_DICT_GET_FAILED,
- "Default output directory: %s", odir);
- } else {
- odirlen = gf_asprintf (&odir, "%s", tmp_str);
- }
-
- dp = sys_opendir (odir);
- if (dp) {
- sys_closedir (dp);
- } else {
- if (errno == ENOENT) {
- snprintf (err_str, sizeof (err_str),
- "Output directory %s does not exist.", odir);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "%s", err_str);
- } else if (errno == ENOTDIR) {
- snprintf (err_str, sizeof (err_str), "Output directory "
- "does not exist. %s points to a file.", odir);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "%s", err_str);
- }
+ int32_t ret = -1;
+ gf_cli_rsp rsp = {
+ 0,
+ };
+ FILE *fp = NULL;
+ DIR *dp = NULL;
+ char err_str[2048] = {
+ 0,
+ };
+ glusterd_conf_t *priv = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_peer_hostname_t *peer_hostname_info = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ xlator_t *this = NULL;
+ dict_t *vol_all_opts = NULL;
+ struct statvfs brickstat = {0};
+ char *odir = NULL;
+ char *filename = NULL;
+ char *ofilepath = NULL;
+ char *tmp_str = NULL;
+ int count = 0;
+ int count_bkp = 0;
+ int odirlen = 0;
+ time_t now = 0;
+ char timestamp[16] = {
+ 0,
+ };
+ uint32_t get_state_cmd = 0;
+ uint64_t memtotal = 0;
+ uint64_t memfree = 0;
+ int start_index = 0;
+ char id_str[64] = {
+ 0,
+ };
+
+ char *vol_type_str = NULL;
+ char *hot_tier_type_str = NULL;
+ char *cold_tier_type_str = NULL;
+
+ char transport_type_str[STATUS_STRLEN] = {
+ 0,
+ };
+ char quorum_status_str[STATUS_STRLEN] = {
+ 0,
+ };
+ char rebal_status_str[STATUS_STRLEN] = {
+ 0,
+ };
+ char vol_status_str[STATUS_STRLEN] = {
+ 0,
+ };
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO(THIS->name, this, out);
+
+ priv = THIS->private;
+ GF_VALIDATE_OR_GOTO(this->name, priv, out);
+
+ GF_VALIDATE_OR_GOTO(this->name, dict, out);
+
+ ret = dict_get_strn(dict, "odir", SLEN("odir"), &tmp_str);
+ if (ret) {
+ odirlen = gf_asprintf(&odir, "%s", "/var/run/gluster/");
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
+ "Default output directory: %s", odir);
+ } else {
+ odirlen = gf_asprintf(&odir, "%s", tmp_str);
+ }
+
+ dp = sys_opendir(odir);
+ if (dp) {
+ sys_closedir(dp);
+ } else {
+ if (errno == ENOENT) {
+ snprintf(err_str, sizeof(err_str),
+ "Output directory %s does not exist.", odir);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ err_str);
+ } else if (errno == ENOTDIR) {
+            snprintf(err_str, sizeof(err_str),
+                     "Output path %s is not a directory.", odir);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ err_str);
+ }
+
+ GF_FREE(odir);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_strn(dict, "filename", SLEN("filename"), &tmp_str);
+ if (ret) {
+ now = time(NULL);
+ strftime(timestamp, sizeof(timestamp), "%Y%m%d_%H%M%S",
+ localtime(&now));
+ gf_asprintf(&filename, "%s_%s", "glusterd_state", timestamp);
+
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
+ "Default filename: %s", filename);
+ } else {
+ gf_asprintf(&filename, "%s", tmp_str);
+ }
+
+    /* odir was allocated by gf_asprintf() with no room to grow, so build
+     * the separator into ofilepath instead of strcat()ing onto odir. */
+    ret = gf_asprintf(&ofilepath, "%s%s%s", odir,
+                      (odir[odirlen - 1] != '/') ? "/" : "", filename);
+ if (ret < 0) {
+ GF_FREE(odir);
+ GF_FREE(filename);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to get the output path");
+ ret = -1;
+ goto out;
+ }
+ GF_FREE(odir);
+ GF_FREE(filename);
+
+ ret = dict_set_dynstrn(dict, "ofilepath", SLEN("ofilepath"), ofilepath);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to set output path");
+ goto out;
+ }
+
+ fp = fopen(ofilepath, "w");
+ if (!fp) {
+ snprintf(err_str, sizeof(err_str), "Failed to open file at %s",
+ ofilepath);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
+ err_str);
+ ret = -1;
+ goto out;
+ }
- GF_FREE (odir);
- ret = -1;
- goto out;
- }
+ ret = dict_get_uint32(dict, "getstate-cmd", &get_state_cmd);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "get-state command type not set");
+ ret = 0;
+ }
- ret = dict_get_strn (dict, "filename", SLEN ("filename"), &tmp_str);
- if (ret) {
- now = time (NULL);
- strftime (timestamp, sizeof (timestamp),
- "%Y%m%d_%H%M%S", localtime (&now));
- gf_asprintf (&filename, "%s_%s", "glusterd_state", timestamp);
-
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_DICT_GET_FAILED,
- "Default filename: %s", filename);
- } else {
- gf_asprintf (&filename, "%s", tmp_str);
- }
+ if (get_state_cmd == GF_CLI_GET_STATE_VOLOPTS) {
+ fprintf(fp, "[Volume Options]\n");
+ cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
+ {
+ fprintf(fp, "Volume%d.name: %s\n", ++count, volinfo->volname);
- if (odir[odirlen-1] != '/')
- strcat (odir, "/");
+ volcount = count;
+ vol_all_opts = dict_new();
- ret = gf_asprintf (&ofilepath, "%s%s", odir, filename);
- if (ret < 0) {
- GF_FREE (odir);
- GF_FREE (filename);
- gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Unable to get the output path");
- ret = -1;
- goto out;
- }
- GF_FREE (odir);
- GF_FREE (filename);
+ ret = glusterd_get_default_val_for_volopt(
+ vol_all_opts, _gf_true, NULL, NULL, volinfo, &rsp.op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_OPTS_IMPORT_FAIL,
+ "Failed to "
+ "fetch the value of all volume options "
+ "for volume %s",
+ volinfo->volname);
+ continue;
+ }
- ret = dict_set_dynstrn (dict, "ofilepath", SLEN ("ofilepath"),
- ofilepath);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "Unable to set output path");
- goto out;
- }
+ dict_foreach(vol_all_opts, glusterd_print_volume_options, fp);
- fp = fopen (ofilepath, "w");
- if (!fp) {
- snprintf (err_str, sizeof (err_str),
- "Failed to open file at %s", ofilepath);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "%s", err_str);
- ret = -1;
- goto out;
+ if (vol_all_opts)
+ dict_unref(vol_all_opts);
}
+ ret = 0;
+ goto out;
+ }
- ret = dict_get_uint32 (dict, "getstate-cmd", &get_state_cmd);
- if (ret) {
- gf_msg_debug (this->name, 0, "get-state command type not set");
- ret = 0;
- }
+ fprintf(fp, "[Global]\n");
- if (get_state_cmd == GF_CLI_GET_STATE_VOLOPTS) {
- fprintf (fp, "[Volume Options]\n");
- cds_list_for_each_entry (volinfo, &priv->volumes, vol_list) {
- fprintf (fp, "Volume%d.name: %s\n",
- ++count, volinfo->volname);
-
- volcount = count;
- vol_all_opts = dict_new ();
-
- ret = glusterd_get_default_val_for_volopt (vol_all_opts,
- _gf_true, NULL, NULL, volinfo,
- &rsp.op_errstr);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_OPTS_IMPORT_FAIL, "Failed to "
- "fetch the value of all volume options "
- "for volume %s", volinfo->volname);
- continue;
- }
+ uuid_utoa_r(priv->uuid, id_str);
+ fprintf(fp, "MYUUID: %s\n", id_str);
- dict_foreach (vol_all_opts, glusterd_print_volume_options,
- fp);
+ fprintf(fp, "op-version: %d\n", priv->op_version);
- if (vol_all_opts)
- dict_unref (vol_all_opts);
- }
- ret = 0;
- goto out;
- }
-
- fprintf (fp, "[Global]\n");
+ fprintf(fp, "\n[Global options]\n");
- uuid_utoa_r (priv->uuid, id_str);
- fprintf (fp, "MYUUID: %s\n", id_str);
+ if (priv->opts)
+ dict_foreach(priv->opts, glusterd_print_global_options, fp);
- fprintf (fp, "op-version: %d\n", priv->op_version);
+ rcu_read_lock();
+ fprintf(fp, "\n[Peers]\n");
- fprintf (fp, "\n[Global options]\n");
+ cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
+ {
+ fprintf(fp, "Peer%d.primary_hostname: %s\n", ++count,
+ peerinfo->hostname);
+ fprintf(fp, "Peer%d.uuid: %s\n", count, gd_peer_uuid_str(peerinfo));
+ fprintf(fp, "Peer%d.state: %s\n", count,
+ glusterd_friend_sm_state_name_get(peerinfo->state.state));
+ fprintf(fp, "Peer%d.connected: %s\n", count,
+ peerinfo->connected ? "Connected" : "Disconnected");
- if (priv->opts)
- dict_foreach (priv->opts, glusterd_print_global_options, fp);
+ fprintf(fp, "Peer%d.othernames: ", count);
+ count_bkp = 0;
+ cds_list_for_each_entry(peer_hostname_info, &peerinfo->hostnames,
+ hostname_list)
+ {
+ if (strcmp(peerinfo->hostname, peer_hostname_info->hostname) == 0)
+ continue;
- rcu_read_lock ();
- fprintf (fp, "\n[Peers]\n");
+ if (count_bkp > 0)
+ fprintf(fp, ",");
- cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
- fprintf (fp, "Peer%d.primary_hostname: %s\n", ++count,
- peerinfo->hostname);
- fprintf (fp, "Peer%d.uuid: %s\n", count, gd_peer_uuid_str (peerinfo));
- fprintf (fp, "Peer%d.state: %s\n", count,
- glusterd_friend_sm_state_name_get (peerinfo->state.state));
- fprintf (fp, "Peer%d.connected: %s\n", count,
- peerinfo->connected ? "Connected" : "Disconnected");
-
- fprintf (fp, "Peer%d.othernames: ", count);
- count_bkp = 0;
- cds_list_for_each_entry (peer_hostname_info,
- &peerinfo->hostnames, hostname_list) {
- if (strcmp (peerinfo->hostname,
- peer_hostname_info->hostname) == 0)
- continue;
-
- if (count_bkp > 0)
- fprintf (fp, ",");
-
- fprintf (fp, "%s", peer_hostname_info->hostname);
- count_bkp++;
- }
- count_bkp = 0;
- fprintf (fp, "\n");
+ fprintf(fp, "%s", peer_hostname_info->hostname);
+ count_bkp++;
}
- rcu_read_unlock ();
-
- count = 0;
- fprintf (fp, "\n[Volumes]\n");
-
- cds_list_for_each_entry (volinfo, &priv->volumes, vol_list) {
- ret = glusterd_volume_get_type_str (volinfo, &vol_type_str);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_STATE_STR_GET_FAILED,
- "Failed to get type for volume: %s",
- volinfo->volname);
- goto out;
- }
-
- ret = glusterd_volume_get_status_str (volinfo, vol_status_str);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_STATE_STR_GET_FAILED,
- "Failed to get status for volume: %s",
- volinfo->volname);
- goto out;
- }
+ count_bkp = 0;
+ fprintf(fp, "\n");
+ }
+ rcu_read_unlock();
- ret = glusterd_volume_get_transport_type_str (volinfo,
- transport_type_str);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_STATE_STR_GET_FAILED,
- "Failed to get transport type for volume: %s",
- volinfo->volname);
- goto out;
- }
+ count = 0;
+ fprintf(fp, "\n[Volumes]\n");
- ret = glusterd_volume_get_quorum_status_str (volinfo,
- quorum_status_str);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_STATE_STR_GET_FAILED,
- "Failed to get quorum status for volume: %s",
- volinfo->volname);
- goto out;
- }
-
- ret = glusterd_volume_get_rebalance_status_str (volinfo,
- rebal_status_str);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_STATE_STR_GET_FAILED,
- "Failed to get rebalance status for volume: %s",
- volinfo->volname);
- goto out;
- }
-
- fprintf (fp, "Volume%d.name: %s\n", ++count, volinfo->volname);
-
- uuid_utoa_r (volinfo->volume_id, id_str);
- fprintf (fp, "Volume%d.id: %s\n", count, id_str);
-
- fprintf (fp, "Volume%d.type: %s\n", count, vol_type_str);
- fprintf (fp, "Volume%d.transport_type: %s\n", count,
- transport_type_str);
- fprintf (fp, "Volume%d.status: %s\n", count, vol_status_str);
- fprintf (fp, "Volume%d.profile_enabled: %d\n", count,
- glusterd_is_profile_on (volinfo));
- fprintf (fp, "Volume%d.brickcount: %d\n", count,
- volinfo->brick_count);
-
- count_bkp = count;
- count = 0;
- cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- fprintf (fp, "Volume%d.Brick%d.path: %s:%s\n",
- count_bkp, ++count, brickinfo->hostname,
- brickinfo->path);
- fprintf (fp, "Volume%d.Brick%d.hostname: %s\n",
- count_bkp, count, brickinfo->hostname);
- /* Determine which one is the arbiter brick */
- if (volinfo->arbiter_count == 1) {
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- if (volinfo->tier_info.cold_replica_count != 1) {
- start_index =
- volinfo->tier_info.hot_brick_count + 1;
- if (count >= start_index &&
- ((count - start_index + 1) %
- volinfo->tier_info.cold_replica_count == 0)) {
- fprintf (fp, "Volume%d.Brick%d."
- "is_arbiter: 1\n",
- count_bkp,
- count);
- }
- }
- } else {
- if (count %
- volinfo->replica_count == 0) {
- fprintf (fp, "Volume%d.Brick%d."
- "is_arbiter: 1\n",
- count_bkp, count);
- }
- }
- }
- /* Add following information only for bricks
- * local to current node */
- if (gf_uuid_compare (brickinfo->uuid, MY_UUID))
- continue;
- fprintf (fp, "Volume%d.Brick%d.port: %d\n", count_bkp,
- count, brickinfo->port);
- fprintf (fp, "Volume%d.Brick%d.rdma_port: %d\n", count_bkp,
- count, brickinfo->rdma_port);
- fprintf (fp, "Volume%d.Brick%d.port_registered: %d\n",
- count_bkp, count, brickinfo->port_registered);
- fprintf (fp, "Volume%d.Brick%d.status: %s\n", count_bkp,
- count, brickinfo->status ? "Started" : "Stopped");
-
- /*FIXME: This is a hacky way of figuring out whether a
- * brick belongs to the hot or cold tier */
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- fprintf (fp, "Volume%d.Brick%d.tier: %s\n",
- count_bkp, count,
- count <= volinfo->tier_info.hot_brick_count ?
- "Hot" : "Cold");
- }
-
- ret = sys_statvfs (brickinfo->path, &brickstat);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_FILE_OP_FAILED,
- "statfs error: %s ", strerror (errno));
- goto out;
- }
+ cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
+ {
+ ret = glusterd_volume_get_type_str(volinfo, &vol_type_str);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
+ "Failed to get type for volume: %s", volinfo->volname);
+ goto out;
+ }
- memfree = brickstat.f_bfree * brickstat.f_bsize;
- memtotal = brickstat.f_blocks * brickstat.f_bsize;
+ ret = glusterd_volume_get_status_str(volinfo, vol_status_str);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
+ "Failed to get status for volume: %s", volinfo->volname);
+ goto out;
+ }
- fprintf (fp, "Volume%d.Brick%d.spacefree: %"PRIu64"Bytes\n",
- count_bkp, count, memfree);
- fprintf (fp, "Volume%d.Brick%d.spacetotal: %"PRIu64"Bytes\n",
- count_bkp, count, memtotal);
+ ret = glusterd_volume_get_transport_type_str(volinfo,
+ transport_type_str);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
+ "Failed to get transport type for volume: %s",
+ volinfo->volname);
+ goto out;
+ }
- if (get_state_cmd != GF_CLI_GET_STATE_DETAIL)
- continue;
+ ret = glusterd_volume_get_quorum_status_str(volinfo, quorum_status_str);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
+ "Failed to get quorum status for volume: %s",
+ volinfo->volname);
+ goto out;
+ }
- ret = glusterd_print_client_details (fp, dict,
- volinfo, count_bkp,
- brickinfo, count);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_CLIENTS_GET_STATE_FAILED,
- "Failed to get client details");
- goto out;
- }
- }
+ ret = glusterd_volume_get_rebalance_status_str(volinfo,
+ rebal_status_str);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
+ "Failed to get rebalance status for volume: %s",
+ volinfo->volname);
+ goto out;
+ }
- count = count_bkp;
+ fprintf(fp, "Volume%d.name: %s\n", ++count, volinfo->volname);
- ret = glusterd_print_snapinfo_by_vol (fp, volinfo, count);
- if (ret)
- goto out;
+ uuid_utoa_r(volinfo->volume_id, id_str);
+ fprintf(fp, "Volume%d.id: %s\n", count, id_str);
- fprintf (fp, "Volume%d.snap_count: %"PRIu64"\n", count,
- volinfo->snap_count);
- fprintf (fp, "Volume%d.stripe_count: %d\n", count,
- volinfo->stripe_count);
- fprintf (fp, "Volume%d.replica_count: %d\n", count,
- volinfo->replica_count);
- fprintf (fp, "Volume%d.subvol_count: %d\n", count,
- volinfo->subvol_count);
- fprintf (fp, "Volume%d.arbiter_count: %d\n", count,
- volinfo->arbiter_count);
- fprintf (fp, "Volume%d.disperse_count: %d\n", count,
- volinfo->disperse_count);
- fprintf (fp, "Volume%d.redundancy_count: %d\n", count,
- volinfo->redundancy_count);
- fprintf (fp, "Volume%d.quorum_status: %s\n", count,
- quorum_status_str);
-
- fprintf (fp, "Volume%d.snapd_svc.online_status: %s\n", count,
- volinfo->snapd.svc.online ? "Online" : "Offline");
- fprintf (fp, "Volume%d.snapd_svc.inited: %s\n", count,
- volinfo->snapd.svc.inited ? "True" : "False");
-
- uuid_utoa_r (volinfo->rebal.rebalance_id, id_str);
- char *rebal_data = gf_uint64_2human_readable (
- volinfo->rebal.rebalance_data);
-
- fprintf (fp, "Volume%d.rebalance.id: %s\n", count, id_str);
- fprintf (fp, "Volume%d.rebalance.status: %s\n", count,
- rebal_status_str);
- fprintf (fp, "Volume%d.rebalance.failures: %"PRIu64"\n", count,
- volinfo->rebal.rebalance_failures);
- fprintf (fp, "Volume%d.rebalance.skipped: %"PRIu64"\n", count,
- volinfo->rebal.skipped_files);
- fprintf (fp, "Volume%d.rebalance.lookedup: %"PRIu64"\n", count,
- volinfo->rebal.lookedup_files);
- fprintf (fp, "Volume%d.rebalance.files: %"PRIu64"\n", count,
- volinfo->rebal.rebalance_files);
- fprintf (fp, "Volume%d.rebalance.data: %s\n", count, rebal_data);
- fprintf (fp, "Volume%d.time_left: %"PRIu64"\n", count,
- volinfo->rebal.time_left);
-
- GF_FREE (rebal_data);
+ fprintf(fp, "Volume%d.type: %s\n", count, vol_type_str);
+ fprintf(fp, "Volume%d.transport_type: %s\n", count, transport_type_str);
+ fprintf(fp, "Volume%d.status: %s\n", count, vol_status_str);
+ fprintf(fp, "Volume%d.profile_enabled: %d\n", count,
+ glusterd_is_profile_on(volinfo));
+ fprintf(fp, "Volume%d.brickcount: %d\n", count, volinfo->brick_count);
+ count_bkp = count;
+ count = 0;
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ fprintf(fp, "Volume%d.Brick%d.path: %s:%s\n", count_bkp, ++count,
+ brickinfo->hostname, brickinfo->path);
+ fprintf(fp, "Volume%d.Brick%d.hostname: %s\n", count_bkp, count,
+ brickinfo->hostname);
+ /* Determine which one is the arbiter brick */
+ if (volinfo->arbiter_count == 1) {
if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- ret = glusterd_volume_get_hot_tier_type_str (
- volinfo, &hot_tier_type_str);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_STATE_STR_GET_FAILED,
- "Failed to get hot tier type for "
- "volume: %s", volinfo->volname);
- goto out;
+ if (volinfo->tier_info.cold_replica_count != 1) {
+ start_index = volinfo->tier_info.hot_brick_count + 1;
+ if (count >= start_index &&
+ ((count - start_index + 1) %
+ volinfo->tier_info.cold_replica_count ==
+ 0)) {
+ fprintf(fp,
+ "Volume%d.Brick%d."
+ "is_arbiter: 1\n",
+ count_bkp, count);
}
-
- ret = glusterd_volume_get_cold_tier_type_str (
- volinfo, &cold_tier_type_str);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_STATE_STR_GET_FAILED,
- "Failed to get cold tier type for "
- "volume: %s", volinfo->volname);
- goto out;
- }
-
- fprintf (fp, "Volume%d.tier_info.cold_tier_type: %s\n",
- count, cold_tier_type_str);
- fprintf (fp, "Volume%d.tier_info.cold_brick_count: %d\n",
- count, volinfo->tier_info.cold_brick_count);
- fprintf (fp, "Volume%d.tier_info.cold_replica_count: %d\n",
- count, volinfo->tier_info.cold_replica_count);
- fprintf (fp, "Volume%d.tier_info.cold_disperse_count: %d\n",
- count, volinfo->tier_info.cold_disperse_count);
- fprintf (fp, "Volume%d.tier_info.cold_dist_leaf_count: %d\n",
- count, volinfo->tier_info.cold_dist_leaf_count);
- fprintf (fp, "Volume%d.tier_info.cold_redundancy_count: %d\n",
- count, volinfo->tier_info.cold_redundancy_count);
- fprintf (fp, "Volume%d.tier_info.hot_tier_type: %s\n",
- count, hot_tier_type_str);
- fprintf (fp, "Volume%d.tier_info.hot_brick_count: %d\n",
- count, volinfo->tier_info.hot_brick_count);
- fprintf (fp, "Volume%d.tier_info.hot_replica_count: %d\n",
- count, volinfo->tier_info.hot_replica_count);
- fprintf (fp, "Volume%d.tier_info.promoted: %d\n",
- count, volinfo->tier_info.promoted);
- fprintf (fp, "Volume%d.tier_info.demoted: %d\n",
- count, volinfo->tier_info.demoted);
- }
-
- if (volinfo->rep_brick.src_brick && volinfo->rep_brick.dst_brick) {
- fprintf (fp, "Volume%d.replace_brick.src: %s:%s\n", count,
- volinfo->rep_brick.src_brick->hostname,
- volinfo->rep_brick.src_brick->path);
- fprintf (fp, "Volume%d.replace_brick.dest: %s:%s\n", count,
- volinfo->rep_brick.dst_brick->hostname,
- volinfo->rep_brick.dst_brick->path);
+ }
+ } else {
+ if (count % volinfo->replica_count == 0) {
+ fprintf(fp,
+ "Volume%d.Brick%d."
+ "is_arbiter: 1\n",
+ count_bkp, count);
+ }
}
+ }
+ /* Add following information only for bricks
+ * local to current node */
+ if (gf_uuid_compare(brickinfo->uuid, MY_UUID))
+ continue;
+ fprintf(fp, "Volume%d.Brick%d.port: %d\n", count_bkp, count,
+ brickinfo->port);
+ fprintf(fp, "Volume%d.Brick%d.rdma_port: %d\n", count_bkp, count,
+ brickinfo->rdma_port);
+ fprintf(fp, "Volume%d.Brick%d.port_registered: %d\n", count_bkp,
+ count, brickinfo->port_registered);
+ fprintf(fp, "Volume%d.Brick%d.status: %s\n", count_bkp, count,
+ brickinfo->status ? "Started" : "Stopped");
+
+ /*FIXME: This is a hacky way of figuring out whether a
+ * brick belongs to the hot or cold tier */
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ fprintf(fp, "Volume%d.Brick%d.tier: %s\n", count_bkp, count,
+ count <= volinfo->tier_info.hot_brick_count ? "Hot"
+ : "Cold");
+ }
+
+ ret = sys_statvfs(brickinfo->path, &brickstat);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
+ "statfs error: %s ", strerror(errno));
+ goto out;
+ }
+
+ memfree = brickstat.f_bfree * brickstat.f_bsize;
+ memtotal = brickstat.f_blocks * brickstat.f_bsize;
+
+ fprintf(fp, "Volume%d.Brick%d.spacefree: %" PRIu64 "Bytes\n",
+ count_bkp, count, memfree);
+ fprintf(fp, "Volume%d.Brick%d.spacetotal: %" PRIu64 "Bytes\n",
+ count_bkp, count, memtotal);
+
+ if (get_state_cmd != GF_CLI_GET_STATE_DETAIL)
+ continue;
+
+ ret = glusterd_print_client_details(fp, dict, volinfo, count_bkp,
+ brickinfo, count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_CLIENTS_GET_STATE_FAILED,
+ "Failed to get client details");
+ goto out;
+ }
+ }
+
+ count = count_bkp;
+
+ ret = glusterd_print_snapinfo_by_vol(fp, volinfo, count);
+ if (ret)
+ goto out;
+
+ fprintf(fp, "Volume%d.snap_count: %" PRIu64 "\n", count,
+ volinfo->snap_count);
+ fprintf(fp, "Volume%d.stripe_count: %d\n", count,
+ volinfo->stripe_count);
+ fprintf(fp, "Volume%d.replica_count: %d\n", count,
+ volinfo->replica_count);
+ fprintf(fp, "Volume%d.subvol_count: %d\n", count,
+ volinfo->subvol_count);
+ fprintf(fp, "Volume%d.arbiter_count: %d\n", count,
+ volinfo->arbiter_count);
+ fprintf(fp, "Volume%d.disperse_count: %d\n", count,
+ volinfo->disperse_count);
+ fprintf(fp, "Volume%d.redundancy_count: %d\n", count,
+ volinfo->redundancy_count);
+ fprintf(fp, "Volume%d.quorum_status: %s\n", count, quorum_status_str);
+
+ fprintf(fp, "Volume%d.snapd_svc.online_status: %s\n", count,
+ volinfo->snapd.svc.online ? "Online" : "Offline");
+ fprintf(fp, "Volume%d.snapd_svc.inited: %s\n", count,
+ volinfo->snapd.svc.inited ? "True" : "False");
+
+ uuid_utoa_r(volinfo->rebal.rebalance_id, id_str);
+ char *rebal_data = gf_uint64_2human_readable(
+ volinfo->rebal.rebalance_data);
+
+ fprintf(fp, "Volume%d.rebalance.id: %s\n", count, id_str);
+ fprintf(fp, "Volume%d.rebalance.status: %s\n", count, rebal_status_str);
+ fprintf(fp, "Volume%d.rebalance.failures: %" PRIu64 "\n", count,
+ volinfo->rebal.rebalance_failures);
+ fprintf(fp, "Volume%d.rebalance.skipped: %" PRIu64 "\n", count,
+ volinfo->rebal.skipped_files);
+ fprintf(fp, "Volume%d.rebalance.lookedup: %" PRIu64 "\n", count,
+ volinfo->rebal.lookedup_files);
+ fprintf(fp, "Volume%d.rebalance.files: %" PRIu64 "\n", count,
+ volinfo->rebal.rebalance_files);
+ fprintf(fp, "Volume%d.rebalance.data: %s\n", count, rebal_data);
+ fprintf(fp, "Volume%d.time_left: %" PRIu64 "\n", count,
+ volinfo->rebal.time_left);
+
+ GF_FREE(rebal_data);
- volcount = count;
- ret = glusterd_print_gsync_status_by_vol (fp, volinfo);
- if (ret)
- goto out;
-
- if (volinfo->dict)
- dict_foreach (volinfo->dict,
- glusterd_print_volume_options, fp);
-
- fprintf (fp, "\n");
- }
-
- count = 0;
-
- fprintf (fp, "\n[Services]\n");
-
- if (priv->shd_svc.inited) {
- fprintf (fp, "svc%d.name: %s\n", ++count, priv->shd_svc.name);
- fprintf (fp, "svc%d.online_status: %s\n\n", count,
- priv->shd_svc.online ? "Online" : "Offline");
- }
-
- if (priv->nfs_svc.inited) {
- fprintf (fp, "svc%d.name: %s\n", ++count, priv->nfs_svc.name);
- fprintf (fp, "svc%d.online_status: %s\n\n", count,
- priv->nfs_svc.online ? "Online" : "Offline");
- }
-
- if (priv->bitd_svc.inited) {
- fprintf (fp, "svc%d.name: %s\n", ++count, priv->bitd_svc.name);
- fprintf (fp, "svc%d.online_status: %s\n\n", count,
- priv->bitd_svc.online ? "Online" : "Offline");
- }
-
- if (priv->scrub_svc.inited) {
- fprintf (fp, "svc%d.name: %s\n", ++count, priv->scrub_svc.name);
- fprintf (fp, "svc%d.online_status: %s\n\n", count,
- priv->scrub_svc.online ? "Online" : "Offline");
- }
-
- if (priv->quotad_svc.inited) {
- fprintf (fp, "svc%d.name: %s\n", ++count, priv->quotad_svc.name);
- fprintf (fp, "svc%d.online_status: %s\n\n", count,
- priv->quotad_svc.online ? "Online" : "Offline");
- }
-
- fprintf (fp, "\n[Misc]\n");
- if (priv->pmap) {
- fprintf (fp, "Base port: %d\n", priv->pmap->base_port);
- fprintf (fp, "Last allocated port: %d\n",
- priv->pmap->last_alloc);
- }
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ ret = glusterd_volume_get_hot_tier_type_str(volinfo,
+ &hot_tier_type_str);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
+ "Failed to get hot tier type for "
+ "volume: %s",
+ volinfo->volname);
+ goto out;
+ }
+
+ ret = glusterd_volume_get_cold_tier_type_str(volinfo,
+ &cold_tier_type_str);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
+ "Failed to get cold tier type for "
+ "volume: %s",
+ volinfo->volname);
+ goto out;
+ }
+
+ fprintf(fp, "Volume%d.tier_info.cold_tier_type: %s\n", count,
+ cold_tier_type_str);
+ fprintf(fp, "Volume%d.tier_info.cold_brick_count: %d\n", count,
+ volinfo->tier_info.cold_brick_count);
+ fprintf(fp, "Volume%d.tier_info.cold_replica_count: %d\n", count,
+ volinfo->tier_info.cold_replica_count);
+ fprintf(fp, "Volume%d.tier_info.cold_disperse_count: %d\n", count,
+ volinfo->tier_info.cold_disperse_count);
+ fprintf(fp, "Volume%d.tier_info.cold_dist_leaf_count: %d\n", count,
+ volinfo->tier_info.cold_dist_leaf_count);
+ fprintf(fp, "Volume%d.tier_info.cold_redundancy_count: %d\n", count,
+ volinfo->tier_info.cold_redundancy_count);
+ fprintf(fp, "Volume%d.tier_info.hot_tier_type: %s\n", count,
+ hot_tier_type_str);
+ fprintf(fp, "Volume%d.tier_info.hot_brick_count: %d\n", count,
+ volinfo->tier_info.hot_brick_count);
+ fprintf(fp, "Volume%d.tier_info.hot_replica_count: %d\n", count,
+ volinfo->tier_info.hot_replica_count);
+ fprintf(fp, "Volume%d.tier_info.promoted: %d\n", count,
+ volinfo->tier_info.promoted);
+ fprintf(fp, "Volume%d.tier_info.demoted: %d\n", count,
+ volinfo->tier_info.demoted);
+ }
+
+ if (volinfo->rep_brick.src_brick && volinfo->rep_brick.dst_brick) {
+ fprintf(fp, "Volume%d.replace_brick.src: %s:%s\n", count,
+ volinfo->rep_brick.src_brick->hostname,
+ volinfo->rep_brick.src_brick->path);
+ fprintf(fp, "Volume%d.replace_brick.dest: %s:%s\n", count,
+ volinfo->rep_brick.dst_brick->hostname,
+ volinfo->rep_brick.dst_brick->path);
+ }
+
+ volcount = count;
+ ret = glusterd_print_gsync_status_by_vol(fp, volinfo);
+ if (ret)
+ goto out;
+
+ if (volinfo->dict)
+ dict_foreach(volinfo->dict, glusterd_print_volume_options, fp);
+
+ fprintf(fp, "\n");
+ }
+
+ count = 0;
+
+ fprintf(fp, "\n[Services]\n");
+
+ if (priv->shd_svc.inited) {
+ fprintf(fp, "svc%d.name: %s\n", ++count, priv->shd_svc.name);
+ fprintf(fp, "svc%d.online_status: %s\n\n", count,
+ priv->shd_svc.online ? "Online" : "Offline");
+ }
+
+ if (priv->nfs_svc.inited) {
+ fprintf(fp, "svc%d.name: %s\n", ++count, priv->nfs_svc.name);
+ fprintf(fp, "svc%d.online_status: %s\n\n", count,
+ priv->nfs_svc.online ? "Online" : "Offline");
+ }
+
+ if (priv->bitd_svc.inited) {
+ fprintf(fp, "svc%d.name: %s\n", ++count, priv->bitd_svc.name);
+ fprintf(fp, "svc%d.online_status: %s\n\n", count,
+ priv->bitd_svc.online ? "Online" : "Offline");
+ }
+
+ if (priv->scrub_svc.inited) {
+ fprintf(fp, "svc%d.name: %s\n", ++count, priv->scrub_svc.name);
+ fprintf(fp, "svc%d.online_status: %s\n\n", count,
+ priv->scrub_svc.online ? "Online" : "Offline");
+ }
+
+ if (priv->quotad_svc.inited) {
+ fprintf(fp, "svc%d.name: %s\n", ++count, priv->quotad_svc.name);
+ fprintf(fp, "svc%d.online_status: %s\n\n", count,
+ priv->quotad_svc.online ? "Online" : "Offline");
+ }
+
+ fprintf(fp, "\n[Misc]\n");
+ if (priv->pmap) {
+ fprintf(fp, "Base port: %d\n", priv->pmap->base_port);
+ fprintf(fp, "Last allocated port: %d\n", priv->pmap->last_alloc);
+ }
out:
- if (fp)
- fclose(fp);
+ if (fp)
+ fclose(fp);
- rsp.op_ret = ret;
- if (rsp.op_errstr == NULL)
- rsp.op_errstr = err_str;
+ rsp.op_ret = ret;
+ if (rsp.op_errstr == NULL)
+ rsp.op_errstr = err_str;
- ret = dict_allocate_and_serialize (dict, &rsp.dict.dict_val,
- &rsp.dict.dict_len);
- glusterd_to_cli (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf_cli_rsp, dict);
+ ret = dict_allocate_and_serialize(dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ glusterd_to_cli(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp, dict);
- return ret;
+ return ret;
}
static int
-__glusterd_handle_get_state (rpcsvc_request_t *req)
+__glusterd_handle_get_state(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf_cli_req cli_req = {{0,},};
- dict_t *dict = NULL;
- char err_str[64] = {0,};
- xlator_t *this = NULL;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO (THIS->name, this, out);
- GF_VALIDATE_OR_GOTO (this->name, req, out);
-
- gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_DAEMON_STATE_REQ_RCVD,
- "Received request to get state for glusterd");
+ int32_t ret = -1;
+ gf_cli_req cli_req = {
+ {
+ 0,
+ },
+ };
+ dict_t *dict = NULL;
+ char err_str[64] = {
+ 0,
+ };
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO(THIS->name, this, out);
+ GF_VALIDATE_OR_GOTO(this->name, req, out);
+
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DAEMON_STATE_REQ_RCVD,
+ "Received request to get state for glusterd");
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ snprintf(err_str, sizeof(err_str),
+ "Failed to decode "
+ "request received from cli");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",
+ err_str);
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new();
- ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
if (ret < 0) {
- snprintf (err_str, sizeof (err_str), "Failed to decode "
- "request received from cli");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "%s", err_str);
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- if (cli_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new ();
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to "
- "unserialize req-buffer to dictionary");
- snprintf (err_str, sizeof (err_str), "Unable to decode"
- " the command");
- goto out;
- } else {
- dict->extra_stdfree = cli_req.dict.dict_val;
- }
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf(err_str, sizeof(err_str),
+ "Unable to decode"
+ " the command");
+ goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
}
+ }
- ret = glusterd_get_state (req, dict);
+ ret = glusterd_get_state(req, dict);
out:
- if (dict && ret) {
- /*
- * When glusterd_to_cli (called from glusterd_get_state)
- * succeeds, it frees the dict for us, so this would be a
- * double free, but in other cases it's our responsibility.
- */
- dict_unref (dict);
- }
- return ret;
+ if (dict && ret) {
+ /*
+ * When glusterd_to_cli (called from glusterd_get_state)
+ * succeeds, it frees the dict for us, so this would be a
+ * double free, but in other cases it's our responsibility.
+ */
+ dict_unref(dict);
+ }
+ return ret;
}
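
The ownership comment in __glusterd_handle_get_state() is subtle:
glusterd_to_cli() frees the dict only on the success path, so the handler
must unref it only on failure. A minimal standalone sketch of that
conditional-ownership pattern (the my_dict_t, consume_dict() and
dict_release() names are hypothetical, not the glusterd dict API):

#include <stdlib.h>

typedef struct {
    int refcount;
} my_dict_t;

static void
dict_release(my_dict_t *d)
{
    if (d && --d->refcount == 0)
        free(d);
}

/* Hypothetical callee: takes ownership of 'd' only when it succeeds. */
static int
consume_dict(my_dict_t *d)
{
    if (!d)
        return -1;   /* failure: the caller still owns 'd' */
    dict_release(d); /* success: we drop the reference ourselves */
    return 0;
}

int
handler(my_dict_t *d)
{
    int ret = consume_dict(d);

    if (d && ret) /* only on failure is the dict still ours to free */
        dict_release(d);
    return ret;
}

int
main(void)
{
    my_dict_t *d = malloc(sizeof(*d));

    if (!d)
        return 1;
    d->refcount = 1;
    return handler(d); /* success path: consume_dict frees the dict */
}
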
int
-glusterd_handle_get_state (rpcsvc_request_t *req)
+glusterd_handle_get_state(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_handle_get_state);
+ return glusterd_big_locked_handler(req, __glusterd_handle_get_state);
}
static int
-get_brickinfo_from_brickid (char *brickid, glusterd_brickinfo_t **brickinfo)
+get_brickinfo_from_brickid(char *brickid, glusterd_brickinfo_t **brickinfo)
{
- glusterd_volinfo_t *volinfo = NULL;
- char *volid_str = NULL;
- char *brick = NULL;
- char *brickid_dup = NULL;
- uuid_t volid = {0};
- int ret = -1;
-
- brickid_dup = gf_strdup (brickid);
- if (!brickid_dup)
- goto out;
-
- volid_str = brickid_dup;
- brick = strchr (brickid_dup, ':');
- if (!volid_str || !brick)
- goto out;
-
- *brick = '\0';
- brick++;
- gf_uuid_parse (volid_str, volid);
- ret = glusterd_volinfo_find_by_volume_id (volid, &volinfo);
- if (ret) {
- /* Check if it a snapshot volume */
- ret = glusterd_snap_volinfo_find_by_volume_id (volid, &volinfo);
- if (ret)
- goto out;
- }
-
- ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo,
- brickinfo,
- _gf_false);
+ glusterd_volinfo_t *volinfo = NULL;
+ char *volid_str = NULL;
+ char *brick = NULL;
+ char *brickid_dup = NULL;
+ uuid_t volid = {0};
+ int ret = -1;
+
+ brickid_dup = gf_strdup(brickid);
+ if (!brickid_dup)
+ goto out;
+
+ volid_str = brickid_dup;
+ brick = strchr(brickid_dup, ':');
+ if (!volid_str || !brick)
+ goto out;
+
+ *brick = '\0';
+ brick++;
+ gf_uuid_parse(volid_str, volid);
+ ret = glusterd_volinfo_find_by_volume_id(volid, &volinfo);
+ if (ret) {
+        /* Check if it is a snapshot volume */
+ ret = glusterd_snap_volinfo_find_by_volume_id(volid, &volinfo);
if (ret)
- goto out;
+ goto out;
+ }
- ret = 0;
+ ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, brickinfo,
+ _gf_false);
+ if (ret)
+ goto out;
+
+ ret = 0;
out:
- GF_FREE (brickid_dup);
- return ret;
+ GF_FREE(brickid_dup);
+ return ret;
}
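
get_brickinfo_from_brickid() splits a brickid of the form
<volume-uuid>:<brick> at the first colon only, which leaves any later
colon (as in host:/path) intact inside the brick part, and then resolves
the volume by UUID. A self-contained sketch of just the split step, using
plain libc instead of gf_strdup()/GF_FREE():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Split "<volume-uuid>:<brick>" at the FIRST ':' only, so a brick like
 * "host1:/bricks/b1" keeps its own ':' intact.  Returns 0 on success;
 * the caller frees *volid_out and *brick_out. */
static int
split_brickid(const char *brickid, char **volid_out, char **brick_out)
{
    char *dup = strdup(brickid);
    char *sep = NULL;

    if (!dup)
        return -1;
    sep = strchr(dup, ':');
    if (!sep) { /* malformed brickid */
        free(dup);
        return -1;
    }
    *sep = '\0'; /* terminate the uuid part in place */
    *volid_out = strdup(dup);
    *brick_out = strdup(sep + 1);
    free(dup);
    if (!*volid_out || !*brick_out) {
        free(*volid_out);
        free(*brick_out);
        return -1;
    }
    return 0;
}

int
main(void)
{
    char *volid = NULL, *brick = NULL;

    if (split_brickid("00000000-0000-0000-0000-000000000001:host1:/bricks/b1",
                      &volid, &brick) == 0) {
        printf("volid=%s brick=%s\n", volid, brick);
        free(volid);
        free(brick);
    }
    return 0;
}
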
static int gd_stale_rpc_disconnect_log;
static int
-glusterd_mark_bricks_stopped_by_proc (glusterd_brick_proc_t *brick_proc) {
- glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_brickinfo_t *brickinfo_tmp = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- int ret = -1;
-
- cds_list_for_each_entry (brickinfo, &brick_proc->bricks, brick_list) {
- ret = glusterd_get_volinfo_from_brick (brickinfo->path,
- &volinfo);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLINFO_GET_FAIL, "Failed to get volinfo"
- " from brick(%s)", brickinfo->path);
- goto out;
- }
- cds_list_for_each_entry (brickinfo_tmp, &volinfo->bricks,
- brick_list) {
- if (strcmp (brickinfo->path,
- brickinfo_tmp->path) == 0) {
- glusterd_set_brick_status (brickinfo_tmp,
- GF_BRICK_STOPPED);
- brickinfo_tmp->start_triggered = _gf_false;
- /* When bricks are stopped, ports also need to
- * be cleaned up
- */
- pmap_registry_remove (THIS, brickinfo_tmp->port,
- brickinfo_tmp->path,
- GF_PMAP_PORT_BRICKSERVER,
- NULL, _gf_true);
-
- }
- }
+glusterd_mark_bricks_stopped_by_proc(glusterd_brick_proc_t *brick_proc)
+{
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_brickinfo_t *brickinfo_tmp = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ int ret = -1;
+
+ cds_list_for_each_entry(brickinfo, &brick_proc->bricks, brick_list)
+ {
+ ret = glusterd_get_volinfo_from_brick(brickinfo->path, &volinfo);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+ "Failed to get volinfo"
+ " from brick(%s)",
+ brickinfo->path);
+ goto out;
}
- return 0;
+ cds_list_for_each_entry(brickinfo_tmp, &volinfo->bricks, brick_list)
+ {
+ if (strcmp(brickinfo->path, brickinfo_tmp->path) == 0) {
+ glusterd_set_brick_status(brickinfo_tmp, GF_BRICK_STOPPED);
+ brickinfo_tmp->start_triggered = _gf_false;
+ /* When bricks are stopped, ports also need to
+ * be cleaned up
+ */
+ pmap_registry_remove(THIS, brickinfo_tmp->port,
+ brickinfo_tmp->path,
+ GF_PMAP_PORT_BRICKSERVER, NULL, _gf_true);
+ }
+ }
+ }
+ return 0;
out:
- return ret;
+ return ret;
}
int
-__glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
- rpc_clnt_event_t event, void *data)
+__glusterd_brick_rpc_notify(struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
{
- char *brickid = NULL;
- int ret = 0;
- glusterd_conf_t *conf = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
- int brick_proc_found = 0;
- int32_t pid = -1;
- glusterd_brickinfo_t *brickinfo_tmp = NULL;
- glusterd_brick_proc_t *brick_proc = NULL;
- char pidfile[PATH_MAX] = {0};
-
- brickid = mydata;
- if (!brickid)
- return 0;
-
- ret = get_brickinfo_from_brickid (brickid, &brickinfo);
- if (ret)
- return 0;
+ char *brickid = NULL;
+ int ret = 0;
+ glusterd_conf_t *conf = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+ int brick_proc_found = 0;
+ int32_t pid = -1;
+ glusterd_brickinfo_t *brickinfo_tmp = NULL;
+ glusterd_brick_proc_t *brick_proc = NULL;
+ char pidfile[PATH_MAX] = {0};
+
+ brickid = mydata;
+ if (!brickid)
+ return 0;
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
- GF_ASSERT (conf);
+ ret = get_brickinfo_from_brickid(brickid, &brickinfo);
+ if (ret)
+ return 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
+ GF_ASSERT(conf);
- switch (event) {
+ switch (event) {
case RPC_CLNT_CONNECT:
- ret = get_volinfo_from_brickid (brickid, &volinfo);
+ ret = get_volinfo_from_brickid(brickid, &volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+ "Failed to get volinfo from "
+ "brickid(%s)",
+ brickid);
+ goto out;
+ }
+        /* If a node coming back up starts a brick before the
+         * handshake, and the notification arrives after the
+         * handshake is done, we need to check whether this is a
+         * restored brick with a snapshot pending. If so, we need
+         * to stop the brick.
+         */
+ if (brickinfo->snap_status == -1) {
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SNAPSHOT_PENDING,
+ "Snapshot is pending on %s:%s. "
+ "Hence not starting the brick",
+ brickinfo->hostname, brickinfo->path);
+ ret = glusterd_brick_stop(volinfo, brickinfo, _gf_false);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLINFO_GET_FAIL,
- "Failed to get volinfo from "
- "brickid(%s)", brickid);
- goto out;
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_STOP_FAIL,
+ "Unable to stop %s:%s", brickinfo->hostname,
+ brickinfo->path);
+ goto out;
}
- /* If a node on coming back up, already starts a brick
- * before the handshake, and the notification comes after
- * the handshake is done, then we need to check if this
- * is a restored brick with a snapshot pending. If so, we
- * need to stop the brick
- */
- if (brickinfo->snap_status == -1) {
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_SNAPSHOT_PENDING,
- "Snapshot is pending on %s:%s. "
- "Hence not starting the brick",
- brickinfo->hostname,
- brickinfo->path);
- ret = glusterd_brick_stop (volinfo, brickinfo,
- _gf_false);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_STOP_FAIL,
- "Unable to stop %s:%s",
- brickinfo->hostname, brickinfo->path);
- goto out;
- }
- break;
- }
- gf_msg_debug (this->name, 0, "Connected to %s:%s",
- brickinfo->hostname, brickinfo->path);
+ break;
+ }
+ gf_msg_debug(this->name, 0, "Connected to %s:%s",
+ brickinfo->hostname, brickinfo->path);
- glusterd_set_brick_status (brickinfo, GF_BRICK_STARTED);
+ glusterd_set_brick_status(brickinfo, GF_BRICK_STARTED);
- gf_event (EVENT_BRICK_CONNECTED, "peer=%s;volume=%s;brick=%s",
- brickinfo->hostname, volinfo->volname,
- brickinfo->path);
+ gf_event(EVENT_BRICK_CONNECTED, "peer=%s;volume=%s;brick=%s",
+ brickinfo->hostname, volinfo->volname, brickinfo->path);
- ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);
+ ret = default_notify(this, GF_EVENT_CHILD_UP, NULL);
- break;
+ break;
case RPC_CLNT_DISCONNECT:
- if (rpc != brickinfo->rpc) {
- /*
- * There used to be a bunch of races in the volume
- * start/stop code that could result in us getting here
- * and setting the brick status incorrectly. Many of
- * those have been fixed or avoided, but just in case
- * any are still left it doesn't hurt to keep the extra
- * check and avoid further damage.
- */
- GF_LOG_OCCASIONALLY (gd_stale_rpc_disconnect_log,
- this->name, GF_LOG_WARNING,
- "got disconnect from stale rpc on "
- "%s", brickinfo->path);
- break;
+ if (rpc != brickinfo->rpc) {
+ /*
+ * There used to be a bunch of races in the volume
+ * start/stop code that could result in us getting here
+ * and setting the brick status incorrectly. Many of
+ * those have been fixed or avoided, but just in case
+ * any are still left it doesn't hurt to keep the extra
+ * check and avoid further damage.
+ */
+ GF_LOG_OCCASIONALLY(gd_stale_rpc_disconnect_log, this->name,
+ GF_LOG_WARNING,
+ "got disconnect from stale rpc on "
+ "%s",
+ brickinfo->path);
+ break;
+ }
+ if (glusterd_is_brick_started(brickinfo)) {
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_BRICK_DISCONNECTED,
+ "Brick %s:%s has disconnected from glusterd.",
+ brickinfo->hostname, brickinfo->path);
+
+ ret = get_volinfo_from_brickid(brickid, &volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+ "Failed to get volinfo from "
+ "brickid(%s)",
+ brickid);
+ goto out;
}
- if (glusterd_is_brick_started (brickinfo)) {
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_BRICK_DISCONNECTED,
- "Brick %s:%s has disconnected from glusterd.",
- brickinfo->hostname, brickinfo->path);
-
- ret = get_volinfo_from_brickid (brickid, &volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLINFO_GET_FAIL,
- "Failed to get volinfo from "
- "brickid(%s)", brickid);
- goto out;
- }
- gf_event (EVENT_BRICK_DISCONNECTED,
- "peer=%s;volume=%s;brick=%s",
- brickinfo->hostname, volinfo->volname,
- brickinfo->path);
- /* In case of an abrupt shutdown of a brick PMAP_SIGNOUT
- * event is not received by glusterd which can lead to a
- * stale port entry in glusterd, so forcibly clean up
- * the same if the process is not running
- */
- GLUSTERD_GET_BRICK_PIDFILE (pidfile, volinfo,
- brickinfo, conf);
- if (!gf_is_service_running (pidfile, &pid)) {
- ret = pmap_registry_remove (
- THIS, brickinfo->port,
- brickinfo->path,
- GF_PMAP_PORT_BRICKSERVER,
- NULL, _gf_true);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING,
- GD_MSG_PMAP_REGISTRY_REMOVE_FAIL,
- 0, "Failed to remove pmap "
- "registry for port %d for "
- "brick %s", brickinfo->port,
- brickinfo->path);
- ret = 0;
- }
- }
+ gf_event(EVENT_BRICK_DISCONNECTED, "peer=%s;volume=%s;brick=%s",
+ brickinfo->hostname, volinfo->volname,
+ brickinfo->path);
+            /* In case of an abrupt shutdown of a brick, the PMAP_SIGNOUT
+             * event is not received by glusterd, which can leave a stale
+             * port entry in glusterd; forcibly clean it up if the
+             * process is not running.
+             */
+ GLUSTERD_GET_BRICK_PIDFILE(pidfile, volinfo, brickinfo, conf);
+ if (!gf_is_service_running(pidfile, &pid)) {
+ ret = pmap_registry_remove(
+ THIS, brickinfo->port, brickinfo->path,
+ GF_PMAP_PORT_BRICKSERVER, NULL, _gf_true);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING,
+ GD_MSG_PMAP_REGISTRY_REMOVE_FAIL, 0,
+ "Failed to remove pmap "
+ "registry for port %d for "
+ "brick %s",
+ brickinfo->port, brickinfo->path);
+ ret = 0;
+ }
}
-
- if (is_brick_mx_enabled()) {
- cds_list_for_each_entry (brick_proc, &conf->brick_procs,
- brick_proc_list) {
- cds_list_for_each_entry (brickinfo_tmp,
- &brick_proc->bricks,
- brick_list) {
- if (strcmp (brickinfo_tmp->path,
- brickinfo->path) == 0) {
- ret = glusterd_mark_bricks_stopped_by_proc
- (brick_proc);
- if (ret) {
- gf_msg(THIS->name,
- GF_LOG_ERROR, 0,
- GD_MSG_BRICK_STOP_FAIL,
- "Unable to stop "
- "bricks of process"
- " to which brick(%s)"
- " belongs",
- brickinfo->path);
- goto out;
- }
- brick_proc_found = 1;
- break;
- }
- }
- if (brick_proc_found == 1)
- break;
+ }
+
+ if (is_brick_mx_enabled()) {
+ cds_list_for_each_entry(brick_proc, &conf->brick_procs,
+ brick_proc_list)
+ {
+ cds_list_for_each_entry(brickinfo_tmp, &brick_proc->bricks,
+ brick_list)
+ {
+ if (strcmp(brickinfo_tmp->path, brickinfo->path) == 0) {
+ ret = glusterd_mark_bricks_stopped_by_proc(
+ brick_proc);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0,
+ GD_MSG_BRICK_STOP_FAIL,
+ "Unable to stop "
+ "bricks of process"
+ " to which brick(%s)"
+ " belongs",
+ brickinfo->path);
+ goto out;
+ }
+ brick_proc_found = 1;
+ break;
}
- } else {
- glusterd_set_brick_status (brickinfo, GF_BRICK_STOPPED);
- brickinfo->start_triggered = _gf_false;
+ }
+ if (brick_proc_found == 1)
+ break;
}
- break;
+ } else {
+ glusterd_set_brick_status(brickinfo, GF_BRICK_STOPPED);
+ brickinfo->start_triggered = _gf_false;
+ }
+ break;
case RPC_CLNT_DESTROY:
- GF_FREE (mydata);
- mydata = NULL;
- break;
+ GF_FREE(mydata);
+ mydata = NULL;
+ break;
default:
- gf_msg_trace (this->name, 0,
- "got some other RPC event %d", event);
- break;
- }
+ gf_msg_trace(this->name, 0, "got some other RPC event %d", event);
+ break;
+ }
out:
- return ret;
+ return ret;
}
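
The stale-port cleanup in the DISCONNECT branch above hinges on checking
whether the process behind the brick's pidfile is still alive before
removing its pmap entry. A rough standalone sketch of such a liveness
probe, assuming only POSIX (the real gf_is_service_running() helper does
more than this):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>

/* Return 1 if the pid recorded in 'pidfile' refers to a live process,
 * 0 otherwise.  kill(pid, 0) delivers no signal; it only probes for
 * existence (EPERM still means the process exists). */
static int
pidfile_process_alive(const char *pidfile, pid_t *pid_out)
{
    FILE *fp = fopen(pidfile, "r");
    long pid = -1;
    int alive = 0;

    if (!fp)
        return 0; /* no pidfile: treat the process as down */
    if (fscanf(fp, "%ld", &pid) == 1 && pid > 0)
        alive = (kill((pid_t)pid, 0) == 0 || errno == EPERM);
    fclose(fp);
    if (pid_out)
        *pid_out = (pid_t)pid;
    return alive;
}

int
main(int argc, char *argv[])
{
    pid_t pid = -1;

    if (argc > 1 && pidfile_process_alive(argv[1], &pid))
        printf("pid %ld is alive\n", (long)pid);
    return 0;
}
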
int
-glusterd_brick_rpc_notify (struct rpc_clnt *rpc, void *mydata,
- rpc_clnt_event_t event, void *data)
+glusterd_brick_rpc_notify(struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
{
- return glusterd_big_locked_notify (rpc, mydata, event, data,
- __glusterd_brick_rpc_notify);
+ return glusterd_big_locked_notify(rpc, mydata, event, data,
+ __glusterd_brick_rpc_notify);
}
int
-glusterd_friend_remove_notify (glusterd_peerctx_t *peerctx, int32_t op_errno)
+glusterd_friend_remove_notify(glusterd_peerctx_t *peerctx, int32_t op_errno)
{
- int ret = -1;
- glusterd_friend_sm_event_t *new_event = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- rpcsvc_request_t *req = NULL;
- char *errstr = NULL;
- dict_t *dict = NULL;
+ int ret = -1;
+ glusterd_friend_sm_event_t *new_event = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ rpcsvc_request_t *req = NULL;
+ char *errstr = NULL;
+ dict_t *dict = NULL;
+
+ GF_ASSERT(peerctx);
+
+ rcu_read_lock();
+ peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
+ if (!peerinfo) {
+ gf_msg_debug(THIS->name, 0,
+ "Could not find peer %s(%s). "
+ "Peer could have been deleted.",
+ peerctx->peername, uuid_utoa(peerctx->peerid));
+ ret = 0;
+ goto out;
+ }
- GF_ASSERT (peerctx);
+ req = peerctx->args.req;
+ dict = peerctx->args.dict;
+ errstr = peerctx->errstr;
- rcu_read_lock ();
- peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
- if (!peerinfo) {
- gf_msg_debug (THIS->name, 0, "Could not find peer %s(%s). "
- "Peer could have been deleted.", peerctx->peername,
- uuid_utoa (peerctx->peerid));
- ret = 0;
- goto out;
+ ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_REMOVE_FRIEND,
+ &new_event);
+ if (!ret) {
+ if (!req) {
+ gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_EVENT_NEW_GET_FAIL,
+ "Unable to find the request for responding "
+ "to User (%s)",
+ peerinfo->hostname);
+ goto out;
}
- req = peerctx->args.req;
- dict = peerctx->args.dict;
- errstr = peerctx->errstr;
-
- ret = glusterd_friend_sm_new_event (GD_FRIEND_EVENT_REMOVE_FRIEND,
- &new_event);
- if (!ret) {
- if (!req) {
- gf_msg (THIS->name, GF_LOG_WARNING, 0,
- GD_MSG_EVENT_NEW_GET_FAIL,
- "Unable to find the request for responding "
- "to User (%s)", peerinfo->hostname);
- goto out;
- }
+ glusterd_xfer_cli_probe_resp(req, -1, op_errno, errstr,
+ peerinfo->hostname, peerinfo->port, dict);
- glusterd_xfer_cli_probe_resp (req, -1, op_errno, errstr,
- peerinfo->hostname,
- peerinfo->port, dict);
+ new_event->peername = gf_strdup(peerinfo->hostname);
+ gf_uuid_copy(new_event->peerid, peerinfo->uuid);
+ ret = glusterd_friend_sm_inject_event(new_event);
- new_event->peername = gf_strdup (peerinfo->hostname);
- gf_uuid_copy (new_event->peerid, peerinfo->uuid);
- ret = glusterd_friend_sm_inject_event (new_event);
-
- } else {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_EVENT_INJECT_FAIL,
- "Unable to create event for removing peer %s",
- peerinfo->hostname);
- }
+ } else {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
+ "Unable to create event for removing peer %s",
+ peerinfo->hostname);
+ }
out:
- rcu_read_unlock ();
- return ret;
+ rcu_read_unlock();
+ return ret;
}
int
-__glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
- rpc_clnt_event_t event, void *data)
+__glusterd_peer_rpc_notify(struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
{
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- int ret = 0;
- int32_t op_errno = ENOTCONN;
- glusterd_peerinfo_t *peerinfo = NULL;
- glusterd_peerctx_t *peerctx = NULL;
- gf_boolean_t quorum_action = _gf_false;
- glusterd_volinfo_t *volinfo = NULL;
- uuid_t uuid;
-
- peerctx = mydata;
- if (!peerctx)
- return 0;
-
- this = THIS;
- conf = this->private;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ int ret = 0;
+ int32_t op_errno = ENOTCONN;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_peerctx_t *peerctx = NULL;
+ gf_boolean_t quorum_action = _gf_false;
+ glusterd_volinfo_t *volinfo = NULL;
+ uuid_t uuid;
+
+ peerctx = mydata;
+ if (!peerctx)
+ return 0;
+
+ this = THIS;
+ conf = this->private;
- switch (event) {
+ switch (event) {
case RPC_CLNT_DESTROY:
- GF_FREE (peerctx->errstr);
- GF_FREE (peerctx->peername);
- GF_FREE (peerctx);
- return 0;
+ GF_FREE(peerctx->errstr);
+ GF_FREE(peerctx->peername);
+ GF_FREE(peerctx);
+ return 0;
case RPC_CLNT_PING:
- return 0;
+ return 0;
default:
- break;
- }
+ break;
+ }
- rcu_read_lock ();
+ rcu_read_lock();
- peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
- if (!peerinfo) {
- /* Peerinfo should be available at this point if its a connect
- * event. Not finding it means that something terrible has
- * happened. For non-connect event we might end up having a null
- * peerinfo, so log at debug level.
- */
- gf_msg (THIS->name, (RPC_CLNT_CONNECT == event) ?
- GF_LOG_CRITICAL : GF_LOG_DEBUG, ENOENT,
- GD_MSG_PEER_NOT_FOUND, "Could not find peer "
- "%s(%s)", peerctx->peername,
- uuid_utoa (peerctx->peerid));
-
- if (RPC_CLNT_CONNECT == event) {
- gf_event (EVENT_PEER_NOT_FOUND, "peer=%s;uuid=%s",
- peerctx->peername,
- uuid_utoa (peerctx->peerid));
- }
- ret = -1;
- goto out;
- }
+ peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
+ if (!peerinfo) {
+    /* Peerinfo should be available at this point if it's a connect
+     * event. Not finding it means that something terrible has
+     * happened. For non-connect events we might end up having a null
+     * peerinfo, so log at debug level.
+     */
+ gf_msg(THIS->name,
+ (RPC_CLNT_CONNECT == event) ? GF_LOG_CRITICAL : GF_LOG_DEBUG,
+ ENOENT, GD_MSG_PEER_NOT_FOUND,
+ "Could not find peer "
+ "%s(%s)",
+ peerctx->peername, uuid_utoa(peerctx->peerid));
- switch (event) {
- case RPC_CLNT_CONNECT:
- {
- gf_msg_debug (this->name, 0, "got RPC_CLNT_CONNECT");
- peerinfo->connected = 1;
- peerinfo->quorum_action = _gf_true;
- peerinfo->generation = uatomic_add_return
- (&conf->generation, 1);
- peerctx->peerinfo_gen = peerinfo->generation;
- /* EVENT_PEER_CONNECT will only be sent if peerctx->uuid is not
- * NULL, otherwise it indicates this RPC_CLNT_CONNECT is from a
- * peer probe trigger and given we already generate an event for
- * peer probe this would be unnecessary.
- */
- if (!gf_uuid_is_null (peerinfo->uuid)) {
- gf_event (EVENT_PEER_CONNECT, "host=%s;uuid=%s",
- peerinfo->hostname,
- uuid_utoa (peerinfo->uuid));
- }
- ret = glusterd_peer_dump_version (this, rpc, peerctx);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_HANDSHAKE_FAILED,
- "glusterd handshake failed");
- break;
+ if (RPC_CLNT_CONNECT == event) {
+ gf_event(EVENT_PEER_NOT_FOUND, "peer=%s;uuid=%s", peerctx->peername,
+ uuid_utoa(peerctx->peerid));
}
+ ret = -1;
+ goto out;
+ }
+
+ switch (event) {
+ case RPC_CLNT_CONNECT: {
+ gf_msg_debug(this->name, 0, "got RPC_CLNT_CONNECT");
+ peerinfo->connected = 1;
+ peerinfo->quorum_action = _gf_true;
+ peerinfo->generation = uatomic_add_return(&conf->generation, 1);
+ peerctx->peerinfo_gen = peerinfo->generation;
+            /* EVENT_PEER_CONNECT will only be sent if peerctx->uuid is not
+             * NULL; otherwise this RPC_CLNT_CONNECT came from a peer probe
+             * trigger, and since we already generate an event for peer
+             * probe, another event here would be unnecessary.
+             */
+ if (!gf_uuid_is_null(peerinfo->uuid)) {
+ gf_event(EVENT_PEER_CONNECT, "host=%s;uuid=%s",
+ peerinfo->hostname, uuid_utoa(peerinfo->uuid));
+ }
+ ret = glusterd_peer_dump_version(this, rpc, peerctx);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HANDSHAKE_FAILED,
+ "glusterd handshake failed");
+ break;
+ }
+
+ case RPC_CLNT_DISCONNECT: {
+        /* If the DISCONNECT event is already processed, skip further
+         * ones.
+         */
+ if (is_rpc_clnt_disconnected(&rpc->conn))
+ break;
- case RPC_CLNT_DISCONNECT:
- {
- /* If DISCONNECT event is already processed, skip the further
- * ones
- */
- if (is_rpc_clnt_disconnected (&rpc->conn))
- break;
-
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_PEER_DISCONNECTED,
- "Peer <%s> (<%s>), in state <%s>, has disconnected "
- "from glusterd.",
- peerinfo->hostname, uuid_utoa (peerinfo->uuid),
- glusterd_friend_sm_state_name_get (peerinfo->state.state));
- gf_event (EVENT_PEER_DISCONNECT, "peer=%s;uuid=%s;state=%s",
- peerinfo->hostname, uuid_utoa (peerinfo->uuid),
- glusterd_friend_sm_state_name_get (peerinfo->state.state));
-
- if (peerinfo->connected) {
- if (conf->op_version < GD_OP_VERSION_3_6_0) {
- glusterd_get_lock_owner (&uuid);
- if (!gf_uuid_is_null (uuid) &&
- !gf_uuid_compare (peerinfo->uuid, uuid))
- glusterd_unlock (peerinfo->uuid);
- } else {
- cds_list_for_each_entry (volinfo,
- &conf->volumes,
- vol_list) {
- ret = glusterd_mgmt_v3_unlock
- (volinfo->volname,
- peerinfo->uuid,
- "vol");
- if (ret)
- gf_msg (this->name,
- GF_LOG_WARNING, 0,
- GD_MSG_MGMTV3_UNLOCK_FAIL,
- "Lock not released "
- "for %s",
- volinfo->volname);
- }
- }
-
- op_errno = GF_PROBE_ANOTHER_CLUSTER;
- ret = 0;
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_PEER_DISCONNECTED,
+ "Peer <%s> (<%s>), in state <%s>, has disconnected "
+ "from glusterd.",
+ peerinfo->hostname, uuid_utoa(peerinfo->uuid),
+ glusterd_friend_sm_state_name_get(peerinfo->state.state));
+ gf_event(EVENT_PEER_DISCONNECT, "peer=%s;uuid=%s;state=%s",
+ peerinfo->hostname, uuid_utoa(peerinfo->uuid),
+ glusterd_friend_sm_state_name_get(peerinfo->state.state));
+
+ if (peerinfo->connected) {
+ if (conf->op_version < GD_OP_VERSION_3_6_0) {
+ glusterd_get_lock_owner(&uuid);
+ if (!gf_uuid_is_null(uuid) &&
+ !gf_uuid_compare(peerinfo->uuid, uuid))
+ glusterd_unlock(peerinfo->uuid);
+ } else {
+ cds_list_for_each_entry(volinfo, &conf->volumes, vol_list)
+ {
+ ret = glusterd_mgmt_v3_unlock(volinfo->volname,
+ peerinfo->uuid, "vol");
+ if (ret)
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_MGMTV3_UNLOCK_FAIL,
+ "Lock not released "
+ "for %s",
+ volinfo->volname);
+ }
}
- if ((peerinfo->quorum_contrib != QUORUM_DOWN) &&
- (peerinfo->state.state == GD_FRIEND_STATE_BEFRIENDED)) {
- peerinfo->quorum_contrib = QUORUM_DOWN;
- quorum_action = _gf_true;
- peerinfo->quorum_action = _gf_false;
- }
+ op_errno = GF_PROBE_ANOTHER_CLUSTER;
+ ret = 0;
+ }
- /* Remove peer if it is not a friend and connection/handshake
- * fails, and notify cli. Happens only during probe.
- */
- if (peerinfo->state.state == GD_FRIEND_STATE_DEFAULT) {
- glusterd_friend_remove_notify (peerctx, op_errno);
- goto out;
- }
+ if ((peerinfo->quorum_contrib != QUORUM_DOWN) &&
+ (peerinfo->state.state == GD_FRIEND_STATE_BEFRIENDED)) {
+ peerinfo->quorum_contrib = QUORUM_DOWN;
+ quorum_action = _gf_true;
+ peerinfo->quorum_action = _gf_false;
+ }
- peerinfo->connected = 0;
- break;
+ /* Remove peer if it is not a friend and connection/handshake
+ * fails, and notify cli. Happens only during probe.
+ */
+ if (peerinfo->state.state == GD_FRIEND_STATE_DEFAULT) {
+ glusterd_friend_remove_notify(peerctx, op_errno);
+ goto out;
+ }
+
+ peerinfo->connected = 0;
+ break;
}
default:
- gf_msg_trace (this->name, 0,
- "got some other RPC event %d", event);
- ret = 0;
- break;
- }
+ gf_msg_trace(this->name, 0, "got some other RPC event %d", event);
+ ret = 0;
+ break;
+ }
out:
- rcu_read_unlock ();
+ rcu_read_unlock();
- glusterd_friend_sm ();
- glusterd_op_sm ();
- if (quorum_action)
- glusterd_do_quorum_action ();
- return ret;
+ glusterd_friend_sm();
+ glusterd_op_sm();
+ if (quorum_action)
+ glusterd_do_quorum_action();
+ return ret;
}
int
-glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
- rpc_clnt_event_t event, void *data)
+glusterd_peer_rpc_notify(struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
{
- return glusterd_big_locked_notify (rpc, mydata, event, data,
- __glusterd_peer_rpc_notify);
+ return glusterd_big_locked_notify(rpc, mydata, event, data,
+ __glusterd_peer_rpc_notify);
}
int
-glusterd_null (rpcsvc_request_t *req)
+glusterd_null(rpcsvc_request_t *req)
{
-
- return 0;
+ return 0;
}
rpcsvc_actor_t gd_svc_mgmt_actors[GLUSTERD_MGMT_MAXVALUE] = {
- [GLUSTERD_MGMT_NULL] = { "NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_CLUSTER_LOCK] = { "CLUSTER_LOCK", GLUSTERD_MGMT_CLUSTER_LOCK, glusterd_handle_cluster_lock, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_CLUSTER_UNLOCK] = { "CLUSTER_UNLOCK", GLUSTERD_MGMT_CLUSTER_UNLOCK, glusterd_handle_cluster_unlock, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_STAGE_OP] = { "STAGE_OP", GLUSTERD_MGMT_STAGE_OP, glusterd_handle_stage_op, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_COMMIT_OP] = { "COMMIT_OP", GLUSTERD_MGMT_COMMIT_OP, glusterd_handle_commit_op, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_NULL] = {"NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL, 0,
+ DRC_NA},
+ [GLUSTERD_MGMT_CLUSTER_LOCK] = {"CLUSTER_LOCK", GLUSTERD_MGMT_CLUSTER_LOCK,
+ glusterd_handle_cluster_lock, NULL, 0,
+ DRC_NA},
+ [GLUSTERD_MGMT_CLUSTER_UNLOCK] = {"CLUSTER_UNLOCK",
+ GLUSTERD_MGMT_CLUSTER_UNLOCK,
+ glusterd_handle_cluster_unlock, NULL, 0,
+ DRC_NA},
+ [GLUSTERD_MGMT_STAGE_OP] = {"STAGE_OP", GLUSTERD_MGMT_STAGE_OP,
+ glusterd_handle_stage_op, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_COMMIT_OP] = {"COMMIT_OP", GLUSTERD_MGMT_COMMIT_OP,
+ glusterd_handle_commit_op, NULL, 0, DRC_NA},
};
struct rpcsvc_program gd_svc_mgmt_prog = {
- .progname = "GlusterD svc mgmt",
- .prognum = GD_MGMT_PROGRAM,
- .progver = GD_MGMT_VERSION,
- .numactors = GLUSTERD_MGMT_MAXVALUE,
- .actors = gd_svc_mgmt_actors,
- .synctask = _gf_true,
+ .progname = "GlusterD svc mgmt",
+ .prognum = GD_MGMT_PROGRAM,
+ .progver = GD_MGMT_VERSION,
+ .numactors = GLUSTERD_MGMT_MAXVALUE,
+ .actors = gd_svc_mgmt_actors,
+ .synctask = _gf_true,
};
rpcsvc_actor_t gd_svc_peer_actors[GLUSTERD_FRIEND_MAXVALUE] = {
- [GLUSTERD_FRIEND_NULL] = { "NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL, 0, DRC_NA},
- [GLUSTERD_PROBE_QUERY] = { "PROBE_QUERY", GLUSTERD_PROBE_QUERY, glusterd_handle_probe_query, NULL, 0, DRC_NA},
- [GLUSTERD_FRIEND_ADD] = { "FRIEND_ADD", GLUSTERD_FRIEND_ADD, glusterd_handle_incoming_friend_req, NULL, 0, DRC_NA},
- [GLUSTERD_FRIEND_REMOVE] = { "FRIEND_REMOVE", GLUSTERD_FRIEND_REMOVE, glusterd_handle_incoming_unfriend_req, NULL, 0, DRC_NA},
- [GLUSTERD_FRIEND_UPDATE] = { "FRIEND_UPDATE", GLUSTERD_FRIEND_UPDATE, glusterd_handle_friend_update, NULL, 0, DRC_NA},
+ [GLUSTERD_FRIEND_NULL] = {"NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL,
+ 0, DRC_NA},
+ [GLUSTERD_PROBE_QUERY] = {"PROBE_QUERY", GLUSTERD_PROBE_QUERY,
+ glusterd_handle_probe_query, NULL, 0, DRC_NA},
+ [GLUSTERD_FRIEND_ADD] = {"FRIEND_ADD", GLUSTERD_FRIEND_ADD,
+ glusterd_handle_incoming_friend_req, NULL, 0,
+ DRC_NA},
+ [GLUSTERD_FRIEND_REMOVE] = {"FRIEND_REMOVE", GLUSTERD_FRIEND_REMOVE,
+ glusterd_handle_incoming_unfriend_req, NULL, 0,
+ DRC_NA},
+ [GLUSTERD_FRIEND_UPDATE] = {"FRIEND_UPDATE", GLUSTERD_FRIEND_UPDATE,
+ glusterd_handle_friend_update, NULL, 0, DRC_NA},
};
struct rpcsvc_program gd_svc_peer_prog = {
- .progname = "GlusterD svc peer",
- .prognum = GD_FRIEND_PROGRAM,
- .progver = GD_FRIEND_VERSION,
- .numactors = GLUSTERD_FRIEND_MAXVALUE,
- .actors = gd_svc_peer_actors,
- .synctask = _gf_false,
+ .progname = "GlusterD svc peer",
+ .prognum = GD_FRIEND_PROGRAM,
+ .progver = GD_FRIEND_VERSION,
+ .numactors = GLUSTERD_FRIEND_MAXVALUE,
+ .actors = gd_svc_peer_actors,
+ .synctask = _gf_false,
};
-
-
rpcsvc_actor_t gd_svc_cli_actors[GLUSTER_CLI_MAXVALUE] = {
- [GLUSTER_CLI_PROBE] = { "CLI_PROBE", GLUSTER_CLI_PROBE, glusterd_handle_cli_probe, NULL, 0, DRC_NA},
- [GLUSTER_CLI_CREATE_VOLUME] = { "CLI_CREATE_VOLUME", GLUSTER_CLI_CREATE_VOLUME, glusterd_handle_create_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_DEFRAG_VOLUME] = { "CLI_DEFRAG_VOLUME", GLUSTER_CLI_DEFRAG_VOLUME, glusterd_handle_defrag_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_DEPROBE] = { "FRIEND_REMOVE", GLUSTER_CLI_DEPROBE, glusterd_handle_cli_deprobe, NULL, 0, DRC_NA},
- [GLUSTER_CLI_LIST_FRIENDS] = { "LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS, glusterd_handle_cli_list_friends, NULL, 0, DRC_NA},
- [GLUSTER_CLI_UUID_RESET] = { "UUID_RESET", GLUSTER_CLI_UUID_RESET, glusterd_handle_cli_uuid_reset, NULL, 0, DRC_NA},
- [GLUSTER_CLI_UUID_GET] = { "UUID_GET", GLUSTER_CLI_UUID_GET, glusterd_handle_cli_uuid_get, NULL, 0, DRC_NA},
- [GLUSTER_CLI_START_VOLUME] = { "START_VOLUME", GLUSTER_CLI_START_VOLUME, glusterd_handle_cli_start_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_STOP_VOLUME] = { "STOP_VOLUME", GLUSTER_CLI_STOP_VOLUME, glusterd_handle_cli_stop_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_DELETE_VOLUME] = { "DELETE_VOLUME", GLUSTER_CLI_DELETE_VOLUME, glusterd_handle_cli_delete_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_GET_VOLUME] = { "GET_VOLUME", GLUSTER_CLI_GET_VOLUME, glusterd_handle_cli_get_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_ADD_BRICK] = { "ADD_BRICK", GLUSTER_CLI_ADD_BRICK, glusterd_handle_add_brick, NULL, 0, DRC_NA},
- [GLUSTER_CLI_ATTACH_TIER] = { "ATTACH_TIER", GLUSTER_CLI_ATTACH_TIER, glusterd_handle_attach_tier, NULL, 0, DRC_NA},
- [GLUSTER_CLI_REPLACE_BRICK] = { "REPLACE_BRICK", GLUSTER_CLI_REPLACE_BRICK, glusterd_handle_replace_brick, NULL, 0, DRC_NA},
- [GLUSTER_CLI_REMOVE_BRICK] = { "REMOVE_BRICK", GLUSTER_CLI_REMOVE_BRICK, glusterd_handle_remove_brick, NULL, 0, DRC_NA},
- [GLUSTER_CLI_LOG_ROTATE] = { "LOG FILENAME", GLUSTER_CLI_LOG_ROTATE, glusterd_handle_log_rotate, NULL, 0, DRC_NA},
- [GLUSTER_CLI_SET_VOLUME] = { "SET_VOLUME", GLUSTER_CLI_SET_VOLUME, glusterd_handle_set_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_SYNC_VOLUME] = { "SYNC_VOLUME", GLUSTER_CLI_SYNC_VOLUME, glusterd_handle_sync_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_RESET_VOLUME] = { "RESET_VOLUME", GLUSTER_CLI_RESET_VOLUME, glusterd_handle_reset_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_FSM_LOG] = { "FSM_LOG", GLUSTER_CLI_FSM_LOG, glusterd_handle_fsm_log, NULL, 0, DRC_NA},
- [GLUSTER_CLI_GSYNC_SET] = { "GSYNC_SET", GLUSTER_CLI_GSYNC_SET, glusterd_handle_gsync_set, NULL, 0, DRC_NA},
- [GLUSTER_CLI_PROFILE_VOLUME] = { "STATS_VOLUME", GLUSTER_CLI_PROFILE_VOLUME, glusterd_handle_cli_profile_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_QUOTA] = { "QUOTA", GLUSTER_CLI_QUOTA, glusterd_handle_quota, NULL, 0, DRC_NA},
- [GLUSTER_CLI_GETWD] = { "GETWD", GLUSTER_CLI_GETWD, glusterd_handle_getwd, NULL, 1, DRC_NA},
- [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", GLUSTER_CLI_STATUS_VOLUME, glusterd_handle_status_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_MOUNT] = { "MOUNT", GLUSTER_CLI_MOUNT, glusterd_handle_mount, NULL, 1, DRC_NA},
- [GLUSTER_CLI_UMOUNT] = { "UMOUNT", GLUSTER_CLI_UMOUNT, glusterd_handle_umount, NULL, 1, DRC_NA},
- [GLUSTER_CLI_HEAL_VOLUME] = { "HEAL_VOLUME", GLUSTER_CLI_HEAL_VOLUME, glusterd_handle_cli_heal_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME", GLUSTER_CLI_STATEDUMP_VOLUME, glusterd_handle_cli_statedump_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME, glusterd_handle_cli_list_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME", GLUSTER_CLI_CLRLOCKS_VOLUME, glusterd_handle_cli_clearlocks_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_COPY_FILE] = {"COPY_FILE", GLUSTER_CLI_COPY_FILE, glusterd_handle_copy_file, NULL, 0, DRC_NA},
- [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", GLUSTER_CLI_SYS_EXEC, glusterd_handle_sys_exec, NULL, 0, DRC_NA},
- [GLUSTER_CLI_SNAP] = {"SNAP", GLUSTER_CLI_SNAP, glusterd_handle_snapshot, NULL, 0, DRC_NA},
- [GLUSTER_CLI_BARRIER_VOLUME] = {"BARRIER_VOLUME", GLUSTER_CLI_BARRIER_VOLUME, glusterd_handle_barrier, NULL, 0, DRC_NA},
- [GLUSTER_CLI_GET_VOL_OPT] = {"GET_VOL_OPT", GLUSTER_CLI_GET_VOL_OPT, glusterd_handle_get_vol_opt, NULL, 0, DRC_NA},
- [GLUSTER_CLI_BITROT] = {"BITROT", GLUSTER_CLI_BITROT, glusterd_handle_bitrot, NULL, 0, DRC_NA},
- [GLUSTER_CLI_GET_STATE] = {"GET_STATE", GLUSTER_CLI_GET_STATE, glusterd_handle_get_state, NULL, 0, DRC_NA},
- [GLUSTER_CLI_RESET_BRICK] = {"RESET_BRICK", GLUSTER_CLI_RESET_BRICK, glusterd_handle_reset_brick, NULL, 0, DRC_NA},
- [GLUSTER_CLI_TIER] = {"TIER", GLUSTER_CLI_TIER, glusterd_handle_tier, NULL, 0, DRC_NA},
- [GLUSTER_CLI_REMOVE_TIER_BRICK] = {"REMOVE_TIER_BRICK", GLUSTER_CLI_REMOVE_TIER_BRICK, glusterd_handle_tier, NULL, 0, DRC_NA},
- [GLUSTER_CLI_ADD_TIER_BRICK] = {"ADD_TIER_BRICK", GLUSTER_CLI_ADD_TIER_BRICK, glusterd_handle_add_tier_brick, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_PROBE] = {"CLI_PROBE", GLUSTER_CLI_PROBE,
+ glusterd_handle_cli_probe, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_CREATE_VOLUME] = {"CLI_CREATE_VOLUME",
+ GLUSTER_CLI_CREATE_VOLUME,
+ glusterd_handle_create_volume, NULL, 0,
+ DRC_NA},
+ [GLUSTER_CLI_DEFRAG_VOLUME] = {"CLI_DEFRAG_VOLUME",
+ GLUSTER_CLI_DEFRAG_VOLUME,
+ glusterd_handle_defrag_volume, NULL, 0,
+ DRC_NA},
+ [GLUSTER_CLI_DEPROBE] = {"FRIEND_REMOVE", GLUSTER_CLI_DEPROBE,
+ glusterd_handle_cli_deprobe, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS,
+ glusterd_handle_cli_list_friends, NULL, 0,
+ DRC_NA},
+ [GLUSTER_CLI_UUID_RESET] = {"UUID_RESET", GLUSTER_CLI_UUID_RESET,
+ glusterd_handle_cli_uuid_reset, NULL, 0,
+ DRC_NA},
+ [GLUSTER_CLI_UUID_GET] = {"UUID_GET", GLUSTER_CLI_UUID_GET,
+ glusterd_handle_cli_uuid_get, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_START_VOLUME] = {"START_VOLUME", GLUSTER_CLI_START_VOLUME,
+ glusterd_handle_cli_start_volume, NULL, 0,
+ DRC_NA},
+ [GLUSTER_CLI_STOP_VOLUME] = {"STOP_VOLUME", GLUSTER_CLI_STOP_VOLUME,
+ glusterd_handle_cli_stop_volume, NULL, 0,
+ DRC_NA},
+ [GLUSTER_CLI_DELETE_VOLUME] = {"DELETE_VOLUME", GLUSTER_CLI_DELETE_VOLUME,
+ glusterd_handle_cli_delete_volume, NULL, 0,
+ DRC_NA},
+ [GLUSTER_CLI_GET_VOLUME] = {"GET_VOLUME", GLUSTER_CLI_GET_VOLUME,
+ glusterd_handle_cli_get_volume, NULL, 0,
+ DRC_NA},
+ [GLUSTER_CLI_ADD_BRICK] = {"ADD_BRICK", GLUSTER_CLI_ADD_BRICK,
+ glusterd_handle_add_brick, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_ATTACH_TIER] = {"ATTACH_TIER", GLUSTER_CLI_ATTACH_TIER,
+ glusterd_handle_attach_tier, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_REPLACE_BRICK] = {"REPLACE_BRICK", GLUSTER_CLI_REPLACE_BRICK,
+ glusterd_handle_replace_brick, NULL, 0,
+ DRC_NA},
+ [GLUSTER_CLI_REMOVE_BRICK] = {"REMOVE_BRICK", GLUSTER_CLI_REMOVE_BRICK,
+ glusterd_handle_remove_brick, NULL, 0,
+ DRC_NA},
+ [GLUSTER_CLI_LOG_ROTATE] = {"LOG FILENAME", GLUSTER_CLI_LOG_ROTATE,
+ glusterd_handle_log_rotate, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_SET_VOLUME] = {"SET_VOLUME", GLUSTER_CLI_SET_VOLUME,
+ glusterd_handle_set_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_SYNC_VOLUME] = {"SYNC_VOLUME", GLUSTER_CLI_SYNC_VOLUME,
+ glusterd_handle_sync_volume, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_RESET_VOLUME] = {"RESET_VOLUME", GLUSTER_CLI_RESET_VOLUME,
+ glusterd_handle_reset_volume, NULL, 0,
+ DRC_NA},
+ [GLUSTER_CLI_FSM_LOG] = {"FSM_LOG", GLUSTER_CLI_FSM_LOG,
+ glusterd_handle_fsm_log, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_GSYNC_SET] = {"GSYNC_SET", GLUSTER_CLI_GSYNC_SET,
+ glusterd_handle_gsync_set, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_PROFILE_VOLUME] = {"STATS_VOLUME", GLUSTER_CLI_PROFILE_VOLUME,
+ glusterd_handle_cli_profile_volume, NULL, 0,
+ DRC_NA},
+ [GLUSTER_CLI_QUOTA] = {"QUOTA", GLUSTER_CLI_QUOTA, glusterd_handle_quota,
+ NULL, 0, DRC_NA},
+ [GLUSTER_CLI_GETWD] = {"GETWD", GLUSTER_CLI_GETWD, glusterd_handle_getwd,
+ NULL, 1, DRC_NA},
+ [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", GLUSTER_CLI_STATUS_VOLUME,
+ glusterd_handle_status_volume, NULL, 0,
+ DRC_NA},
+ [GLUSTER_CLI_MOUNT] = {"MOUNT", GLUSTER_CLI_MOUNT, glusterd_handle_mount,
+ NULL, 1, DRC_NA},
+ [GLUSTER_CLI_UMOUNT] = {"UMOUNT", GLUSTER_CLI_UMOUNT,
+ glusterd_handle_umount, NULL, 1, DRC_NA},
+ [GLUSTER_CLI_HEAL_VOLUME] = {"HEAL_VOLUME", GLUSTER_CLI_HEAL_VOLUME,
+ glusterd_handle_cli_heal_volume, NULL, 0,
+ DRC_NA},
+ [GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME",
+ GLUSTER_CLI_STATEDUMP_VOLUME,
+ glusterd_handle_cli_statedump_volume,
+ NULL, 0, DRC_NA},
+ [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME,
+ glusterd_handle_cli_list_volume, NULL, 0,
+ DRC_NA},
+ [GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME",
+ GLUSTER_CLI_CLRLOCKS_VOLUME,
+ glusterd_handle_cli_clearlocks_volume,
+ NULL, 0, DRC_NA},
+ [GLUSTER_CLI_COPY_FILE] = {"COPY_FILE", GLUSTER_CLI_COPY_FILE,
+ glusterd_handle_copy_file, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", GLUSTER_CLI_SYS_EXEC,
+ glusterd_handle_sys_exec, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_SNAP] = {"SNAP", GLUSTER_CLI_SNAP, glusterd_handle_snapshot,
+ NULL, 0, DRC_NA},
+ [GLUSTER_CLI_BARRIER_VOLUME] = {"BARRIER_VOLUME",
+ GLUSTER_CLI_BARRIER_VOLUME,
+ glusterd_handle_barrier, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_GET_VOL_OPT] = {"GET_VOL_OPT", GLUSTER_CLI_GET_VOL_OPT,
+ glusterd_handle_get_vol_opt, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_BITROT] = {"BITROT", GLUSTER_CLI_BITROT,
+ glusterd_handle_bitrot, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_GET_STATE] = {"GET_STATE", GLUSTER_CLI_GET_STATE,
+ glusterd_handle_get_state, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_RESET_BRICK] = {"RESET_BRICK", GLUSTER_CLI_RESET_BRICK,
+ glusterd_handle_reset_brick, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_TIER] = {"TIER", GLUSTER_CLI_TIER, glusterd_handle_tier, NULL,
+ 0, DRC_NA},
+ [GLUSTER_CLI_REMOVE_TIER_BRICK] = {"REMOVE_TIER_BRICK",
+ GLUSTER_CLI_REMOVE_TIER_BRICK,
+ glusterd_handle_tier, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_ADD_TIER_BRICK] = {"ADD_TIER_BRICK",
+ GLUSTER_CLI_ADD_TIER_BRICK,
+ glusterd_handle_add_tier_brick, NULL, 0,
+ DRC_NA},
};
struct rpcsvc_program gd_svc_cli_prog = {
- .progname = "GlusterD svc cli",
- .prognum = GLUSTER_CLI_PROGRAM,
- .progver = GLUSTER_CLI_VERSION,
- .numactors = GLUSTER_CLI_MAXVALUE,
- .actors = gd_svc_cli_actors,
- .synctask = _gf_true,
+ .progname = "GlusterD svc cli",
+ .prognum = GLUSTER_CLI_PROGRAM,
+ .progver = GLUSTER_CLI_VERSION,
+ .numactors = GLUSTER_CLI_MAXVALUE,
+ .actors = gd_svc_cli_actors,
+ .synctask = _gf_true,
};
/**
@@ -6615,21 +6681,33 @@ struct rpcsvc_program gd_svc_cli_prog = {
* by geo-replication to support unprivileged master -> slave sessions.
*/
rpcsvc_actor_t gd_svc_cli_trusted_actors[GLUSTER_CLI_MAXVALUE] = {
- [GLUSTER_CLI_LIST_FRIENDS] = { "LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS, glusterd_handle_cli_list_friends, NULL, 0, DRC_NA},
- [GLUSTER_CLI_UUID_GET] = { "UUID_GET", GLUSTER_CLI_UUID_GET, glusterd_handle_cli_uuid_get, NULL, 0, DRC_NA},
- [GLUSTER_CLI_GET_VOLUME] = { "GET_VOLUME", GLUSTER_CLI_GET_VOLUME, glusterd_handle_cli_get_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_GETWD] = { "GETWD", GLUSTER_CLI_GETWD, glusterd_handle_getwd, NULL, 1, DRC_NA},
- [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", GLUSTER_CLI_STATUS_VOLUME, glusterd_handle_status_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME, glusterd_handle_cli_list_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_MOUNT] = { "MOUNT", GLUSTER_CLI_MOUNT, glusterd_handle_mount, NULL, 1, DRC_NA},
- [GLUSTER_CLI_UMOUNT] = { "UMOUNT", GLUSTER_CLI_UMOUNT, glusterd_handle_umount, NULL, 1, DRC_NA},
+ [GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS,
+ glusterd_handle_cli_list_friends, NULL, 0,
+ DRC_NA},
+ [GLUSTER_CLI_UUID_GET] = {"UUID_GET", GLUSTER_CLI_UUID_GET,
+ glusterd_handle_cli_uuid_get, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_GET_VOLUME] = {"GET_VOLUME", GLUSTER_CLI_GET_VOLUME,
+ glusterd_handle_cli_get_volume, NULL, 0,
+ DRC_NA},
+ [GLUSTER_CLI_GETWD] = {"GETWD", GLUSTER_CLI_GETWD, glusterd_handle_getwd,
+ NULL, 1, DRC_NA},
+ [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", GLUSTER_CLI_STATUS_VOLUME,
+ glusterd_handle_status_volume, NULL, 0,
+ DRC_NA},
+ [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME,
+ glusterd_handle_cli_list_volume, NULL, 0,
+ DRC_NA},
+ [GLUSTER_CLI_MOUNT] = {"MOUNT", GLUSTER_CLI_MOUNT, glusterd_handle_mount,
+ NULL, 1, DRC_NA},
+ [GLUSTER_CLI_UMOUNT] = {"UMOUNT", GLUSTER_CLI_UMOUNT,
+ glusterd_handle_umount, NULL, 1, DRC_NA},
};
struct rpcsvc_program gd_svc_cli_trusted_progs = {
- .progname = "GlusterD svc cli read-only",
- .prognum = GLUSTER_CLI_PROGRAM,
- .progver = GLUSTER_CLI_VERSION,
- .numactors = GLUSTER_CLI_MAXVALUE,
- .actors = gd_svc_cli_trusted_actors,
- .synctask = _gf_true,
+ .progname = "GlusterD svc cli read-only",
+ .prognum = GLUSTER_CLI_PROGRAM,
+ .progver = GLUSTER_CLI_VERSION,
+ .numactors = GLUSTER_CLI_MAXVALUE,
+ .actors = gd_svc_cli_trusted_actors,
+ .synctask = _gf_true,
};
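
The actor tables above (gd_svc_mgmt_actors, gd_svc_cli_actors, and the
trusted variant) are plain C arrays indexed by procedure number through
designated initializers, so rpcsvc can dispatch a request in constant time
and unregistered procedures stay NULL. A minimal sketch of that
table-dispatch shape, with hypothetical actor/dispatch types standing in
for the real rpcsvc_actor_t machinery:

#include <stddef.h>

typedef int (*actor_fn)(void *req);

struct actor {
    const char *name;
    int procnum;
    actor_fn fn;
};

static int
null_actor(void *req)
{
    (void)req;
    return 0;
}

/* Designated initializers keep each handler at the slot of its
 * procedure number; unlisted slots stay zeroed (no handler). */
static struct actor actors[] = {
    [0] = {"NULL", 0, null_actor},
};

static int
dispatch(int procnum, void *req)
{
    if (procnum < 0 || (size_t)procnum >= sizeof(actors) / sizeof(actors[0]))
        return -1;
    if (!actors[procnum].fn)
        return -1;
    return actors[procnum].fn(req);
}

int
main(void)
{
    return dispatch(0, NULL); /* invokes null_actor */
}
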
diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c
index 6b5940ce565..809d462dec7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c
@@ -35,459 +35,448 @@ extern struct rpc_clnt_program gd_peer_prog;
extern struct rpc_clnt_program gd_mgmt_prog;
extern struct rpc_clnt_program gd_mgmt_v3_prog;
+#define TRUSTED_PREFIX "trusted-"
+#define GD_PEER_ID_KEY "peer-id"
-#define TRUSTED_PREFIX "trusted-"
-#define GD_PEER_ID_KEY "peer-id"
-
-typedef ssize_t (*gfs_serialize_t) (struct iovec outmsg, void *data);
+typedef ssize_t (*gfs_serialize_t)(struct iovec outmsg, void *data);
static int
-get_snap_volname_and_volinfo (const char *volpath, char **volname,
- glusterd_volinfo_t **volinfo)
+get_snap_volname_and_volinfo(const char *volpath, char **volname,
+ glusterd_volinfo_t **volinfo)
{
- int ret = -1;
- char *save_ptr = NULL;
- char *str_token = NULL;
- char *snapname = NULL;
- char *volname_token = NULL;
- char *vol = NULL;
- glusterd_snap_t *snap = NULL;
- xlator_t *this = NULL;
- char *tmp_str_token = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (volpath);
- GF_ASSERT (volinfo);
-
- str_token = gf_strdup (volpath);
- if (NULL == str_token) {
- goto out;
- }
-
- tmp_str_token = str_token;
-
- /* Input volname will have below formats:
- * /snaps/<snapname>/<volname>.<hostname>
- * or
- * /snaps/<snapname>/<parent-volname>
- * We need to extract snapname and parent_volname */
-
- /*split string by "/" */
- strtok_r (str_token, "/", &save_ptr);
- snapname = strtok_r(NULL, "/", &save_ptr);
- if (!snapname) {
- gf_msg(this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY,
- "Invalid path: %s", volpath);
- goto out;
- }
-
- volname_token = strtok_r(NULL, "/", &save_ptr);
- if (!volname_token) {
- gf_msg (this->name, GF_LOG_ERROR,
- EINVAL, GD_MSG_INVALID_ENTRY,
- "Invalid path: %s", volpath);
- goto out;
- }
-
- snap = glusterd_find_snap_by_name (snapname);
- if (!snap) {
- gf_msg(this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_SNAP_NOT_FOUND, "Failed to "
- "fetch snap %s", snapname);
- goto out;
- }
-
- /* Find if its a parent volume name or snap volume
- * name. This function will succeed if volname_token
- * is a parent volname
- */
- ret = glusterd_volinfo_find (volname_token, volinfo);
+ int ret = -1;
+ char *save_ptr = NULL;
+ char *str_token = NULL;
+ char *snapname = NULL;
+ char *volname_token = NULL;
+ char *vol = NULL;
+ glusterd_snap_t *snap = NULL;
+ xlator_t *this = NULL;
+ char *tmp_str_token = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(volpath);
+ GF_ASSERT(volinfo);
+
+ str_token = gf_strdup(volpath);
+ if (NULL == str_token) {
+ goto out;
+ }
+
+ tmp_str_token = str_token;
+
+    /* Input volname will be in one of the below formats:
+     * /snaps/<snapname>/<volname>.<hostname>
+     * or
+     * /snaps/<snapname>/<parent-volname>
+     * We need to extract the snapname and parent_volname. */
+
+    /* split the string by "/" */
+ strtok_r(str_token, "/", &save_ptr);
+ snapname = strtok_r(NULL, "/", &save_ptr);
+ if (!snapname) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "Invalid path: %s", volpath);
+ goto out;
+ }
+
+ volname_token = strtok_r(NULL, "/", &save_ptr);
+ if (!volname_token) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "Invalid path: %s", volpath);
+ goto out;
+ }
+
+ snap = glusterd_find_snap_by_name(snapname);
+ if (!snap) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_SNAP_NOT_FOUND,
+ "Failed to "
+ "fetch snap %s",
+ snapname);
+ goto out;
+ }
+
+    /* Find out whether it's a parent volume name or a snap
+     * volume name. This lookup succeeds if volname_token
+     * is a parent volname.
+     */
+ ret = glusterd_volinfo_find(volname_token, volinfo);
+ if (ret) {
+ *volname = gf_strdup(volname_token);
+ if (NULL == *volname) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_snap_volinfo_find(volname_token, snap, volinfo);
if (ret) {
- *volname = gf_strdup (volname_token);
- if (NULL == *volname) {
- ret = -1;
- goto out;
- }
-
- ret = glusterd_snap_volinfo_find (volname_token, snap,
- volinfo);
- if (ret) {
- /* Split the volume name */
- vol = strtok_r (volname_token, ".", &save_ptr);
- if (!vol) {
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "Invalid "
- "volname (%s)", volname_token);
- goto out;
- }
-
- ret = glusterd_snap_volinfo_find (vol, snap, volinfo);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_SNAP_INFO_FAIL, "Failed to "
- "fetch snap volume from volname (%s)",
- vol);
- goto out;
- }
- }
- } else {
- /*volname_token is parent volname*/
- ret = glusterd_snap_volinfo_find_from_parent_volname (
- volname_token, snap, volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SNAP_INFO_FAIL, "Failed to "
- "fetch snap volume from parent "
- "volname (%s)", volname_token);
- goto out;
- }
-
- /* Since volname_token is a parent volname we should
- * get the snap volname here*/
- *volname = gf_strdup ((*volinfo)->volname);
- if (NULL == *volname) {
- ret = -1;
- goto out;
- }
+ /* Split the volume name */
+ vol = strtok_r(volname_token, ".", &save_ptr);
+ if (!vol) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "Invalid "
+ "volname (%s)",
+ volname_token);
+ goto out;
+ }
+
+ ret = glusterd_snap_volinfo_find(vol, snap, volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAP_INFO_FAIL,
+ "Failed to "
+ "fetch snap volume from volname (%s)",
+ vol);
+ goto out;
+ }
+ }
+ } else {
+        /* volname_token is a parent volname */
+ ret = glusterd_snap_volinfo_find_from_parent_volname(volname_token,
+ snap, volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAP_INFO_FAIL,
+ "Failed to "
+ "fetch snap volume from parent "
+ "volname (%s)",
+ volname_token);
+ goto out;
}
-out:
- if (ret && NULL != *volname) {
- GF_FREE (*volname);
- *volname = NULL;
+        /* Since volname_token is a parent volname, we should
+         * get the snap volname here */
+ *volname = gf_strdup((*volinfo)->volname);
+ if (NULL == *volname) {
+ ret = -1;
+ goto out;
}
+ }
- if (tmp_str_token)
- GF_FREE (tmp_str_token);
- return ret;
+out:
+ if (ret && NULL != *volname) {
+ GF_FREE(*volname);
+ *volname = NULL;
+ }
+
+ if (tmp_str_token)
+ GF_FREE(tmp_str_token);
+ return ret;
}
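
get_snap_volname_and_volinfo() above walks the /snaps/<snapname>/<volname>
path with strtok_r(), discarding the leading component before picking off
the snap and volume names. A minimal sketch of just that tokenization,
assuming a writable buffer and no glusterd lookups:

#include <stdio.h>
#include <string.h>

/* Extract snapname and volname from "/snaps/<snapname>/<volname>".
 * Returns 0 on success; the outputs point into 'buf', which strtok_r
 * mutates in place, so the caller must own a writable copy. */
static int
parse_snap_volpath(char *buf, char **snapname, char **volname)
{
    char *save = NULL;

    (void)strtok_r(buf, "/", &save);        /* skip the "snaps" prefix */
    *snapname = strtok_r(NULL, "/", &save); /* "<snapname>" */
    *volname = strtok_r(NULL, "/", &save);  /* "<volname>" */
    return (*snapname && *volname) ? 0 : -1;
}

int
main(void)
{
    char path[] = "/snaps/snap1/vol1.host1";
    char *snap = NULL, *vol = NULL;

    if (parse_snap_volpath(path, &snap, &vol) == 0)
        printf("snap=%s vol=%s\n", snap, vol);
    return 0;
}
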
int32_t
-glusterd_get_client_per_brick_volfile (glusterd_volinfo_t *volinfo,
- char *filename, char *path, int path_len)
+glusterd_get_client_per_brick_volfile(glusterd_volinfo_t *volinfo,
+ char *filename, char *path, int path_len)
{
- char workdir[PATH_MAX] = {0,};
- glusterd_conf_t *priv = NULL;
- int32_t ret = -1;
+ char workdir[PATH_MAX] = {
+ 0,
+ };
+ glusterd_conf_t *priv = NULL;
+ int32_t ret = -1;
- GF_VALIDATE_OR_GOTO ("glusterd", THIS, out);
- priv = THIS->private;
- GF_VALIDATE_OR_GOTO (THIS->name, priv, out);
+ GF_VALIDATE_OR_GOTO("glusterd", THIS, out);
+ priv = THIS->private;
+ GF_VALIDATE_OR_GOTO(THIS->name, priv, out);
- GLUSTERD_GET_VOLUME_DIR (workdir, volinfo, priv);
+ GLUSTERD_GET_VOLUME_DIR(workdir, volinfo, priv);
- snprintf (path, path_len, "%s/%s", workdir, filename);
+ snprintf(path, path_len, "%s/%s", workdir, filename);
- ret = 0;
+ ret = 0;
out:
- return ret;
+ return ret;
}
size_t
-build_volfile_path (char *volume_id, char *path,
- size_t path_len, char *trusted_str)
+build_volfile_path(char *volume_id, char *path, size_t path_len,
+ char *trusted_str)
{
- struct stat stbuf = {0,};
- int32_t ret = -1;
- char *vol = NULL;
- char *dup_volname = NULL;
- char *save_ptr = NULL;
- char *free_ptr = NULL;
- char *volname = NULL;
- char *volid_ptr = NULL;
- char dup_volid[PATH_MAX] = {0,};
- char path_prefix[PATH_MAX] = {0,};
- xlator_t *this = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_conf_t *priv = NULL;
- int32_t len = 0;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
- GF_ASSERT (volume_id);
- GF_ASSERT (path);
-
- volid_ptr = strstr (volume_id, "snapd/");
- if (volid_ptr) {
- volid_ptr = strchr (volid_ptr, '/');
- if (!volid_ptr) {
- ret = -1;
- goto out;
- }
- volid_ptr++;
-
- ret = glusterd_volinfo_find (volid_ptr, &volinfo);
- if (ret == -1) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLINFO_GET_FAIL,
- "Couldn't find volinfo");
- goto out;
- }
- glusterd_svc_build_snapd_volfile (volinfo, path, path_len);
- ret = 0;
- goto out;
+ struct stat stbuf = {
+ 0,
+ };
+ int32_t ret = -1;
+ char *vol = NULL;
+ char *dup_volname = NULL;
+ char *save_ptr = NULL;
+ char *free_ptr = NULL;
+ char *volname = NULL;
+ char *volid_ptr = NULL;
+ char dup_volid[PATH_MAX] = {
+ 0,
+ };
+ char path_prefix[PATH_MAX] = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ int32_t len = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+ GF_ASSERT(volume_id);
+ GF_ASSERT(path);
+
+ volid_ptr = strstr(volume_id, "snapd/");
+ if (volid_ptr) {
+ volid_ptr = strchr(volid_ptr, '/');
+ if (!volid_ptr) {
+ ret = -1;
+ goto out;
+ }
+ volid_ptr++;
+
+ ret = glusterd_volinfo_find(volid_ptr, &volinfo);
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+ "Couldn't find volinfo");
+ goto out;
+ }
+ glusterd_svc_build_snapd_volfile(volinfo, path, path_len);
+ ret = 0;
+ goto out;
+ }
+ volid_ptr = strstr(volume_id, "tierd/");
+ if (volid_ptr) {
+ volid_ptr = strchr(volid_ptr, '/');
+ if (!volid_ptr) {
+ ret = -1;
+ goto out;
}
+ volid_ptr++;
- volid_ptr = strstr (volume_id, "tierd/");
- if (volid_ptr) {
- volid_ptr = strchr (volid_ptr, '/');
- if (!volid_ptr) {
- ret = -1;
- goto out;
- }
- volid_ptr++;
-
- ret = glusterd_volinfo_find (volid_ptr, &volinfo);
- if (ret == -1) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLINFO_GET_FAIL,
- "Couldn't find volinfo");
- goto out;
- }
- glusterd_svc_build_tierd_volfile_path (volinfo, path, path_len);
- ret = 0;
- goto out;
-
+ ret = glusterd_volinfo_find(volid_ptr, &volinfo);
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+ "Couldn't find volinfo");
+ goto out;
}
+ glusterd_svc_build_tierd_volfile_path(volinfo, path, path_len);
+ ret = 0;
+ goto out;
+ }
- volid_ptr = strstr (volume_id, "gluster/");
- if (volid_ptr) {
- volid_ptr = strchr (volid_ptr, '/');
- if (!volid_ptr) {
- ret = -1;
- goto out;
- }
- volid_ptr++;
+ volid_ptr = strstr(volume_id, "gluster/");
+ if (volid_ptr) {
+ volid_ptr = strchr(volid_ptr, '/');
+ if (!volid_ptr) {
+ ret = -1;
+ goto out;
+ }
+ volid_ptr++;
- glusterd_svc_build_volfile_path (volid_ptr,
- priv->workdir,
- path, path_len);
- ret = 0;
- goto out;
+ glusterd_svc_build_volfile_path(volid_ptr, priv->workdir, path,
+ path_len);
+ ret = 0;
+ goto out;
+ }
+ volid_ptr = strstr(volume_id, "gfproxy-client/");
+ if (volid_ptr) {
+ volid_ptr = strchr(volid_ptr, '/');
+ if (!volid_ptr) {
+ ret = -1;
+ goto out;
}
+ volid_ptr++;
- volid_ptr = strstr (volume_id, "gfproxy-client/");
- if (volid_ptr) {
- volid_ptr = strchr (volid_ptr, '/');
- if (!volid_ptr) {
- ret = -1;
- goto out;
- }
- volid_ptr++;
+ ret = glusterd_volinfo_find(volid_ptr, &volinfo);
+ if (ret == -1) {
+ gf_log(this->name, GF_LOG_ERROR, "Couldn't find volinfo");
+ goto out;
+ }
- ret = glusterd_volinfo_find (volid_ptr, &volinfo);
- if (ret == -1) {
- gf_log (this->name, GF_LOG_ERROR,
- "Couldn't find volinfo");
- goto out;
- }
+ glusterd_get_gfproxy_client_volfile(volinfo, path, path_len);
- glusterd_get_gfproxy_client_volfile (volinfo, path, path_len);
+ ret = 0;
+ goto out;
+ }
- ret = 0;
- goto out;
+ volid_ptr = strstr(volume_id, "gfproxyd/");
+ if (volid_ptr) {
+ volid_ptr = strchr(volid_ptr, '/');
+ if (!volid_ptr) {
+ ret = -1;
+ goto out;
}
+ volid_ptr++;
- volid_ptr = strstr (volume_id, "gfproxyd/");
- if (volid_ptr) {
- volid_ptr = strchr (volid_ptr, '/');
- if (!volid_ptr) {
- ret = -1;
- goto out;
- }
- volid_ptr++;
-
- ret = glusterd_volinfo_find (volid_ptr, &volinfo);
- if (ret == -1) {
- gf_log (this->name, GF_LOG_ERROR,
- "Couldn't find volinfo");
- goto out;
- }
-
- glusterd_svc_build_gfproxyd_volfile_path (volinfo, path,
- path_len);
- ret = 0;
- goto out;
+ ret = glusterd_volinfo_find(volid_ptr, &volinfo);
+ if (ret == -1) {
+ gf_log(this->name, GF_LOG_ERROR, "Couldn't find volinfo");
+ goto out;
}
- volid_ptr = strstr (volume_id, "/snaps/");
- if (volid_ptr) {
- ret = get_snap_volname_and_volinfo (volid_ptr, &volname,
- &volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SNAP_INFO_FAIL, "Failed to get snap"
- " volinfo from path (%s)", volume_id);
- ret = -1;
- goto out;
- }
+ glusterd_svc_build_gfproxyd_volfile_path(volinfo, path, path_len);
+ ret = 0;
+ goto out;
+ }
- len = snprintf (path_prefix, sizeof (path_prefix),
- "%s/snaps/%s", priv->workdir,
- volinfo->snapshot->snapname);
- volid_ptr = volname;
- /* this is to ensure that volname recvd from
- get_snap_volname_and_volinfo is free'd */
- free_ptr = volname;
- if ((len < 0) || (len >= sizeof(path_prefix))) {
- ret = -1;
- goto out;
- }
+ volid_ptr = strstr(volume_id, "/snaps/");
+ if (volid_ptr) {
+ ret = get_snap_volname_and_volinfo(volid_ptr, &volname, &volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAP_INFO_FAIL,
+ "Failed to get snap"
+ " volinfo from path (%s)",
+ volume_id);
+ ret = -1;
+ goto out;
+ }
+
+ len = snprintf(path_prefix, sizeof(path_prefix), "%s/snaps/%s",
+ priv->workdir, volinfo->snapshot->snapname);
+ volid_ptr = volname;
+        /* this is to ensure that the volname received from
+           get_snap_volname_and_volinfo is freed */
+ free_ptr = volname;
+ if ((len < 0) || (len >= sizeof(path_prefix))) {
+ ret = -1;
+ goto out;
+ }
- goto gotvolinfo;
+ goto gotvolinfo;
+ }
+ volid_ptr = strstr(volume_id, "rebalance/");
+ if (volid_ptr) {
+ volid_ptr = strchr(volid_ptr, '/');
+ if (!volid_ptr) {
+ ret = -1;
+ goto out;
}
+ volid_ptr++;
- volid_ptr = strstr (volume_id, "rebalance/");
- if (volid_ptr) {
- volid_ptr = strchr (volid_ptr, '/');
- if (!volid_ptr) {
- ret = -1;
- goto out;
- }
- volid_ptr++;
-
- ret = glusterd_volinfo_find (volid_ptr, &volinfo);
- if (ret == -1) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLINFO_GET_FAIL,
- "Couldn't find volinfo");
- goto out;
- }
- glusterd_get_rebalance_volfile (volinfo, path, path_len);
- ret = 0;
- goto out;
+ ret = glusterd_volinfo_find(volid_ptr, &volinfo);
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+ "Couldn't find volinfo");
+ goto out;
+ }
+ glusterd_get_rebalance_volfile(volinfo, path, path_len);
+ ret = 0;
+ goto out;
+ }
+
+ volid_ptr = strstr(volume_id, "client_per_brick/");
+ if (volid_ptr) {
+ volid_ptr = strchr(volid_ptr, '/');
+ if (!volid_ptr) {
+ ret = -1;
+ goto out;
+ }
+ volid_ptr++;
+
+ dup_volname = gf_strdup(volid_ptr);
+ if (!dup_volname) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "strdup failed");
+ ret = -1;
+ goto out;
+ }
+
+ /* Split the volume name */
+ vol = strtok_r(dup_volname, ".", &save_ptr);
+ if (!vol) {
+ ret = -1;
+ goto out;
+ }
+ ret = glusterd_volinfo_find(vol, &volinfo);
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+ "Couldn't find volinfo");
+ goto out;
+ }
+ ret = glusterd_get_client_per_brick_volfile(volinfo, volid_ptr, path,
+ path_len);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_MEMORY,
+ "failed to get volinfo path");
+ goto out;
}
- volid_ptr = strstr (volume_id, "client_per_brick/");
- if (volid_ptr) {
- volid_ptr = strchr (volid_ptr, '/');
- if (!volid_ptr) {
- ret = -1;
- goto out;
- }
- volid_ptr++;
-
- dup_volname = gf_strdup (volid_ptr);
- if (!dup_volname) {
- gf_msg (this->name, GF_LOG_ERROR, ENOMEM,
- GD_MSG_NO_MEMORY,
- "strdup failed");
- ret = -1;
- goto out;
- }
+ ret = sys_access(path, F_OK);
+ goto out;
+ }
- /* Split the volume name */
- vol = strtok_r (dup_volname, ".", &save_ptr);
- if (!vol) {
- ret = -1;
- goto out;
- }
- ret = glusterd_volinfo_find (vol, &volinfo);
- if (ret == -1) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLINFO_GET_FAIL,
- "Couldn't find volinfo");
- goto out;
- }
- ret = glusterd_get_client_per_brick_volfile (volinfo, volid_ptr,
- path, path_len);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_NO_MEMORY, "failed to get volinfo path");
- goto out;
- }
+ if (volume_id[0] == '/') {
+ /* Normal behavior */
+ volid_ptr = volume_id;
+ volid_ptr++;
- ret = sys_access (path, F_OK);
- goto out;
- }
+ } else {
+        /* Bringing in NFS-like behavior for the mount command, */
+        /* With this, one can mount a volume with the cmd below */
+ /* bash# mount -t glusterfs server:/volume /mnt/pnt */
+ volid_ptr = volume_id;
+ }
- if (volume_id[0] == '/') {
- /* Normal behavior */
- volid_ptr = volume_id;
- volid_ptr++;
+ len = snprintf(path_prefix, sizeof(path_prefix), "%s/vols", priv->workdir);
+ if ((len < 0) || (len >= sizeof(path_prefix))) {
+ ret = -1;
+ goto out;
+ }
- } else {
- /* Bringing in NFS like behavior for mount command, */
- /* With this, one can mount a volume with below cmd */
- /* bash# mount -t glusterfs server:/volume /mnt/pnt */
- volid_ptr = volume_id;
- }
+ ret = glusterd_volinfo_find(volid_ptr, &volinfo);
- len = snprintf (path_prefix, sizeof (path_prefix), "%s/vols",
- priv->workdir);
- if ((len < 0) || (len >= sizeof(path_prefix))) {
- ret = -1;
- goto out;
+ if (ret) {
+ dup_volname = gf_strdup(volid_ptr);
+ if (!dup_volname) {
+ ret = -1;
+ goto out;
}
-
- ret = glusterd_volinfo_find (volid_ptr, &volinfo);
-
- if (ret) {
- dup_volname = gf_strdup (volid_ptr);
- if (!dup_volname) {
- ret = -1;
- goto out;
- }
- /* Split the volume name */
- vol = strtok_r (dup_volname, ".", &save_ptr);
- if (!vol) {
- ret = -1;
- goto out;
- }
- ret = glusterd_volinfo_find (vol, &volinfo);
- if (ret)
- goto out;
+ /* Split the volume name */
+ vol = strtok_r(dup_volname, ".", &save_ptr);
+ if (!vol) {
+ ret = -1;
+ goto out;
}
+ ret = glusterd_volinfo_find(vol, &volinfo);
+ if (ret)
+ goto out;
+ }
gotvolinfo:
- if (!glusterd_auth_get_username (volinfo))
- trusted_str = NULL;
-
- ret = snprintf (path, path_len, "%s/%s/%s.vol", path_prefix,
- volinfo->volname, volid_ptr);
- if (ret == -1)
- goto out;
-
- ret = sys_stat (path, &stbuf);
-
- if ((ret == -1) && (errno == ENOENT)) {
- if (snprintf (dup_volid, PATH_MAX, "%s", volid_ptr)
- >= PATH_MAX)
- goto out;
- if (!strchr (dup_volid, '.')) {
- switch (volinfo->transport_type) {
- case GF_TRANSPORT_TCP:
- strcat (dup_volid, ".tcp");
- break;
- case GF_TRANSPORT_RDMA:
- strcat (dup_volid, ".rdma");
- break;
- case GF_TRANSPORT_BOTH_TCP_RDMA:
- strcat (dup_volid, ".tcp");
- break;
- default:
- break;
- }
- }
- snprintf (path, path_len, "%s/%s/%s%s-fuse.vol",
- path_prefix, volinfo->volname,
- (trusted_str ? trusted_str : ""),
- dup_volid);
- ret = sys_stat (path, &stbuf);
- }
+ if (!glusterd_auth_get_username(volinfo))
+ trusted_str = NULL;
+
+ ret = snprintf(path, path_len, "%s/%s/%s.vol", path_prefix,
+ volinfo->volname, volid_ptr);
+ if (ret == -1)
+ goto out;
+
+ ret = sys_stat(path, &stbuf);
+
+ if ((ret == -1) && (errno == ENOENT)) {
+ if (snprintf(dup_volid, PATH_MAX, "%s", volid_ptr) >= PATH_MAX)
+ goto out;
+ if (!strchr(dup_volid, '.')) {
+ switch (volinfo->transport_type) {
+ case GF_TRANSPORT_TCP:
+ strcat(dup_volid, ".tcp");
+ break;
+ case GF_TRANSPORT_RDMA:
+ strcat(dup_volid, ".rdma");
+ break;
+ case GF_TRANSPORT_BOTH_TCP_RDMA:
+ strcat(dup_volid, ".tcp");
+ break;
+ default:
+ break;
+ }
+ }
+ snprintf(path, path_len, "%s/%s/%s%s-fuse.vol", path_prefix,
+ volinfo->volname, (trusted_str ? trusted_str : ""), dup_volid);
+ ret = sys_stat(path, &stbuf);
+ }
out:
- if (dup_volname)
- GF_FREE (dup_volname);
- if (free_ptr)
- GF_FREE (free_ptr);
- return ret;
+ if (dup_volname)
+ GF_FREE(dup_volname);
+ if (free_ptr)
+ GF_FREE(free_ptr);
+ return ret;
}
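/* [Editor's sketch, not part of the patch] Every branch of
 * build_volfile_path() above repeats one idiom: find a known prefix with
 * strstr(), then step past the '/' embedded in that prefix to reach the
 * volume name. A minimal, libc-only version of that idiom, with a
 * hypothetical helper name, would look like this:
 *
 *     #include <string.h>
 *
 *     static const char *
 *     volname_after_prefix(const char *volume_id, const char *prefix)
 *     {
 *         const char *p = strstr(volume_id, prefix);
 *
 *         if (!p)
 *             return NULL;
 *         p = strchr(p, '/');   // the '/' is part of prefixes like "snapd/"
 *         if (!p)
 *             return NULL;
 *         return p + 1;         // first character of the volume name
 *     }
 *
 * e.g. volname_after_prefix("snapd/vol0", "snapd/") yields "vol0", which is
 * then resolved through glusterd_volinfo_find() as in the code above.
 */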
/* Get and store op-versions of the clients sending the getspec request
@@ -495,672 +484,652 @@ out:
* defaulted to 1. Also fetch brick_name.
*/
int32_t
-glusterd_get_args_from_dict (gf_getspec_req *args, peer_info_t *peerinfo,
- char **brick_name)
+glusterd_get_args_from_dict(gf_getspec_req *args, peer_info_t *peerinfo,
+ char **brick_name)
{
- dict_t *dict = NULL;
- int client_max_op_version = 1;
- int client_min_op_version = 1;
- int32_t ret = -1;
- xlator_t *this = NULL;
- char *name = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (args);
- GF_ASSERT (peerinfo);
-
- if (!args->xdata.xdata_len) {
- ret = 0;
- goto out;
- }
-
- dict = dict_new ();
- if (!dict) {
- ret = -1;
- goto out;
- }
-
- ret = dict_unserialize (args->xdata.xdata_val,
- args->xdata.xdata_len, &dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "Failed to unserialize request dictionary");
- goto out;
- }
-
- ret = dict_get_int32 (dict, "min-op-version",
- &client_min_op_version);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Failed to get client-min-op-version");
- goto out;
- }
-
- ret = dict_get_int32 (dict, "max-op-version",
- &client_max_op_version);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Failed to get client-max-op-version");
- goto out;
- }
+ dict_t *dict = NULL;
+ int client_max_op_version = 1;
+ int client_min_op_version = 1;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ char *name = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(args);
+ GF_ASSERT(peerinfo);
+
+ if (!args->xdata.xdata_len) {
+ ret = 0;
+ goto out;
+ }
- ret = dict_get_str (dict, "brick_name", &name);
- if (ret) {
- gf_msg_debug (this->name, 0,
- "No brick name present");
- ret = 0;
- goto out;
- }
- *brick_name = gf_strdup(name);
- if (*brick_name == NULL) {
- ret = -1;
- goto out;
- }
+ dict = dict_new();
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize(args->xdata.xdata_val, args->xdata.xdata_len, &dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "Failed to unserialize request dictionary");
+ goto out;
+ }
+
+ ret = dict_get_int32(dict, "min-op-version", &client_min_op_version);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Failed to get client-min-op-version");
+ goto out;
+ }
+
+ ret = dict_get_int32(dict, "max-op-version", &client_max_op_version);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Failed to get client-max-op-version");
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "brick_name", &name);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "No brick name present");
+ ret = 0;
+ goto out;
+ }
+ *brick_name = gf_strdup(name);
+ if (*brick_name == NULL) {
+ ret = -1;
+ goto out;
+ }
- gf_msg_debug (this->name, 0, "brick_name = %s", *brick_name);
+ gf_msg_debug(this->name, 0, "brick_name = %s", *brick_name);
out:
- peerinfo->max_op_version = client_max_op_version;
- peerinfo->min_op_version = client_min_op_version;
-
- if (dict)
- dict_unref (dict);
+ peerinfo->max_op_version = client_max_op_version;
+ peerinfo->min_op_version = client_min_op_version;
+ if (dict)
+ dict_unref(dict);
- return ret;
+ return ret;
}
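/* [Editor's sketch] glusterd_get_args_from_dict() above is the server half
 * of a small protocol: the client serializes a dict into args->xdata. Using
 * the same dict API seen elsewhere in this file, the client side would look
 * roughly like this (helper name is hypothetical; "brick_name" is optional,
 * and a client that sends no xdata at all is defaulted to op-version 1, as
 * the comment above notes):
 *
 *     static int
 *     pack_getspec_xdata(dict_t *dict)
 *     {
 *         int ret = dict_set_int32(dict, "min-op-version",
 *                                  GD_OP_VERSION_MIN);
 *
 *         if (!ret)
 *             ret = dict_set_int32(dict, "max-op-version",
 *                                  GD_OP_VERSION_MAX);
 *         return ret;
 *     }
 */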
/* Given the missed_snapinfo and snap_opinfo take the
* missed lvm snapshot
*/
int32_t
-glusterd_create_missed_snap (glusterd_missed_snap_info *missed_snapinfo,
- glusterd_snap_op_t *snap_opinfo)
+glusterd_create_missed_snap(glusterd_missed_snap_info *missed_snapinfo,
+ glusterd_snap_op_t *snap_opinfo)
{
- char *device = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_snap_t *snap = NULL;
- glusterd_volinfo_t *snap_vol = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- int32_t ret = -1;
- int32_t i = 0;
- uuid_t snap_uuid = {0,};
- xlator_t *this = NULL;
- char *mnt_device = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
- GF_ASSERT (missed_snapinfo);
- GF_ASSERT (snap_opinfo);
-
- gf_uuid_parse (missed_snapinfo->snap_uuid, snap_uuid);
-
- /* Find the snap-object */
- snap = glusterd_find_snap_by_id (snap_uuid);
- if (!snap) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SNAP_NOT_FOUND,
- "Unable to find the snap with snap_uuid %s",
- missed_snapinfo->snap_uuid);
- ret = -1;
- goto out;
- }
-
- /* Find the snap_vol */
- cds_list_for_each_entry (volinfo, &snap->volumes, vol_list) {
- if (!strcmp (volinfo->volname,
- snap_opinfo->snap_vol_id)) {
- snap_vol = volinfo;
- break;
- }
- }
-
- if (!snap_vol) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND,
- "Unable to find the snap_vol(%s) "
- "for snap(%s)", snap_opinfo->snap_vol_id,
- snap->snapname);
- ret = -1;
- goto out;
- }
-
- /* Find the missed brick in the snap volume */
- cds_list_for_each_entry (brickinfo, &snap_vol->bricks, brick_list) {
- i++;
- if (i == snap_opinfo->brick_num)
- break;
- }
-
- if (brickinfo->snap_status != -1) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SNAP_STATUS_NOT_PENDING,
- "The snap status of the missed "
- "brick(%s) is not pending", brickinfo->path);
- goto out;
- }
-
- /* Fetch the device path */
- mnt_device = glusterd_get_brick_mount_device (snap_opinfo->brick_path);
- if (!mnt_device) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_GET_INFO_FAIL,
- "Getting device name for the"
- "brick %s:%s failed", brickinfo->hostname,
- snap_opinfo->brick_path);
- ret = -1;
- goto out;
- }
-
- device = glusterd_build_snap_device_path (mnt_device,
- snap_vol->volname,
- snap_opinfo->brick_num - 1);
- if (!device) {
- gf_msg (this->name, GF_LOG_ERROR, ENXIO,
- GD_MSG_SNAP_DEVICE_NAME_GET_FAIL,
- "cannot copy the snapshot "
- "device name (volname: %s, snapname: %s)",
- snap_vol->volname, snap->snapname);
- ret = -1;
- goto out;
- }
- if (snprintf (brickinfo->device_path ,
- sizeof (brickinfo->device_path), "%s", device) >=
- sizeof (brickinfo->device_path)) {
- gf_msg (this->name, GF_LOG_ERROR, ENXIO,
- GD_MSG_SNAP_DEVICE_NAME_GET_FAIL,
- "cannot copy the device_path "
- "(device_path: %s)", brickinfo->device_path);
- ret = -1;
- goto out;
- }
-
- /* Update the backend file-system type of snap brick in
- * snap volinfo. */
- ret = glusterd_update_mntopts (snap_opinfo->brick_path, brickinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRK_MOUNTOPTS_FAIL, "Failed to update "
- "mount options for %s brick", brickinfo->path);
- /* We should not fail snapshot operation if we fail to get
- * the file-system type */
- }
-
- ret = glusterd_take_lvm_snapshot (brickinfo, snap_opinfo->brick_path);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SNAPSHOT_OP_FAILED,
- "Failed to take snapshot of %s",
- snap_opinfo->brick_path);
- goto out;
- }
-
- /* After the snapshot both the origin brick (LVM brick) and
- * the snapshot brick will have the same file-system label. This
- * will cause lot of problems at mount time. Therefore we must
- * generate a new label for the snapshot brick
+ char *device = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_snap_t *snap = NULL;
+ glusterd_volinfo_t *snap_vol = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ int32_t ret = -1;
+ int32_t i = 0;
+ uuid_t snap_uuid = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ char *mnt_device = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+ GF_ASSERT(missed_snapinfo);
+ GF_ASSERT(snap_opinfo);
+
+ gf_uuid_parse(missed_snapinfo->snap_uuid, snap_uuid);
+
+ /* Find the snap-object */
+ snap = glusterd_find_snap_by_id(snap_uuid);
+ if (!snap) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAP_NOT_FOUND,
+ "Unable to find the snap with snap_uuid %s",
+ missed_snapinfo->snap_uuid);
+ ret = -1;
+ goto out;
+ }
+
+ /* Find the snap_vol */
+ cds_list_for_each_entry(volinfo, &snap->volumes, vol_list)
+ {
+ if (!strcmp(volinfo->volname, snap_opinfo->snap_vol_id)) {
+ snap_vol = volinfo;
+ break;
+ }
+ }
+
+ if (!snap_vol) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ "Unable to find the snap_vol(%s) "
+ "for snap(%s)",
+ snap_opinfo->snap_vol_id, snap->snapname);
+ ret = -1;
+ goto out;
+ }
+
+ /* Find the missed brick in the snap volume */
+ cds_list_for_each_entry(brickinfo, &snap_vol->bricks, brick_list)
+ {
+ i++;
+ if (i == snap_opinfo->brick_num)
+ break;
+ }
+
+ if (brickinfo->snap_status != -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAP_STATUS_NOT_PENDING,
+ "The snap status of the missed "
+ "brick(%s) is not pending",
+ brickinfo->path);
+ goto out;
+ }
+
+ /* Fetch the device path */
+ mnt_device = glusterd_get_brick_mount_device(snap_opinfo->brick_path);
+ if (!mnt_device) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_GET_INFO_FAIL,
+ "Getting device name for the"
+ "brick %s:%s failed",
+ brickinfo->hostname, snap_opinfo->brick_path);
+ ret = -1;
+ goto out;
+ }
+
+ device = glusterd_build_snap_device_path(mnt_device, snap_vol->volname,
+ snap_opinfo->brick_num - 1);
+ if (!device) {
+ gf_msg(this->name, GF_LOG_ERROR, ENXIO,
+ GD_MSG_SNAP_DEVICE_NAME_GET_FAIL,
+ "cannot copy the snapshot "
+ "device name (volname: %s, snapname: %s)",
+ snap_vol->volname, snap->snapname);
+ ret = -1;
+ goto out;
+ }
+ if (snprintf(brickinfo->device_path, sizeof(brickinfo->device_path), "%s",
+ device) >= sizeof(brickinfo->device_path)) {
+ gf_msg(this->name, GF_LOG_ERROR, ENXIO,
+ GD_MSG_SNAP_DEVICE_NAME_GET_FAIL,
+ "cannot copy the device_path "
+ "(device_path: %s)",
+ brickinfo->device_path);
+ ret = -1;
+ goto out;
+ }
+
+ /* Update the backend file-system type of snap brick in
+ * snap volinfo. */
+ ret = glusterd_update_mntopts(snap_opinfo->brick_path, brickinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRK_MOUNTOPTS_FAIL,
+ "Failed to update "
+ "mount options for %s brick",
+ brickinfo->path);
+ /* We should not fail snapshot operation if we fail to get
+ * the file-system type */
+ }
+
+ ret = glusterd_take_lvm_snapshot(brickinfo, snap_opinfo->brick_path);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAPSHOT_OP_FAILED,
+ "Failed to take snapshot of %s", snap_opinfo->brick_path);
+ goto out;
+ }
+
+ /* After the snapshot both the origin brick (LVM brick) and
+ * the snapshot brick will have the same file-system label. This
+ * will cause lot of problems at mount time. Therefore we must
+ * generate a new label for the snapshot brick
+ */
+ ret = glusterd_update_fs_label(brickinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_SET_INFO_FAIL,
+ "Failed to update "
+ "file-system label for %s brick",
+ brickinfo->path);
+ /* Failing to update label should not cause snapshot failure.
+ * Currently label is updated only for XFS and ext2/ext3/ext4
+ * file-system.
*/
- ret = glusterd_update_fs_label (brickinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_SET_INFO_FAIL, "Failed to update "
- "file-system label for %s brick", brickinfo->path);
- /* Failing to update label should not cause snapshot failure.
- * Currently label is updated only for XFS and ext2/ext3/ext4
- * file-system.
- */
- }
-
- /* Create and mount the snap brick */
- ret = glusterd_snap_brick_create (snap_vol, brickinfo,
- snap_opinfo->brick_num - 1, 0);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_CREATION_FAIL, "Failed to "
- " create and mount the brick(%s) for the snap %s",
- snap_opinfo->brick_path,
- snap_vol->snapshot->snapname);
- goto out;
- }
-
- brickinfo->snap_status = 0;
- ret = glusterd_brick_start (snap_vol, brickinfo, _gf_false, _gf_false);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_BRICK_DISCONNECTED, "starting the "
- "brick %s:%s for the snap %s failed",
- brickinfo->hostname, brickinfo->path,
- snap->snapname);
- goto out;
- }
- ret = glusterd_store_volinfo (snap_vol,
- GLUSTERD_VOLINFO_VER_AC_NONE);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLINFO_STORE_FAIL, "Failed to store snapshot "
- "volinfo (%s) for snap %s", snap_vol->volname,
- snap->snapname);
- goto out;
- }
+ }
+
+ /* Create and mount the snap brick */
+ ret = glusterd_snap_brick_create(snap_vol, brickinfo,
+ snap_opinfo->brick_num - 1, 0);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_CREATION_FAIL,
+ "Failed to "
+ " create and mount the brick(%s) for the snap %s",
+ snap_opinfo->brick_path, snap_vol->snapshot->snapname);
+ goto out;
+ }
+
+ brickinfo->snap_status = 0;
+ ret = glusterd_brick_start(snap_vol, brickinfo, _gf_false, _gf_false);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_BRICK_DISCONNECTED,
+ "starting the "
+ "brick %s:%s for the snap %s failed",
+ brickinfo->hostname, brickinfo->path, snap->snapname);
+ goto out;
+ }
+ ret = glusterd_store_volinfo(snap_vol, GLUSTERD_VOLINFO_VER_AC_NONE);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_STORE_FAIL,
+ "Failed to store snapshot "
+ "volinfo (%s) for snap %s",
+ snap_vol->volname, snap->snapname);
+ goto out;
+ }
out:
- if (mnt_device)
- GF_FREE (mnt_device);
- if (device)
- GF_FREE (device);
+ if (mnt_device)
+ GF_FREE(mnt_device);
+ if (device)
+ GF_FREE(device);
- return ret;
+ return ret;
}
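/* [Editor's note] The copy into brickinfo->device_path above uses the
 * snprintf-truncation idiom that recurs throughout this file: snprintf()
 * returns the length it would have written, so a return value >= the
 * destination size signals truncation. As a self-contained sketch:
 *
 *     #include <stdio.h>
 *
 *     static int
 *     copy_checked(char *dst, size_t dst_len, const char *src)
 *     {
 *         if ((size_t)snprintf(dst, dst_len, "%s", src) >= dst_len)
 *             return -1;   // truncated; callers treat this as an error
 *         return 0;
 *     }
 */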
/* Look into missed_snap_list, to see if the given brick_name
 * has any missed snap creates for the local node */
int32_t
-glusterd_take_missing_brick_snapshots (char *brick_name)
+glusterd_take_missing_brick_snapshots(char *brick_name)
{
- char *my_node_uuid = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_missed_snap_info *missed_snapinfo = NULL;
- glusterd_snap_op_t *snap_opinfo = NULL;
- int32_t ret = -1;
- gf_boolean_t update_list = _gf_false;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
- GF_ASSERT (brick_name);
-
- my_node_uuid = uuid_utoa (MY_UUID);
-
- cds_list_for_each_entry (missed_snapinfo, &priv->missed_snaps_list,
- missed_snaps) {
- /* If the missed snap op is not for the local node
- * then continue
+ char *my_node_uuid = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_missed_snap_info *missed_snapinfo = NULL;
+ glusterd_snap_op_t *snap_opinfo = NULL;
+ int32_t ret = -1;
+ gf_boolean_t update_list = _gf_false;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+ GF_ASSERT(brick_name);
+
+ my_node_uuid = uuid_utoa(MY_UUID);
+
+ cds_list_for_each_entry(missed_snapinfo, &priv->missed_snaps_list,
+ missed_snaps)
+ {
+ /* If the missed snap op is not for the local node
+ * then continue
+ */
+ if (strcmp(my_node_uuid, missed_snapinfo->node_uuid))
+ continue;
+
+ cds_list_for_each_entry(snap_opinfo, &missed_snapinfo->snap_ops,
+ snap_ops_list)
+ {
+ /* Check if the missed snap's op is a create for
+ * the brick name in question
+ */
+ if ((snap_opinfo->op == GF_SNAP_OPTION_TYPE_CREATE) &&
+ (!strcmp(brick_name, snap_opinfo->brick_path))) {
+ /* Perform a snap create if the
+ * op is still pending
*/
- if (strcmp (my_node_uuid, missed_snapinfo->node_uuid))
- continue;
-
- cds_list_for_each_entry (snap_opinfo,
- &missed_snapinfo->snap_ops,
- snap_ops_list) {
- /* Check if the missed snap's op is a create for
- * the brick name in question
+ if (snap_opinfo->status == GD_MISSED_SNAP_PENDING) {
+ ret = glusterd_create_missed_snap(missed_snapinfo,
+ snap_opinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_MISSED_SNAP_CREATE_FAIL,
+ "Failed to create "
+ "missed snap for %s",
+ brick_name);
+ /* At this stage, we will mark
+ * the entry as done. Because
+ * of the failure other
+ * snapshots will not be
+ * affected, and neither the
+ * brick. Only the current snap
+ * brick will always remain as
+ * pending.
*/
- if ((snap_opinfo->op == GF_SNAP_OPTION_TYPE_CREATE) &&
- (!strcmp (brick_name, snap_opinfo->brick_path))) {
- /* Perform a snap create if the
- * op is still pending
- */
- if (snap_opinfo->status ==
- GD_MISSED_SNAP_PENDING) {
- ret = glusterd_create_missed_snap
- (missed_snapinfo,
- snap_opinfo);
- if (ret) {
- gf_msg (this->name,
- GF_LOG_ERROR, 0,
- GD_MSG_MISSED_SNAP_CREATE_FAIL,
- "Failed to create "
- "missed snap for %s",
- brick_name);
- /* At this stage, we will mark
- * the entry as done. Because
- * of the failure other
- * snapshots will not be
- * affected, and neither the
- * brick. Only the current snap
- * brick will always remain as
- * pending.
- */
- }
- snap_opinfo->status =
- GD_MISSED_SNAP_DONE;
- update_list = _gf_true;
- }
- /* One snap-id won't have more than one missed
- * create for the same brick path. Hence
- * breaking in search of another missed create
- * for the same brick path in the local node
- */
- break;
- }
+ }
+ snap_opinfo->status = GD_MISSED_SNAP_DONE;
+ update_list = _gf_true;
}
+ /* One snap-id won't have more than one missed
+ * create for the same brick path. Hence
+ * breaking in search of another missed create
+ * for the same brick path in the local node
+ */
+ break;
+ }
}
+ }
- if (update_list == _gf_true) {
- ret = glusterd_store_update_missed_snaps ();
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MISSED_SNAP_LIST_STORE_FAIL,
- "Failed to update missed_snaps_list");
- goto out;
- }
+ if (update_list == _gf_true) {
+ ret = glusterd_store_update_missed_snaps();
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_MISSED_SNAP_LIST_STORE_FAIL,
+ "Failed to update missed_snaps_list");
+ goto out;
}
+ }
- ret = 0;
+ ret = 0;
out:
- return ret;
+ return ret;
}
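/* [Editor's note] The scan above is a two-level filter: the outer loop
 * keeps only missed-snap entries recorded for the local node uuid, the
 * inner loop keeps only pending CREATE ops whose brick_path matches the
 * requested brick, and the inner break relies on the invariant stated in
 * the comment above, that one snap id never records two missed creates
 * for the same brick path. Note that even a failed create is marked
 * GD_MISSED_SNAP_DONE, so one bad brick cannot block the rest of the list.
 */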
/* Checks if the client supports the volume, i.e. the client can understand
 * all the options in the volfile
*/
static gf_boolean_t
-_client_supports_volume (peer_info_t *peerinfo, int32_t *op_errno)
+_client_supports_volume(peer_info_t *peerinfo, int32_t *op_errno)
{
- gf_boolean_t ret = _gf_true;
- glusterd_volinfo_t *volinfo = NULL;
-
- GF_ASSERT (peerinfo);
- GF_ASSERT (op_errno);
-
-
- /* Only check when the volfile being requested is a volume. Not finding
- * a volinfo implies that the volfile requested for is not of a gluster
- * volume. A non volume volfile is requested by the local gluster
- * services like shd and nfs-server. These need not be checked as they
- * will be running at the same op-version as glusterd and will be able
- * to support all the features
- */
- if ((glusterd_volinfo_find (peerinfo->volname, &volinfo) == 0) &&
- ((peerinfo->min_op_version > volinfo->client_op_version) ||
- (peerinfo->max_op_version < volinfo->client_op_version))) {
- ret = _gf_false;
- *op_errno = ENOTSUP;
- gf_msg ("glusterd", GF_LOG_INFO, ENOTSUP,
- GD_MSG_UNSUPPORTED_VERSION,
- "Client %s (%d -> %d) doesn't support required "
- "op-version (%d). Rejecting volfile request.",
- peerinfo->identifier, peerinfo->min_op_version,
- peerinfo->max_op_version, volinfo->client_op_version);
- }
-
- return ret;
+ gf_boolean_t ret = _gf_true;
+ glusterd_volinfo_t *volinfo = NULL;
+
+ GF_ASSERT(peerinfo);
+ GF_ASSERT(op_errno);
+
+    /* Only check when the volfile being requested is a volume. Not finding
+     * a volinfo implies that the volfile requested is not of a gluster
+     * volume. A non-volume volfile is requested by the local gluster
+ * services like shd and nfs-server. These need not be checked as they
+ * will be running at the same op-version as glusterd and will be able
+ * to support all the features
+ */
+ if ((glusterd_volinfo_find(peerinfo->volname, &volinfo) == 0) &&
+ ((peerinfo->min_op_version > volinfo->client_op_version) ||
+ (peerinfo->max_op_version < volinfo->client_op_version))) {
+ ret = _gf_false;
+ *op_errno = ENOTSUP;
+ gf_msg("glusterd", GF_LOG_INFO, ENOTSUP, GD_MSG_UNSUPPORTED_VERSION,
+ "Client %s (%d -> %d) doesn't support required "
+ "op-version (%d). Rejecting volfile request.",
+ peerinfo->identifier, peerinfo->min_op_version,
+ peerinfo->max_op_version, volinfo->client_op_version);
+ }
+
+ return ret;
}
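/* [Editor's note] The test above is closed-interval containment: a client
 * advertising the range [min_op_version, max_op_version] supports the
 * volume only if the volume's client_op_version lies inside that range.
 * As a sketch (hypothetical helper):
 *
 *     static gf_boolean_t
 *     client_range_ok(int cli_min, int cli_max, int vol_ver)
 *     {
 *         return (cli_min <= vol_ver && vol_ver <= cli_max) ? _gf_true
 *                                                           : _gf_false;
 *     }
 *
 * e.g. a client at (2 -> 3) is rejected for a volume whose
 * client_op_version is 4, matching the log message above.
 */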
int
-__server_getspec (rpcsvc_request_t *req)
+__server_getspec(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- int32_t op_ret = -1;
- int32_t op_errno = 0;
- int32_t spec_fd = -1;
- size_t file_len = 0;
- char filename[PATH_MAX] = {0,};
- struct stat stbuf = {0,};
- char *brick_name = NULL;
- char *volume = NULL;
- char *tmp = NULL;
- rpc_transport_t *trans = NULL;
- gf_getspec_req args = {0,};
- gf_getspec_rsp rsp = {0,};
- char addrstr[RPCSVC_PEER_STRLEN] = {0};
- peer_info_t *peerinfo = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- ret = xdr_to_generic (req->msg[0], &args,
- (xdrproc_t)xdr_gf_getspec_req);
- if (ret < 0) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
- goto fail;
- }
-
- peerinfo = &req->trans->peerinfo;
-
- volume = args.key;
-
- /* Need to strip leading '/' from volnames. This was introduced to
- * support nfs style mount parameters for native gluster mount
- */
- if (volume[0] == '/')
- ret = snprintf (peerinfo->volname, sizeof (peerinfo->volname),
- "%s", &volume[1]);
- else
- ret = snprintf (peerinfo->volname, sizeof (peerinfo->volname),
- "%s", volume);
- if (ret < 0 || ret >= sizeof (peerinfo->volname)) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLINFO_GET_FAIL,
- "peerinfo->volname %s truncated or error occured: "
- "(ret: %d)", peerinfo->volname, ret);
- ret = -1;
- goto fail;
- }
-
- ret = glusterd_get_args_from_dict (&args, peerinfo, &brick_name);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Failed to get args from dict");
- goto fail;
- }
-
- if (!_client_supports_volume (peerinfo, &op_errno)) {
- ret = -1;
- goto fail;
- }
-
- trans = req->trans;
- /* addrstr will be empty for cli socket connections */
- ret = rpcsvc_transport_peername (trans, (char *)&addrstr,
- sizeof (addrstr));
- if (ret)
- goto fail;
-
- tmp = strrchr (addrstr, ':');
- if (tmp)
- *tmp = '\0';
-
- /* The trusted volfiles are given to the glusterd owned process like NFS
- * server, self-heal daemon etc., so that they are not inadvertently
- * blocked by a auth.{allow,reject} setting. The trusted volfile is not
- * meant for external users.
- * For unix domain socket, address will be empty.
- */
- if (strlen (addrstr) == 0 || gf_is_local_addr (addrstr)) {
-
- ret = build_volfile_path (volume, filename,
- sizeof (filename),
- TRUSTED_PREFIX);
- } else {
- ret = build_volfile_path (volume, filename,
- sizeof (filename), NULL);
- }
-
- if (ret == 0) {
- /* to allocate the proper buffer to hold the file data */
- ret = sys_stat (filename, &stbuf);
- if (ret < 0){
- gf_msg ("glusterd", GF_LOG_ERROR, errno,
- GD_MSG_FILE_OP_FAILED,
- "Unable to stat %s (%s)",
- filename, strerror (errno));
- goto fail;
- }
-
- spec_fd = open (filename, O_RDONLY);
- if (spec_fd < 0) {
- gf_msg ("glusterd", GF_LOG_ERROR, errno,
- GD_MSG_FILE_OP_FAILED,
- "Unable to open %s (%s)",
- filename, strerror (errno));
- goto fail;
- }
- ret = file_len = stbuf.st_size;
- } else {
- op_errno = ENOENT;
- goto fail;
- }
-
- if (file_len) {
- rsp.spec = CALLOC (file_len+1, sizeof (char));
- if (!rsp.spec) {
- ret = -1;
- op_errno = ENOMEM;
- goto fail;
- }
- ret = sys_read (spec_fd, rsp.spec, file_len);
- }
+ int32_t ret = -1;
+ int32_t op_ret = -1;
+ int32_t op_errno = 0;
+ int32_t spec_fd = -1;
+ size_t file_len = 0;
+ char filename[PATH_MAX] = {
+ 0,
+ };
+ struct stat stbuf = {
+ 0,
+ };
+ char *brick_name = NULL;
+ char *volume = NULL;
+ char *tmp = NULL;
+ rpc_transport_t *trans = NULL;
+ gf_getspec_req args = {
+ 0,
+ };
+ gf_getspec_rsp rsp = {
+ 0,
+ };
+ char addrstr[RPCSVC_PEER_STRLEN] = {0};
+ peer_info_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = xdr_to_generic(req->msg[0], &args, (xdrproc_t)xdr_gf_getspec_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto fail;
+ }
+
+ peerinfo = &req->trans->peerinfo;
+
+ volume = args.key;
+
+ /* Need to strip leading '/' from volnames. This was introduced to
+ * support nfs style mount parameters for native gluster mount
+ */
+ if (volume[0] == '/')
+ ret = snprintf(peerinfo->volname, sizeof(peerinfo->volname), "%s",
+ &volume[1]);
+ else
+ ret = snprintf(peerinfo->volname, sizeof(peerinfo->volname), "%s",
+ volume);
+ if (ret < 0 || ret >= sizeof(peerinfo->volname)) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+ "peerinfo->volname %s truncated or error occured: "
+ "(ret: %d)",
+ peerinfo->volname, ret);
+ ret = -1;
+ goto fail;
+ }
- if (brick_name) {
- gf_msg_debug (this->name, 0,
- "Look for missing snap creates for %s", brick_name);
- op_ret = glusterd_take_missing_brick_snapshots (brick_name);
- if (op_ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MISSED_SNAP_CREATE_FAIL,
- "Failed to take missing brick snapshots");
- ret = -1;
- goto fail;
- }
- }
+ ret = glusterd_get_args_from_dict(&args, peerinfo, &brick_name);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Failed to get args from dict");
+ goto fail;
+ }
- /* convert to XDR */
+ if (!_client_supports_volume(peerinfo, &op_errno)) {
+ ret = -1;
+ goto fail;
+ }
+
+ trans = req->trans;
+ /* addrstr will be empty for cli socket connections */
+ ret = rpcsvc_transport_peername(trans, (char *)&addrstr, sizeof(addrstr));
+ if (ret)
+ goto fail;
+
+ tmp = strrchr(addrstr, ':');
+ if (tmp)
+ *tmp = '\0';
+
+    /* The trusted volfiles are given to glusterd-owned processes like the
+     * NFS server, self-heal daemon etc., so that they are not inadvertently
+     * blocked by an auth.{allow,reject} setting. The trusted volfile is not
+ * meant for external users.
+ * For unix domain socket, address will be empty.
+ */
+ if (strlen(addrstr) == 0 || gf_is_local_addr(addrstr)) {
+ ret = build_volfile_path(volume, filename, sizeof(filename),
+ TRUSTED_PREFIX);
+ } else {
+ ret = build_volfile_path(volume, filename, sizeof(filename), NULL);
+ }
+
+ if (ret == 0) {
+ /* to allocate the proper buffer to hold the file data */
+ ret = sys_stat(filename, &stbuf);
+ if (ret < 0) {
+ gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
+ "Unable to stat %s (%s)", filename, strerror(errno));
+ goto fail;
+ }
+
+ spec_fd = open(filename, O_RDONLY);
+ if (spec_fd < 0) {
+ gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
+ "Unable to open %s (%s)", filename, strerror(errno));
+ goto fail;
+ }
+ ret = file_len = stbuf.st_size;
+ } else {
+ op_errno = ENOENT;
+ goto fail;
+ }
+
+ if (file_len) {
+ rsp.spec = CALLOC(file_len + 1, sizeof(char));
+ if (!rsp.spec) {
+ ret = -1;
+ op_errno = ENOMEM;
+ goto fail;
+ }
+ ret = sys_read(spec_fd, rsp.spec, file_len);
+ }
+
+ if (brick_name) {
+ gf_msg_debug(this->name, 0, "Look for missing snap creates for %s",
+ brick_name);
+ op_ret = glusterd_take_missing_brick_snapshots(brick_name);
+ if (op_ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MISSED_SNAP_CREATE_FAIL,
+ "Failed to take missing brick snapshots");
+ ret = -1;
+ goto fail;
+ }
+ }
+
+ /* convert to XDR */
fail:
- if (spec_fd >= 0)
- sys_close (spec_fd);
+ if (spec_fd >= 0)
+ sys_close(spec_fd);
- GF_FREE(brick_name);
+ GF_FREE(brick_name);
- rsp.op_ret = ret;
+ rsp.op_ret = ret;
- if (op_errno)
- rsp.op_errno = gf_errno_to_error (op_errno);
+ if (op_errno)
+ rsp.op_errno = gf_errno_to_error(op_errno);
- if (!rsp.spec)
- rsp.spec = strdup ("");
+ if (!rsp.spec)
+ rsp.spec = strdup("");
- glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf_getspec_rsp);
- free (args.key);//malloced by xdr
- free (rsp.spec);
- if (args.xdata.xdata_val)
- free (args.xdata.xdata_val);
+ glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf_getspec_rsp);
+ free(args.key); // malloced by xdr
+ free(rsp.spec);
+ if (args.xdata.xdata_val)
+ free(args.xdata.xdata_val);
- return 0;
+ return 0;
}
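/* [Editor's note] The trusted/untrusted fork in __server_getspec() above
 * reduces to: local transports get the trusted volfile, and an empty peer
 * address means a unix-domain socket, which is local by definition.
 * Sketched as a hypothetical helper around the calls used above:
 *
 *     static const char *
 *     volfile_prefix(char *addrstr)
 *     {
 *         if (strlen(addrstr) == 0 || gf_is_local_addr(addrstr))
 *             return TRUSTED_PREFIX;   // shd, nfs-server, etc.
 *         return NULL;                 // external client: plain volfile
 *     }
 */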
int
-server_getspec (rpcsvc_request_t *req)
+server_getspec(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __server_getspec);
+ return glusterd_big_locked_handler(req, __server_getspec);
}
int32_t
-__server_event_notify (rpcsvc_request_t *req)
+__server_event_notify(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf_event_notify_req args = {0,};
- gf_event_notify_rsp rsp = {0,};
- dict_t *dict = NULL;
- gf_boolean_t need_rsp = _gf_true;
-
- ret = xdr_to_generic (req->msg[0], &args,
- (xdrproc_t)xdr_gf_event_notify_req);
- if (ret < 0) {
- req->rpc_err = GARBAGE_ARGS;
- goto fail;
- }
-
- if (args.dict.dict_len) {
- dict = dict_new ();
- if (!dict)
- return ret;
- ret = dict_unserialize (args.dict.dict_val,
- args.dict.dict_len, &dict);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "Failed to unserialize req");
- goto fail;
- }
+ int32_t ret = -1;
+ gf_event_notify_req args = {
+ 0,
+ };
+ gf_event_notify_rsp rsp = {
+ 0,
+ };
+ dict_t *dict = NULL;
+ gf_boolean_t need_rsp = _gf_true;
+
+ ret = xdr_to_generic(req->msg[0], &args,
+ (xdrproc_t)xdr_gf_event_notify_req);
+ if (ret < 0) {
+ req->rpc_err = GARBAGE_ARGS;
+ goto fail;
+ }
+
+ if (args.dict.dict_len) {
+ dict = dict_new();
+ if (!dict)
+ return ret;
+ ret = dict_unserialize(args.dict.dict_val, args.dict.dict_len, &dict);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "Failed to unserialize req");
+ goto fail;
}
+ }
- switch (args.op) {
+ switch (args.op) {
case GF_EN_DEFRAG_STATUS:
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_DEFRAG_STATUS_UPDATED,
- "received defrag status updated");
- if (dict) {
- glusterd_defrag_event_notify_handle (dict);
- need_rsp = _gf_false;
- }
- break;
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_DEFRAG_STATUS_UPDATED,
+ "received defrag status updated");
+ if (dict) {
+ glusterd_defrag_event_notify_handle(dict);
+ need_rsp = _gf_false;
+ }
+ break;
default:
- gf_msg ("glusterd", GF_LOG_ERROR, EINVAL,
- GD_MSG_OP_UNSUPPORTED, "Unknown op received in event "
- "notify");
- gf_event (EVENT_NOTIFY_UNKNOWN_OP, "op=%d", args.op);
- ret = -1;
- break;
- }
+ gf_msg("glusterd", GF_LOG_ERROR, EINVAL, GD_MSG_OP_UNSUPPORTED,
+ "Unknown op received in event "
+ "notify");
+ gf_event(EVENT_NOTIFY_UNKNOWN_OP, "op=%d", args.op);
+ ret = -1;
+ break;
+ }
fail:
- rsp.op_ret = ret;
+ rsp.op_ret = ret;
- if (need_rsp)
- glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf_event_notify_rsp);
- if (dict)
- dict_unref (dict);
- free (args.dict.dict_val);//malloced by xdr
+ if (need_rsp)
+ glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf_event_notify_rsp);
+ if (dict)
+ dict_unref(dict);
+ free(args.dict.dict_val); // malloced by xdr
- return 0;
+ return 0;
}
int32_t
-server_event_notify (rpcsvc_request_t *req)
+server_event_notify(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req, __server_event_notify);
+ return glusterd_big_locked_handler(req, __server_event_notify);
}
int
-gd_validate_cluster_op_version (xlator_t *this, int cluster_op_version,
- char *peerid)
+gd_validate_cluster_op_version(xlator_t *this, int cluster_op_version,
+ char *peerid)
{
- int ret = -1;
- glusterd_conf_t *conf = NULL;
-
- conf = this->private;
-
- if (cluster_op_version > GD_OP_VERSION_MAX) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_OP_VERSION_MISMATCH,
- "operating version %d is more than the maximum "
- "supported (%d) on the machine (as per peer request "
- "from %s)", cluster_op_version, GD_OP_VERSION_MAX,
- peerid);
- goto out;
- }
-
- /* The peer can only reduce its op-version when it doesn't have any
- * volumes. Reducing op-version when it already contains volumes can
- * lead to inconsistencies in the cluster
- */
- if ((cluster_op_version < conf->op_version) &&
- !cds_list_empty (&conf->volumes)) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_OP_VERS_ADJUST_FAIL,
- "cannot reduce operating version to %d from current "
- "version %d as volumes exist (as per peer request from "
- "%s)", cluster_op_version, conf->op_version, peerid);
- goto out;
- }
-
- ret = 0;
+ int ret = -1;
+ glusterd_conf_t *conf = NULL;
+
+ conf = this->private;
+
+ if (cluster_op_version > GD_OP_VERSION_MAX) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERSION_MISMATCH,
+ "operating version %d is more than the maximum "
+ "supported (%d) on the machine (as per peer request "
+ "from %s)",
+ cluster_op_version, GD_OP_VERSION_MAX, peerid);
+ goto out;
+ }
+
+ /* The peer can only reduce its op-version when it doesn't have any
+ * volumes. Reducing op-version when it already contains volumes can
+ * lead to inconsistencies in the cluster
+ */
+ if ((cluster_op_version < conf->op_version) &&
+ !cds_list_empty(&conf->volumes)) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERS_ADJUST_FAIL,
+ "cannot reduce operating version to %d from current "
+ "version %d as volumes exist (as per peer request from "
+ "%s)",
+ cluster_op_version, conf->op_version, peerid);
+ goto out;
+ }
+
+ ret = 0;
out:
- return ret;
+ return ret;
}
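/* [Editor's note] The two rules above, restated as a predicate
 * (hypothetical helper, same semantics as the checks in
 * gd_validate_cluster_op_version()):
 *
 *     static gf_boolean_t
 *     op_version_acceptable(int proposed, int current, int max,
 *                           gf_boolean_t have_volumes)
 *     {
 *         if (proposed > max)
 *             return _gf_false;   // beyond what this node supports
 *         if (proposed < current && have_volumes)
 *             return _gf_false;   // no downgrade once volumes exist
 *         return _gf_true;
 *     }
 */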
/* Validate if glusterd can serve the management handshake request
@@ -1174,1242 +1143,1230 @@ out:
* - the incoming request address is matched with the peer list
*/
gf_boolean_t
-gd_validate_mgmt_hndsk_req (rpcsvc_request_t *req, dict_t *dict)
+gd_validate_mgmt_hndsk_req(rpcsvc_request_t *req, dict_t *dict)
{
- int ret = -1;
- char hostname[UNIX_PATH_MAX + 1] = {0,};
- glusterd_peerinfo_t *peer = NULL;
- xlator_t *this = NULL;
- char *uuid_str = NULL;
- uuid_t peer_uuid = {0,};
-
- this = THIS;
- GF_ASSERT (this);
-
- if (!glusterd_have_peers () && !glusterd_have_volumes ())
- return _gf_true;
-
- ret = dict_get_str (dict, GD_PEER_ID_KEY, &uuid_str);
- /* Try to match uuid only if available, don't fail as older peers will
- * not send a uuid
- */
- if (!ret) {
- gf_uuid_parse (uuid_str, peer_uuid);
- rcu_read_lock ();
- ret = (glusterd_peerinfo_find (peer_uuid, NULL) != NULL);
- rcu_read_unlock ();
- if (ret)
- return _gf_true;
- }
+ int ret = -1;
+ char hostname[UNIX_PATH_MAX + 1] = {
+ 0,
+ };
+ glusterd_peerinfo_t *peer = NULL;
+ xlator_t *this = NULL;
+ char *uuid_str = NULL;
+ uuid_t peer_uuid = {
+ 0,
+ };
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ if (!glusterd_have_peers() && !glusterd_have_volumes())
+ return _gf_true;
- /* If you cannot get the hostname, you cannot authenticate */
- ret = glusterd_remote_hostname_get (req, hostname, sizeof (hostname));
+ ret = dict_get_str(dict, GD_PEER_ID_KEY, &uuid_str);
+ /* Try to match uuid only if available, don't fail as older peers will
+ * not send a uuid
+ */
+ if (!ret) {
+ gf_uuid_parse(uuid_str, peer_uuid);
+ rcu_read_lock();
+ ret = (glusterd_peerinfo_find(peer_uuid, NULL) != NULL);
+ rcu_read_unlock();
if (ret)
- return _gf_false;
-
- /* If peer object is not found it indicates that request is from an
- * unknown peer, if its found, validate whether its uuid is also
- * available in the peerinfo list. There could be a case where hostname
- * is available in the peerinfo list but the uuid has changed of the
- * node due to a reinstall, in that case the validation should fail!
- */
- rcu_read_lock ();
- if (!uuid_str) {
- ret = (glusterd_peerinfo_find (NULL, hostname) == NULL);
+ return _gf_true;
+ }
+
+ /* If you cannot get the hostname, you cannot authenticate */
+ ret = glusterd_remote_hostname_get(req, hostname, sizeof(hostname));
+ if (ret)
+ return _gf_false;
+
+    /* If the peer object is not found, the request is from an
+     * unknown peer; if it is found, validate whether its uuid is also
+     * available in the peerinfo list. There could be a case where the
+     * hostname is available in the peerinfo list but the node's uuid has
+     * changed due to a reinstall; in that case the validation should fail!
+ */
+ rcu_read_lock();
+ if (!uuid_str) {
+ ret = (glusterd_peerinfo_find(NULL, hostname) == NULL);
+ } else {
+ peer = glusterd_peerinfo_find(NULL, hostname);
+ if (!peer) {
+ ret = -1;
+ } else if (peer && glusterd_peerinfo_find(peer_uuid, NULL) != NULL) {
+ ret = 0;
} else {
- peer = glusterd_peerinfo_find (NULL, hostname);
- if (!peer) {
- ret = -1;
- } else if (peer &&
- glusterd_peerinfo_find (peer_uuid, NULL) != NULL) {
- ret = 0;
- } else {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_HANDSHAKE_REQ_REJECTED, "Request from "
- "peer %s has an entry in peerinfo, but uuid "
- "does not match",
- req->trans->peerinfo.identifier);
- ret = -1;
- }
- }
- rcu_read_unlock ();
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_HANDSHAKE_REQ_REJECTED, "Rejecting management "
- "handshake request from unknown peer %s",
- req->trans->peerinfo.identifier);
- gf_event (EVENT_PEER_REJECT, "peer=%s",
- req->trans->peerinfo.identifier);
- return _gf_false;
- }
-
- return _gf_true;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HANDSHAKE_REQ_REJECTED,
+ "Request from "
+ "peer %s has an entry in peerinfo, but uuid "
+ "does not match",
+ req->trans->peerinfo.identifier);
+ ret = -1;
+ }
+ }
+ rcu_read_unlock();
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HANDSHAKE_REQ_REJECTED,
+ "Rejecting management "
+ "handshake request from unknown peer %s",
+ req->trans->peerinfo.identifier);
+ gf_event(EVENT_PEER_REJECT, "peer=%s", req->trans->peerinfo.identifier);
+ return _gf_false;
+ }
+
+ return _gf_true;
}
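/* [Editor's summary] Decision table for gd_validate_mgmt_hndsk_req() above
 * (GD_PEER_ID_KEY is optional, since older peers do not send it):
 *
 *     uuid sent,  uuid in peer list                  -> accept early
 *     no uuid,    hostname in peer list              -> accept
 *     uuid sent,  hostname known, uuid not in list   -> reject (reinstall)
 *     hostname unknown (or unresolvable)             -> reject unknown peer
 *
 * A node with no peers and no volumes accepts everything, so that a fresh
 * install can be probed at all.
 */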
int
-__glusterd_mgmt_hndsk_versions (rpcsvc_request_t *req)
+__glusterd_mgmt_hndsk_versions(rpcsvc_request_t *req)
{
- dict_t *dict = NULL;
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- int ret = -1;
- int op_errno = EINVAL;
- gf_mgmt_hndsk_req args = {{0,},};
- gf_mgmt_hndsk_rsp rsp = {0,};
- dict_t *args_dict = NULL;
-
- this = THIS;
- conf = this->private;
-
- ret = xdr_to_generic (req->msg[0], &args,
- (xdrproc_t)xdr_gf_mgmt_hndsk_req);
- if (ret < 0) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- GF_PROTOCOL_DICT_UNSERIALIZE (this, args_dict, args.hndsk.hndsk_val,
- (args.hndsk.hndsk_len), ret, op_errno,
- out);
-
- /* Check if we can service the request */
- if (!gd_validate_mgmt_hndsk_req (req, args_dict)) {
- ret = -1;
- goto out;
- }
+ dict_t *dict = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ int ret = -1;
+ int op_errno = EINVAL;
+ gf_mgmt_hndsk_req args = {
+ {
+ 0,
+ },
+ };
+ gf_mgmt_hndsk_rsp rsp = {
+ 0,
+ };
+ dict_t *args_dict = NULL;
+
+ this = THIS;
+ conf = this->private;
+
+ ret = xdr_to_generic(req->msg[0], &args, (xdrproc_t)xdr_gf_mgmt_hndsk_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ GF_PROTOCOL_DICT_UNSERIALIZE(this, args_dict, args.hndsk.hndsk_val,
+ (args.hndsk.hndsk_len), ret, op_errno, out);
+
+ /* Check if we can service the request */
+ if (!gd_validate_mgmt_hndsk_req(req, args_dict)) {
+ ret = -1;
+ goto out;
+ }
- dict = dict_new ();
- if (!dict)
- goto out;
+ dict = dict_new();
+ if (!dict)
+ goto out;
- ret = dict_set_int32 (dict, GD_OP_VERSION_KEY, conf->op_version);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_DICT_SET_FAILED,
- "failed to set operating version");
- rsp.op_ret = ret;
- goto out;
- }
+ ret = dict_set_int32(dict, GD_OP_VERSION_KEY, conf->op_version);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_SET_FAILED,
+ "failed to set operating version");
+ rsp.op_ret = ret;
+ goto out;
+ }
- ret = dict_set_int32 (dict, GD_MIN_OP_VERSION_KEY, GD_OP_VERSION_MIN);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_DICT_SET_FAILED,
- "failed to set %s", GD_MIN_OP_VERSION_KEY);
- rsp.op_ret = ret;
- goto out;
- }
+ ret = dict_set_int32(dict, GD_MIN_OP_VERSION_KEY, GD_OP_VERSION_MIN);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_SET_FAILED,
+ "failed to set %s", GD_MIN_OP_VERSION_KEY);
+ rsp.op_ret = ret;
+ goto out;
+ }
- ret = dict_set_int32 (dict, GD_MAX_OP_VERSION_KEY, GD_OP_VERSION_MAX);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_DICT_SET_FAILED,
- "failed to set %s", GD_MAX_OP_VERSION_KEY);
- rsp.op_ret = ret;
- goto out;
- }
+ ret = dict_set_int32(dict, GD_MAX_OP_VERSION_KEY, GD_OP_VERSION_MAX);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_SET_FAILED,
+ "failed to set %s", GD_MAX_OP_VERSION_KEY);
+ rsp.op_ret = ret;
+ goto out;
+ }
- ret = 0;
+ ret = 0;
- GF_PROTOCOL_DICT_SERIALIZE (this, dict, (&rsp.hndsk.hndsk_val),
- rsp.hndsk.hndsk_len, op_errno, out);
+ GF_PROTOCOL_DICT_SERIALIZE(this, dict, (&rsp.hndsk.hndsk_val),
+ rsp.hndsk.hndsk_len, op_errno, out);
out:
- rsp.op_ret = ret;
- rsp.op_errno = op_errno;
+ rsp.op_ret = ret;
+ rsp.op_errno = op_errno;
- glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf_mgmt_hndsk_rsp);
+ glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf_mgmt_hndsk_rsp);
- ret = 0;
+ ret = 0;
- if (dict)
- dict_unref (dict);
+ if (dict)
+ dict_unref(dict);
- if (args.hndsk.hndsk_val)
- free (args.hndsk.hndsk_val);
+ if (args.hndsk.hndsk_val)
+ free(args.hndsk.hndsk_val);
- if (rsp.hndsk.hndsk_val)
- GF_FREE (rsp.hndsk.hndsk_val);
+ if (rsp.hndsk.hndsk_val)
+ GF_FREE(rsp.hndsk.hndsk_val);
- if (args_dict)
- dict_unref (args_dict);
+ if (args_dict)
+ dict_unref(args_dict);
- return ret;
+ return ret;
}
int
-glusterd_mgmt_hndsk_versions (rpcsvc_request_t *req)
+glusterd_mgmt_hndsk_versions(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_mgmt_hndsk_versions);
+ return glusterd_big_locked_handler(req, __glusterd_mgmt_hndsk_versions);
}
int
-__glusterd_mgmt_hndsk_versions_ack (rpcsvc_request_t *req)
+__glusterd_mgmt_hndsk_versions_ack(rpcsvc_request_t *req)
{
- dict_t *clnt_dict = NULL;
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- int ret = -1;
- int op_errno = EINVAL;
- int peer_op_version = 0;
- gf_mgmt_hndsk_req args = {{0,},};
- gf_mgmt_hndsk_rsp rsp = {0,};
-
- this = THIS;
- conf = this->private;
-
- ret = xdr_to_generic (req->msg[0], &args,
- (xdrproc_t)xdr_gf_mgmt_hndsk_req);
- if (ret < 0) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- GF_PROTOCOL_DICT_UNSERIALIZE (this, clnt_dict, args.hndsk.hndsk_val,
- (args.hndsk.hndsk_len), ret, op_errno,
- out);
-
- ret = dict_get_int32 (clnt_dict, GD_OP_VERSION_KEY, &peer_op_version);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_DICT_GET_FAILED,
- "failed to get the op-version key peer=%s",
- req->trans->peerinfo.identifier);
- goto out;
- }
-
- ret = gd_validate_cluster_op_version (this, peer_op_version,
- req->trans->peerinfo.identifier);
- if (ret)
- goto out;
-
-
- /* As this is ACK from the Cluster for the versions supported,
- can set the op-version of 'this' glusterd to the one
- received. */
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_VERS_INFO, "using the op-version %d",
- peer_op_version);
- conf->op_version = peer_op_version;
- ret = glusterd_store_global_info (this);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_GLOBAL_OP_VERSION_SET_FAIL,
- "Failed to store op-version");
+ dict_t *clnt_dict = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ int ret = -1;
+ int op_errno = EINVAL;
+ int peer_op_version = 0;
+ gf_mgmt_hndsk_req args = {
+ {
+ 0,
+ },
+ };
+ gf_mgmt_hndsk_rsp rsp = {
+ 0,
+ };
+
+ this = THIS;
+ conf = this->private;
+
+ ret = xdr_to_generic(req->msg[0], &args, (xdrproc_t)xdr_gf_mgmt_hndsk_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ GF_PROTOCOL_DICT_UNSERIALIZE(this, clnt_dict, args.hndsk.hndsk_val,
+ (args.hndsk.hndsk_len), ret, op_errno, out);
+
+ ret = dict_get_int32(clnt_dict, GD_OP_VERSION_KEY, &peer_op_version);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
+ "failed to get the op-version key peer=%s",
+ req->trans->peerinfo.identifier);
+ goto out;
+ }
+
+ ret = gd_validate_cluster_op_version(this, peer_op_version,
+ req->trans->peerinfo.identifier);
+ if (ret)
+ goto out;
+
+    /* As this is the ACK from the cluster for the supported versions,
+       the op-version of 'this' glusterd can be set to the one
+       received. */
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_VERS_INFO,
+ "using the op-version %d", peer_op_version);
+ conf->op_version = peer_op_version;
+ ret = glusterd_store_global_info(this);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLOBAL_OP_VERSION_SET_FAIL,
+ "Failed to store op-version");
out:
- rsp.op_ret = ret;
- rsp.op_errno = op_errno;
+ rsp.op_ret = ret;
+ rsp.op_errno = op_errno;
- glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf_mgmt_hndsk_rsp);
+ glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf_mgmt_hndsk_rsp);
- ret = 0;
+ ret = 0;
- if (clnt_dict)
- dict_unref (clnt_dict);
+ if (clnt_dict)
+ dict_unref(clnt_dict);
- if (args.hndsk.hndsk_val)
- free (args.hndsk.hndsk_val);
+ if (args.hndsk.hndsk_val)
+ free(args.hndsk.hndsk_val);
- return ret;
+ return ret;
}
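/* [Editor's note] Taken together, the two handlers above form the
 * op-version handshake: VERSIONS advertises this node's current op-version
 * plus its [GD_OP_VERSION_MIN, GD_OP_VERSION_MAX] window, and VERSIONS_ACK
 * delivers the value the cluster settled on, which is validated by
 * gd_validate_cluster_op_version() and then persisted via
 * glusterd_store_global_info(). A hypothetical trace:
 *
 *     peer -> us : MGMT_HNDSK_VERSIONS        (reply: min/max/current)
 *     peer -> us : MGMT_HNDSK_VERSIONS_ACK    {op-version = N}
 *     us         : conf->op_version = N;      // then stored to disk
 */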
int
-glusterd_mgmt_hndsk_versions_ack (rpcsvc_request_t *req)
+glusterd_mgmt_hndsk_versions_ack(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_mgmt_hndsk_versions_ack);
+ return glusterd_big_locked_handler(req, __glusterd_mgmt_hndsk_versions_ack);
}
int
-__server_get_volume_info (rpcsvc_request_t *req)
+__server_get_volume_info(rpcsvc_request_t *req)
{
- int ret = -1;
- int32_t op_errno = ENOENT;
- gf_get_volume_info_req vol_info_req = {{0,}};
- gf_get_volume_info_rsp vol_info_rsp = {0,};
- char *volname = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- dict_t *dict = NULL;
- dict_t *dict_rsp = NULL;
- char *volume_id_str = NULL;
- int32_t flags = 0;
-
- ret = xdr_to_generic (req->msg[0], &vol_info_req,
- (xdrproc_t)xdr_gf_get_volume_info_req);
- if (ret < 0) {
- /* failed to decode msg */
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_VOL_INFO_REQ_RECVD, "Received get volume info req");
-
- if (vol_info_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new ();
- if (!dict) {
- gf_msg ("glusterd", GF_LOG_WARNING, ENOMEM,
- GD_MSG_NO_MEMORY, "Out of Memory");
- op_errno = ENOMEM;
- ret = -1;
- goto out;
- }
-
- ret = dict_unserialize (vol_info_req.dict.dict_val,
- vol_info_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to "
- "unserialize req-buffer to dictionary");
- op_errno = -ret;
- ret = -1;
- goto out;
- } else {
- dict->extra_stdfree = vol_info_req.dict.dict_val;
- }
+ int ret = -1;
+ int32_t op_errno = ENOENT;
+ gf_get_volume_info_req vol_info_req = {{
+ 0,
+ }};
+ gf_get_volume_info_rsp vol_info_rsp = {
+ 0,
+ };
+ char *volname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ dict_t *dict = NULL;
+ dict_t *dict_rsp = NULL;
+ char *volume_id_str = NULL;
+ int32_t flags = 0;
+
+ ret = xdr_to_generic(req->msg[0], &vol_info_req,
+ (xdrproc_t)xdr_gf_get_volume_info_req);
+ if (ret < 0) {
+ /* failed to decode msg */
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_VOL_INFO_REQ_RECVD,
+ "Received get volume info req");
+
+ if (vol_info_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new();
+ if (!dict) {
+ gf_msg("glusterd", GF_LOG_WARNING, ENOMEM, GD_MSG_NO_MEMORY,
+ "Out of Memory");
+ op_errno = ENOMEM;
+ ret = -1;
+ goto out;
}
- ret = dict_get_int32 (dict, "flags", &flags);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_ERROR, -ret,
- GD_MSG_DICT_GET_FAILED, "failed to get flags");
- op_errno = -ret;
- ret = -1;
- goto out;
+ ret = dict_unserialize(vol_info_req.dict.dict_val,
+ vol_info_req.dict.dict_len, &dict);
+ if (ret < 0) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ op_errno = -ret;
+ ret = -1;
+ goto out;
+ } else {
+ dict->extra_stdfree = vol_info_req.dict.dict_val;
}
+ }
- if (!flags) {
- /* Nothing to query about. Just return success */
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_NO_FLAG_SET, "No flags set");
- ret = 0;
- goto out;
- }
+ ret = dict_get_int32(dict, "flags", &flags);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
+ "failed to get flags");
+ op_errno = -ret;
+ ret = -1;
+ goto out;
+ }
- ret = dict_get_str (dict, "volname", &volname);
- if (ret) {
- op_errno = EINVAL;
- ret = -1;
- goto out;
- }
+ if (!flags) {
+ /* Nothing to query about. Just return success */
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_NO_FLAG_SET, "No flags set");
+ ret = 0;
+ goto out;
+ }
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- op_errno = EINVAL;
- ret = -1;
- goto out;
- }
+ ret = dict_get_str(dict, "volname", &volname);
+ if (ret) {
+ op_errno = EINVAL;
+ ret = -1;
+ goto out;
+ }
- if (flags & (int32_t)GF_GET_VOLUME_UUID) {
- volume_id_str = gf_strdup (uuid_utoa (volinfo->volume_id));
- if (!volume_id_str) {
- op_errno = ENOMEM;
- ret = -1;
- goto out;
- }
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ op_errno = EINVAL;
+ ret = -1;
+ goto out;
+ }
- dict_rsp = dict_new ();
- if (!dict_rsp) {
- gf_msg ("glusterd", GF_LOG_WARNING, ENOMEM,
- GD_MSG_NO_MEMORY, "Out of Memory");
- op_errno = ENOMEM;
- GF_FREE (volume_id_str);
- ret = -1;
- goto out;
- }
- ret = dict_set_dynstr (dict_rsp, "volume_id", volume_id_str);
- if (ret) {
- op_errno = -ret;
- ret = -1;
- goto out;
- }
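+ /* 'flags' is a bitmask of requested fields; this handler currently
+ * serves only the volume UUID (GF_GET_VOLUME_UUID). */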
+ if (flags & (int32_t)GF_GET_VOLUME_UUID) {
+ volume_id_str = gf_strdup(uuid_utoa(volinfo->volume_id));
+ if (!volume_id_str) {
+ op_errno = ENOMEM;
+ ret = -1;
+ goto out;
}
- ret = dict_allocate_and_serialize (dict_rsp, &vol_info_rsp.dict.dict_val,
- &vol_info_rsp.dict.dict_len);
+
+ dict_rsp = dict_new();
+ if (!dict_rsp) {
+ gf_msg("glusterd", GF_LOG_WARNING, ENOMEM, GD_MSG_NO_MEMORY,
+ "Out of Memory");
+ op_errno = ENOMEM;
+ GF_FREE(volume_id_str);
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_dynstr(dict_rsp, "volume_id", volume_id_str);
if (ret) {
- op_errno = -ret;
- ret = -1;
- goto out;
- }
+ op_errno = -ret;
+ ret = -1;
+ goto out;
+ }
+ }
+ ret = dict_allocate_and_serialize(dict_rsp, &vol_info_rsp.dict.dict_val,
+ &vol_info_rsp.dict.dict_len);
+ if (ret) {
+ op_errno = -ret;
+ ret = -1;
+ goto out;
+ }
out:
- vol_info_rsp.op_ret = ret;
- vol_info_rsp.op_errno = op_errno;
- vol_info_rsp.op_errstr = "";
- glusterd_submit_reply (req, &vol_info_rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf_get_volume_info_rsp);
- ret = 0;
-
- if (dict) {
- dict_unref (dict);
- }
-
- if (dict_rsp) {
- dict_unref (dict_rsp);
- }
-
- if (vol_info_rsp.dict.dict_val) {
- GF_FREE (vol_info_rsp.dict.dict_val);
- }
- return ret;
+ vol_info_rsp.op_ret = ret;
+ vol_info_rsp.op_errno = op_errno;
+ vol_info_rsp.op_errstr = "";
+ glusterd_submit_reply(req, &vol_info_rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf_get_volume_info_rsp);
+ ret = 0;
+
+ if (dict) {
+ dict_unref(dict);
+ }
+
+ if (dict_rsp) {
+ dict_unref(dict_rsp);
+ }
+
+ if (vol_info_rsp.dict.dict_val) {
+ GF_FREE(vol_info_rsp.dict.dict_val);
+ }
+ return ret;
}
int
-server_get_volume_info (rpcsvc_request_t *req)
+server_get_volume_info(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __server_get_volume_info);
+ return glusterd_big_locked_handler(req, __server_get_volume_info);
}
-
/*
* glusterd function to get the list of snapshot names and uuids
*/
int
-__server_get_snap_info (rpcsvc_request_t *req)
+__server_get_snap_info(rpcsvc_request_t *req)
{
- int ret = -1;
- int op_errno = ENOENT;
- gf_getsnap_name_uuid_req snap_info_req = {{0,}};
- gf_getsnap_name_uuid_rsp snap_info_rsp = {0,};
- dict_t *dict = NULL;
- dict_t *dict_rsp = NULL;
- char *volname = NULL;
-
- GF_ASSERT (req);
-
- ret = xdr_to_generic (req->msg[0], &snap_info_req,
- (xdrproc_t)xdr_gf_getsnap_name_uuid_req);
- if (ret < 0) {
- req->rpc_err = GARBAGE_ARGS;
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL,
- "Failed to decode management handshake response");
- goto out;
+ int ret = -1;
+ int op_errno = ENOENT;
+ gf_getsnap_name_uuid_req snap_info_req = {{
+ 0,
+ }};
+ gf_getsnap_name_uuid_rsp snap_info_rsp = {
+ 0,
+ };
+ dict_t *dict = NULL;
+ dict_t *dict_rsp = NULL;
+ char *volname = NULL;
+
+ GF_ASSERT(req);
+
+ ret = xdr_to_generic(req->msg[0], &snap_info_req,
+ (xdrproc_t)xdr_gf_getsnap_name_uuid_req);
+ if (ret < 0) {
+ req->rpc_err = GARBAGE_ARGS;
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode management handshake response");
+ goto out;
+ }
+
+ if (snap_info_req.dict.dict_len) {
+ dict = dict_new();
+ if (!dict) {
+ op_errno = ENOMEM;
+ ret = -1;
+ goto out;
}
- if (snap_info_req.dict.dict_len) {
- dict = dict_new ();
- if (!dict) {
- op_errno = ENOMEM;
- ret = -1;
- goto out;
- }
-
- ret = dict_unserialize (snap_info_req.dict.dict_val,
- snap_info_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg ("glusterd", GF_LOG_ERROR, EINVAL,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "Failed to unserialize dictionary");
- op_errno = EINVAL;
- ret = -1;
- goto out;
- } else {
- dict->extra_stdfree = snap_info_req.dict.dict_val;
- }
+ ret = dict_unserialize(snap_info_req.dict.dict_val,
+ snap_info_req.dict.dict_len, &dict);
+ if (ret < 0) {
+ gf_msg("glusterd", GF_LOG_ERROR, EINVAL,
+ GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "Failed to unserialize dictionary");
+ op_errno = EINVAL;
+ ret = -1;
+ goto out;
+ } else {
+ dict->extra_stdfree = snap_info_req.dict.dict_val;
}
+ }
- ret = dict_get_str (dict, "volname", &volname);
- if (ret) {
- op_errno = EINVAL;
- gf_msg ("glusterd", GF_LOG_ERROR, EINVAL,
- GD_MSG_DICT_GET_FAILED,
- "Failed to retrieve volname");
- ret = -1;
- goto out;
- }
+ ret = dict_get_str(dict, "volname", &volname);
+ if (ret) {
+ op_errno = EINVAL;
+ gf_msg("glusterd", GF_LOG_ERROR, EINVAL, GD_MSG_DICT_GET_FAILED,
+ "Failed to retrieve volname");
+ ret = -1;
+ goto out;
+ }
- dict_rsp = dict_new ();
- if (!dict_rsp) {
- op_errno = ENOMEM;
- ret = -1;
- goto out;
- }
+ dict_rsp = dict_new();
+ if (!dict_rsp) {
+ op_errno = ENOMEM;
+ ret = -1;
+ goto out;
+ }
- ret = glusterd_snapshot_get_volnames_uuids (dict_rsp, volname,
- &snap_info_rsp);
+ ret = glusterd_snapshot_get_volnames_uuids(dict_rsp, volname,
+ &snap_info_rsp);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, EINVAL,
- GD_MSG_VOL_NOT_FOUND,
- "Error getting snapshot volume names and uuids : %s",
- volname);
- op_errno = EINVAL;
- }
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND,
+ "Error getting snapshot volume names and uuids : %s", volname);
+ op_errno = EINVAL;
+ }
out:
- snap_info_rsp.op_ret = ret;
- snap_info_rsp.op_errno = op_errno;
- snap_info_rsp.op_errstr = "";
- glusterd_submit_reply (req, &snap_info_rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gf_getsnap_name_uuid_rsp);
-
- if (dict) {
- dict_unref (dict);
- }
+ snap_info_rsp.op_ret = ret;
+ snap_info_rsp.op_errno = op_errno;
+ snap_info_rsp.op_errstr = "";
+ glusterd_submit_reply(req, &snap_info_rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf_getsnap_name_uuid_rsp);
- if (dict_rsp) {
- dict_unref (dict_rsp);
- }
+ if (dict) {
+ dict_unref(dict);
+ }
- if (snap_info_rsp.dict.dict_val) {
- GF_FREE (snap_info_rsp.dict.dict_val);
- }
+ if (dict_rsp) {
+ dict_unref(dict_rsp);
+ }
+
+ if (snap_info_rsp.dict.dict_val) {
+ GF_FREE(snap_info_rsp.dict.dict_val);
+ }
- return 0;
+ return 0;
}
int
-server_get_snap_info (rpcsvc_request_t *req)
+server_get_snap_info(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __server_get_snap_info);
+ return glusterd_big_locked_handler(req, __server_get_snap_info);
}
rpcsvc_actor_t gluster_handshake_actors[GF_HNDSK_MAXVALUE] = {
- [GF_HNDSK_NULL] = {"NULL", GF_HNDSK_NULL, NULL, NULL, 0, DRC_NA},
- [GF_HNDSK_GETSPEC] = {"GETSPEC", GF_HNDSK_GETSPEC, server_getspec, NULL, 0, DRC_NA},
- [GF_HNDSK_EVENT_NOTIFY] = {"EVENTNOTIFY", GF_HNDSK_EVENT_NOTIFY, server_event_notify, NULL, 0, DRC_NA},
- [GF_HNDSK_GET_VOLUME_INFO] = {"GETVOLUMEINFO", GF_HNDSK_GET_VOLUME_INFO, server_get_volume_info, NULL, 0, DRC_NA},
- [GF_HNDSK_GET_SNAPSHOT_INFO] = {"GETSNAPINFO", GF_HNDSK_GET_SNAPSHOT_INFO, server_get_snap_info, NULL, 0, DRC_NA},
+ [GF_HNDSK_NULL] = {"NULL", GF_HNDSK_NULL, NULL, NULL, 0, DRC_NA},
+ [GF_HNDSK_GETSPEC] = {"GETSPEC", GF_HNDSK_GETSPEC, server_getspec, NULL, 0,
+ DRC_NA},
+ [GF_HNDSK_EVENT_NOTIFY] = {"EVENTNOTIFY", GF_HNDSK_EVENT_NOTIFY,
+ server_event_notify, NULL, 0, DRC_NA},
+ [GF_HNDSK_GET_VOLUME_INFO] = {"GETVOLUMEINFO", GF_HNDSK_GET_VOLUME_INFO,
+ server_get_volume_info, NULL, 0, DRC_NA},
+ [GF_HNDSK_GET_SNAPSHOT_INFO] = {"GETSNAPINFO", GF_HNDSK_GET_SNAPSHOT_INFO,
+ server_get_snap_info, NULL, 0, DRC_NA},
};
-
struct rpcsvc_program gluster_handshake_prog = {
- .progname = "Gluster Handshake",
- .prognum = GLUSTER_HNDSK_PROGRAM,
- .progver = GLUSTER_HNDSK_VERSION,
- .actors = gluster_handshake_actors,
- .numactors = GF_HNDSK_MAXVALUE,
+ .progname = "Gluster Handshake",
+ .prognum = GLUSTER_HNDSK_PROGRAM,
+ .progver = GLUSTER_HNDSK_VERSION,
+ .actors = gluster_handshake_actors,
+ .numactors = GF_HNDSK_MAXVALUE,
};
/* A minimal RPC program just for the cli getspec command */
rpcsvc_actor_t gluster_cli_getspec_actors[GF_HNDSK_MAXVALUE] = {
- [GF_HNDSK_GETSPEC] = {"GETSPEC", GF_HNDSK_GETSPEC, server_getspec, NULL, 0, DRC_NA},
+ [GF_HNDSK_GETSPEC] = {"GETSPEC", GF_HNDSK_GETSPEC, server_getspec, NULL, 0,
+ DRC_NA},
};
struct rpcsvc_program gluster_cli_getspec_prog = {
- .progname = "Gluster Handshake (CLI Getspec)",
- .prognum = GLUSTER_HNDSK_PROGRAM,
- .progver = GLUSTER_HNDSK_VERSION,
- .actors = gluster_cli_getspec_actors,
- .numactors = GF_HNDSK_MAXVALUE,
+ .progname = "Gluster Handshake (CLI Getspec)",
+ .prognum = GLUSTER_HNDSK_PROGRAM,
+ .progver = GLUSTER_HNDSK_VERSION,
+ .actors = gluster_cli_getspec_actors,
+ .numactors = GF_HNDSK_MAXVALUE,
};
-
char *glusterd_dump_proc[GF_DUMP_MAXVALUE] = {
- [GF_DUMP_NULL] = "NULL",
- [GF_DUMP_DUMP] = "DUMP",
- [GF_DUMP_PING] = "PING",
+ [GF_DUMP_NULL] = "NULL",
+ [GF_DUMP_DUMP] = "DUMP",
+ [GF_DUMP_PING] = "PING",
};
rpc_clnt_prog_t glusterd_dump_prog = {
- .progname = "GLUSTERD-DUMP",
- .prognum = GLUSTER_DUMP_PROGRAM,
- .progver = GLUSTER_DUMP_VERSION,
- .procnames = glusterd_dump_proc,
+ .progname = "GLUSTERD-DUMP",
+ .prognum = GLUSTER_DUMP_PROGRAM,
+ .progver = GLUSTER_DUMP_VERSION,
+ .procnames = glusterd_dump_proc,
};
-
rpcsvc_actor_t glusterd_mgmt_hndsk_actors[GD_MGMT_HNDSK_MAXVALUE] = {
- [GD_MGMT_HNDSK_NULL] = {"NULL", GD_MGMT_HNDSK_NULL, NULL,
- NULL, 0, DRC_NA},
- [GD_MGMT_HNDSK_VERSIONS] = {"MGMT-VERS", GD_MGMT_HNDSK_VERSIONS,
- glusterd_mgmt_hndsk_versions, NULL,
- 0, DRC_NA},
- [GD_MGMT_HNDSK_VERSIONS_ACK] = {"MGMT-VERS-ACK",
- GD_MGMT_HNDSK_VERSIONS_ACK,
- glusterd_mgmt_hndsk_versions_ack,
- NULL, 0, DRC_NA},
+ [GD_MGMT_HNDSK_NULL] = {"NULL", GD_MGMT_HNDSK_NULL, NULL, NULL, 0, DRC_NA},
+ [GD_MGMT_HNDSK_VERSIONS] = {"MGMT-VERS", GD_MGMT_HNDSK_VERSIONS,
+ glusterd_mgmt_hndsk_versions, NULL, 0, DRC_NA},
+ [GD_MGMT_HNDSK_VERSIONS_ACK] = {"MGMT-VERS-ACK", GD_MGMT_HNDSK_VERSIONS_ACK,
+ glusterd_mgmt_hndsk_versions_ack, NULL, 0,
+ DRC_NA},
};
struct rpcsvc_program glusterd_mgmt_hndsk_prog = {
- .progname = "Gluster MGMT Handshake",
- .prognum = GD_MGMT_HNDSK_PROGRAM,
- .progver = GD_MGMT_HNDSK_VERSION,
- .actors = glusterd_mgmt_hndsk_actors,
- .numactors = GD_MGMT_HNDSK_MAXVALUE,
+ .progname = "Gluster MGMT Handshake",
+ .prognum = GD_MGMT_HNDSK_PROGRAM,
+ .progver = GD_MGMT_HNDSK_VERSION,
+ .actors = glusterd_mgmt_hndsk_actors,
+ .numactors = GD_MGMT_HNDSK_MAXVALUE,
};
char *glusterd_mgmt_hndsk_proc[GD_MGMT_HNDSK_MAXVALUE] = {
- [GD_MGMT_HNDSK_NULL] = "NULL",
- [GD_MGMT_HNDSK_VERSIONS] = "MGMT-VERS",
- [GD_MGMT_HNDSK_VERSIONS_ACK] = "MGMT-VERS-ACK",
+ [GD_MGMT_HNDSK_NULL] = "NULL",
+ [GD_MGMT_HNDSK_VERSIONS] = "MGMT-VERS",
+ [GD_MGMT_HNDSK_VERSIONS_ACK] = "MGMT-VERS-ACK",
};
rpc_clnt_prog_t gd_clnt_mgmt_hndsk_prog = {
- .progname = "Gluster MGMT Handshake",
- .prognum = GD_MGMT_HNDSK_PROGRAM,
- .progver = GD_MGMT_HNDSK_VERSION,
- .procnames = glusterd_mgmt_hndsk_proc,
+ .progname = "Gluster MGMT Handshake",
+ .prognum = GD_MGMT_HNDSK_PROGRAM,
+ .progver = GD_MGMT_HNDSK_VERSION,
+ .procnames = glusterd_mgmt_hndsk_proc,
};
-
static int
-glusterd_event_connected_inject (glusterd_peerctx_t *peerctx)
+glusterd_event_connected_inject(glusterd_peerctx_t *peerctx)
{
- GF_ASSERT (peerctx);
+ GF_ASSERT(peerctx);
- glusterd_friend_sm_event_t *event = NULL;
- glusterd_probe_ctx_t *ctx = NULL;
- int ret = -1;
- glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_friend_sm_event_t *event = NULL;
+ glusterd_probe_ctx_t *ctx = NULL;
+ int ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_CONNECTED, &event);
- ret = glusterd_friend_sm_new_event
- (GD_FRIEND_EVENT_CONNECTED, &event);
-
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_EVENT_NEW_GET_FAIL, "Unable to get new event");
- goto out;
- }
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
+ "Unable to get new event");
+ goto out;
+ }
- ctx = GF_CALLOC (1, sizeof(*ctx), gf_gld_mt_probe_ctx_t);
+ ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_probe_ctx_t);
- if (!ctx) {
- ret = -1;
- gf_msg ("glusterd", GF_LOG_ERROR, ENOMEM,
- GD_MSG_NO_MEMORY, "Memory not available");
- goto out;
- }
-
- rcu_read_lock ();
-
- peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
- if (!peerinfo) {
- ret = -1;
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_PEER_NOT_FOUND, "Could not find peer %s(%s)",
- peerctx->peername, uuid_utoa (peerctx->peerid));
- GF_FREE (ctx);
- goto unlock;
- }
- ctx->hostname = gf_strdup (peerinfo->hostname);
- ctx->port = peerinfo->port;
- ctx->req = peerctx->args.req;
- ctx->dict = peerctx->args.dict;
-
- event->peername = gf_strdup (peerinfo->hostname);
- gf_uuid_copy (event->peerid, peerinfo->uuid);
- event->ctx = ctx;
+ if (!ctx) {
+ ret = -1;
+ gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "Memory not available");
+ goto out;
+ }
- ret = glusterd_friend_sm_inject_event (event);
+ rcu_read_lock();
- if (ret)
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_EVENT_INJECT_FAIL, "Unable to inject "
- "EVENT_CONNECTED ret = %d", ret);
+ peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
+ if (!peerinfo) {
+ ret = -1;
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
+ "Could not find peer %s(%s)", peerctx->peername,
+ uuid_utoa(peerctx->peerid));
+ GF_FREE(ctx);
+ goto unlock;
+ }
+ ctx->hostname = gf_strdup(peerinfo->hostname);
+ ctx->port = peerinfo->port;
+ ctx->req = peerctx->args.req;
+ ctx->dict = peerctx->args.dict;
+
+ event->peername = gf_strdup(peerinfo->hostname);
+ gf_uuid_copy(event->peerid, peerinfo->uuid);
+ event->ctx = ctx;
+
+ ret = glusterd_friend_sm_inject_event(event);
+
+ if (ret)
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
+ "Unable to inject "
+ "EVENT_CONNECTED ret = %d",
+ ret);
unlock:
- rcu_read_unlock ();
+ rcu_read_unlock();
out:
- gf_msg_debug ("glusterd", 0, "returning %d", ret);
- return ret;
+ gf_msg_debug("glusterd", 0, "returning %d", ret);
+ return ret;
}
-
int
-gd_validate_peer_op_version (xlator_t *this, glusterd_peerinfo_t *peerinfo,
- dict_t *dict, char **errstr)
+gd_validate_peer_op_version(xlator_t *this, glusterd_peerinfo_t *peerinfo,
+ dict_t *dict, char **errstr)
{
- int ret = -1;
- glusterd_conf_t *conf = NULL;
- int32_t peer_op_version = 0;
- int32_t peer_min_op_version = 0;
- int32_t peer_max_op_version = 0;
-
- if (!dict || !this || !peerinfo)
- goto out;
-
- conf = this->private;
-
- ret = dict_get_int32 (dict, GD_OP_VERSION_KEY, &peer_op_version);
- if (ret)
- goto out;
-
- ret = dict_get_int32 (dict, GD_MAX_OP_VERSION_KEY,
- &peer_max_op_version);
- if (ret)
- goto out;
-
- ret = dict_get_int32 (dict, GD_MIN_OP_VERSION_KEY,
- &peer_min_op_version);
- if (ret)
- goto out;
-
+ int ret = -1;
+ glusterd_conf_t *conf = NULL;
+ int32_t peer_op_version = 0;
+ int32_t peer_min_op_version = 0;
+ int32_t peer_max_op_version = 0;
+
+ if (!dict || !this || !peerinfo)
+ goto out;
+
+ conf = this->private;
+
+ ret = dict_get_int32(dict, GD_OP_VERSION_KEY, &peer_op_version);
+ if (ret)
+ goto out;
+
+ ret = dict_get_int32(dict, GD_MAX_OP_VERSION_KEY, &peer_max_op_version);
+ if (ret)
+ goto out;
+
+ ret = dict_get_int32(dict, GD_MIN_OP_VERSION_KEY, &peer_min_op_version);
+ if (ret)
+ goto out;
+
+ ret = -1;
+ /* Check if peer can support our op_version */
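+ /* i.e. our cluster op-version must lie within the peer's advertised
+ * [min, max] op-version range. */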
+ if ((peer_max_op_version < conf->op_version) ||
+ (peer_min_op_version > conf->op_version)) {
+ ret = gf_asprintf(errstr,
+ "Peer %s does not support required "
+ "op-version",
+ peerinfo->hostname);
ret = -1;
- /* Check if peer can support our op_version */
- if ((peer_max_op_version < conf->op_version) ||
- (peer_min_op_version > conf->op_version)) {
- ret = gf_asprintf (errstr, "Peer %s does not support required "
- "op-version", peerinfo->hostname);
- ret = -1;
- goto out;
- }
+ goto out;
+ }
- ret = 0;
+ ret = 0;
out:
- if (peerinfo)
- gf_msg_debug ((this ? this->name : "glusterd") , 0, "Peer %s %s",
- peerinfo->hostname,
- ((ret < 0) ? "rejected" : "accepted"));
- return ret;
+ if (peerinfo)
+ gf_msg_debug((this ? this->name : "glusterd"), 0, "Peer %s %s",
+ peerinfo->hostname, ((ret < 0) ? "rejected" : "accepted"));
+ return ret;
}
int
-__glusterd_mgmt_hndsk_version_ack_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+__glusterd_mgmt_hndsk_version_ack_cbk(struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
- int ret = -1;
- gf_mgmt_hndsk_rsp rsp = {0,};
- xlator_t *this = NULL;
- call_frame_t *frame = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- glusterd_peerctx_t *peerctx = NULL;
- char msg[64] = {0,};
-
- this = THIS;
- frame = myframe;
- peerctx = frame->local;
-
- rcu_read_lock ();
- peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
- if (!peerinfo) {
- gf_msg_debug (this->name, 0, "Could not find peer %s(%s)",
- peerctx->peername, uuid_utoa (peerctx->peerid));
- ret = -1;
- goto out;
- }
-
- if (-1 == req->rpc_status) {
- snprintf (msg, sizeof (msg),
- "Error through RPC layer, retry again later");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_RPC_LAYER_ERROR, "%s", msg);
- peerctx->errstr = gf_strdup (msg);
- goto out;
- }
-
- ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_mgmt_hndsk_rsp);
- if (ret < 0) {
- snprintf (msg, sizeof (msg), "Failed to decode XDR");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "%s", msg);
- peerctx->errstr = gf_strdup (msg);
- goto out;
- }
-
- if (-1 == rsp.op_ret) {
- ret = -1;
- snprintf (msg, sizeof (msg),
- "Failed to get handshake ack from remote server");
- gf_msg (frame->this->name, GF_LOG_ERROR, 0,
- GD_MSG_NO_HANDSHAKE_ACK, "%s", msg);
- peerctx->errstr = gf_strdup (msg);
- goto out;
- }
-
- /* TODO: this is hardcoded as of now, but I don't foresee any problems
- * with this as long as we are properly handshaking operating versions
- */
- peerinfo->mgmt = &gd_mgmt_prog;
- peerinfo->peer = &gd_peer_prog;
- peerinfo->mgmt_v3 = &gd_mgmt_v3_prog;
-
- ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);
-
- if (GD_MODE_ON == peerctx->args.mode) {
- (void) glusterd_event_connected_inject (peerctx);
- peerctx->args.req = NULL;
- } else if (GD_MODE_SWITCH_ON == peerctx->args.mode) {
- peerctx->args.mode = GD_MODE_ON;
- } else {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_UNKNOWN_MODE, "unknown mode %d",
- peerctx->args.mode);
- }
-
- ret = 0;
+ int ret = -1;
+ gf_mgmt_hndsk_rsp rsp = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ call_frame_t *frame = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_peerctx_t *peerctx = NULL;
+ char msg[64] = {
+ 0,
+ };
+
+ this = THIS;
+ frame = myframe;
+ peerctx = frame->local;
+
+ rcu_read_lock();
+ peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
+ if (!peerinfo) {
+ gf_msg_debug(this->name, 0, "Could not find peer %s(%s)",
+ peerctx->peername, uuid_utoa(peerctx->peerid));
+ ret = -1;
+ goto out;
+ }
+
+ if (-1 == req->rpc_status) {
+ snprintf(msg, sizeof(msg),
+ "Error through RPC layer, retry again later");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_LAYER_ERROR, "%s", msg);
+ peerctx->errstr = gf_strdup(msg);
+ goto out;
+ }
+
+ ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_mgmt_hndsk_rsp);
+ if (ret < 0) {
+ snprintf(msg, sizeof(msg), "Failed to decode XDR");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s", msg);
+ peerctx->errstr = gf_strdup(msg);
+ goto out;
+ }
+
+ if (-1 == rsp.op_ret) {
+ ret = -1;
+ snprintf(msg, sizeof(msg),
+ "Failed to get handshake ack from remote server");
+ gf_msg(frame->this->name, GF_LOG_ERROR, 0, GD_MSG_NO_HANDSHAKE_ACK,
+ "%s", msg);
+ peerctx->errstr = gf_strdup(msg);
+ goto out;
+ }
+
+ /* TODO: this is hardcoded as of now, but I don't foresee any problems
+ * with this as long as we are properly handshaking operating versions
+ */
+ peerinfo->mgmt = &gd_mgmt_prog;
+ peerinfo->peer = &gd_peer_prog;
+ peerinfo->mgmt_v3 = &gd_mgmt_v3_prog;
+
+ ret = default_notify(this, GF_EVENT_CHILD_UP, NULL);
+
+ if (GD_MODE_ON == peerctx->args.mode) {
+ (void)glusterd_event_connected_inject(peerctx);
+ peerctx->args.req = NULL;
+ } else if (GD_MODE_SWITCH_ON == peerctx->args.mode) {
+ peerctx->args.mode = GD_MODE_ON;
+ } else {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_UNKNOWN_MODE,
+ "unknown mode %d", peerctx->args.mode);
+ }
+
+ ret = 0;
out:
- if (ret != 0 && peerinfo)
- rpc_transport_disconnect (peerinfo->rpc->conn.trans, _gf_false);
+ if (ret != 0 && peerinfo)
+ rpc_transport_disconnect(peerinfo->rpc->conn.trans, _gf_false);
- rcu_read_unlock ();
+ rcu_read_unlock();
- frame->local = NULL;
- STACK_DESTROY (frame->root);
+ frame->local = NULL;
+ STACK_DESTROY(frame->root);
- if (rsp.hndsk.hndsk_val)
- free (rsp.hndsk.hndsk_val);
+ if (rsp.hndsk.hndsk_val)
+ free(rsp.hndsk.hndsk_val);
- glusterd_friend_sm ();
+ glusterd_friend_sm();
- return 0;
+ return 0;
}
int
-glusterd_mgmt_hndsk_version_ack_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+glusterd_mgmt_hndsk_version_ack_cbk(struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
- return glusterd_big_locked_cbk (req, iov, count, myframe,
- __glusterd_mgmt_hndsk_version_ack_cbk);
+ return glusterd_big_locked_cbk(req, iov, count, myframe,
+ __glusterd_mgmt_hndsk_version_ack_cbk);
}
int
-__glusterd_mgmt_hndsk_version_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+__glusterd_mgmt_hndsk_version_cbk(struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
- int ret = -1;
- int op_errno = EINVAL;
- gf_mgmt_hndsk_rsp rsp = {0,};
- gf_mgmt_hndsk_req arg = {{0,}};
- xlator_t *this = NULL;
- call_frame_t *frame = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- glusterd_peerctx_t *peerctx = NULL;
- dict_t *dict = NULL;
- dict_t *rsp_dict = NULL;
- glusterd_conf_t *conf = NULL;
- char msg[64] = {0,};
-
- this = THIS;
- conf = this->private;
- frame = myframe;
- peerctx = frame->local;
-
- rcu_read_lock ();
-
- peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
- if (!peerinfo) {
- ret = -1;
- gf_msg_debug (this->name, 0, "Could not find peer %s(%s)",
- peerctx->peername, uuid_utoa (peerctx->peerid));
- goto out;
- }
-
- if (-1 == req->rpc_status) {
- ret = -1;
- snprintf (msg, sizeof (msg),
- "Error through RPC layer, retry again later");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_RPC_LAYER_ERROR, "%s", msg);
- peerctx->errstr = gf_strdup (msg);
- goto out;
- }
-
- ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_mgmt_hndsk_rsp);
- if (ret < 0) {
- snprintf (msg, sizeof (msg), "Failed to decode management "
- "handshake response");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "%s", msg);
- peerctx->errstr = gf_strdup (msg);
- goto out;
- }
-
- GF_PROTOCOL_DICT_UNSERIALIZE (this, dict, rsp.hndsk.hndsk_val,
- rsp.hndsk.hndsk_len, ret, op_errno,
- out);
-
- op_errno = rsp.op_errno;
- if (-1 == rsp.op_ret) {
- gf_msg (this->name, GF_LOG_ERROR, op_errno,
- GD_MSG_VERS_GET_FAIL,
- "failed to get the 'versions' from peer (%s)",
- req->conn->trans->peerinfo.identifier);
- goto out;
- }
-
- /* Check if peer can be part of cluster */
- ret = gd_validate_peer_op_version (this, peerinfo, dict,
- &peerctx->errstr);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_OP_VERSION_MISMATCH,
- "failed to validate the operating version of peer (%s)",
- peerinfo->hostname);
- goto out;
- }
-
- rsp_dict = dict_new ();
- if (!rsp_dict)
- goto out;
-
- ret = dict_set_int32 (rsp_dict, GD_OP_VERSION_KEY, conf->op_version);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "failed to set operating version in dict");
- goto out;
- }
-
- GF_PROTOCOL_DICT_SERIALIZE (this, rsp_dict, (&arg.hndsk.hndsk_val),
- arg.hndsk.hndsk_len, op_errno, out);
+ int ret = -1;
+ int op_errno = EINVAL;
+ gf_mgmt_hndsk_rsp rsp = {
+ 0,
+ };
+ gf_mgmt_hndsk_req arg = {{
+ 0,
+ }};
+ xlator_t *this = NULL;
+ call_frame_t *frame = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_peerctx_t *peerctx = NULL;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+ glusterd_conf_t *conf = NULL;
+ char msg[64] = {
+ 0,
+ };
+
+ this = THIS;
+ conf = this->private;
+ frame = myframe;
+ peerctx = frame->local;
+
+ rcu_read_lock();
+
+ peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
+ if (!peerinfo) {
+ ret = -1;
+ gf_msg_debug(this->name, 0, "Could not find peer %s(%s)",
+ peerctx->peername, uuid_utoa(peerctx->peerid));
+ goto out;
+ }
- ret = glusterd_submit_request (peerinfo->rpc, &arg, frame,
- &gd_clnt_mgmt_hndsk_prog,
- GD_MGMT_HNDSK_VERSIONS_ACK, NULL, this,
- glusterd_mgmt_hndsk_version_ack_cbk,
- (xdrproc_t)xdr_gf_mgmt_hndsk_req);
+ if (-1 == req->rpc_status) {
+ ret = -1;
+ snprintf(msg, sizeof(msg),
+ "Error through RPC layer, retry again later");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_LAYER_ERROR, "%s", msg);
+ peerctx->errstr = gf_strdup(msg);
+ goto out;
+ }
+
+ ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_mgmt_hndsk_rsp);
+ if (ret < 0) {
+ snprintf(msg, sizeof(msg),
+ "Failed to decode management "
+ "handshake response");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s", msg);
+ peerctx->errstr = gf_strdup(msg);
+ goto out;
+ }
+
+ GF_PROTOCOL_DICT_UNSERIALIZE(this, dict, rsp.hndsk.hndsk_val,
+ rsp.hndsk.hndsk_len, ret, op_errno, out);
+
+ op_errno = rsp.op_errno;
+ if (-1 == rsp.op_ret) {
+ gf_msg(this->name, GF_LOG_ERROR, op_errno, GD_MSG_VERS_GET_FAIL,
+ "failed to get the 'versions' from peer (%s)",
+ req->conn->trans->peerinfo.identifier);
+ goto out;
+ }
+
+ /* Check if peer can be part of cluster */
+ ret = gd_validate_peer_op_version(this, peerinfo, dict, &peerctx->errstr);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERSION_MISMATCH,
+ "failed to validate the operating version of peer (%s)",
+ peerinfo->hostname);
+ goto out;
+ }
+
+ rsp_dict = dict_new();
+ if (!rsp_dict)
+ goto out;
+
+ ret = dict_set_int32(rsp_dict, GD_OP_VERSION_KEY, conf->op_version);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "failed to set operating version in dict");
+ goto out;
+ }
+
+ GF_PROTOCOL_DICT_SERIALIZE(this, rsp_dict, (&arg.hndsk.hndsk_val),
+ arg.hndsk.hndsk_len, op_errno, out);
+
+ ret = glusterd_submit_request(
+ peerinfo->rpc, &arg, frame, &gd_clnt_mgmt_hndsk_prog,
+ GD_MGMT_HNDSK_VERSIONS_ACK, NULL, this,
+ glusterd_mgmt_hndsk_version_ack_cbk, (xdrproc_t)xdr_gf_mgmt_hndsk_req);
out:
- if (ret) {
- frame->local = NULL;
- STACK_DESTROY (frame->root);
- if (peerinfo)
- rpc_transport_disconnect (peerinfo->rpc->conn.trans,
- _gf_false);
- }
+ if (ret) {
+ frame->local = NULL;
+ STACK_DESTROY(frame->root);
+ if (peerinfo)
+ rpc_transport_disconnect(peerinfo->rpc->conn.trans, _gf_false);
+ }
- rcu_read_unlock ();
+ rcu_read_unlock();
- if (rsp.hndsk.hndsk_val)
- free (rsp.hndsk.hndsk_val);
+ if (rsp.hndsk.hndsk_val)
+ free(rsp.hndsk.hndsk_val);
- if (arg.hndsk.hndsk_val)
- GF_FREE (arg.hndsk.hndsk_val);
+ if (arg.hndsk.hndsk_val)
+ GF_FREE(arg.hndsk.hndsk_val);
- if (dict)
- dict_unref (dict);
+ if (dict)
+ dict_unref(dict);
- if (rsp_dict)
- dict_unref (rsp_dict);
+ if (rsp_dict)
+ dict_unref(rsp_dict);
- return 0;
+ return 0;
}
int
-glusterd_mgmt_hndsk_version_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+glusterd_mgmt_hndsk_version_cbk(struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
- return glusterd_big_locked_cbk (req, iov, count, myframe,
- __glusterd_mgmt_hndsk_version_cbk);
+ return glusterd_big_locked_cbk(req, iov, count, myframe,
+ __glusterd_mgmt_hndsk_version_cbk);
}
int
-glusterd_mgmt_handshake (xlator_t *this, glusterd_peerctx_t *peerctx)
+glusterd_mgmt_handshake(xlator_t *this, glusterd_peerctx_t *peerctx)
{
- call_frame_t *frame = NULL;
- gf_mgmt_hndsk_req req = {{0,},};
- glusterd_peerinfo_t *peerinfo = NULL;
- dict_t *req_dict = NULL;
- int ret = -1;
-
- frame = create_frame (this, this->ctx->pool);
- if (!frame)
- goto out;
-
- frame->local = peerctx;
-
- req_dict = dict_new ();
- if (!req_dict)
- goto out;
-
- ret = dict_set_dynstr (req_dict, GD_PEER_ID_KEY,
- gf_strdup (uuid_utoa (MY_UUID)));
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED,
- "failed to set peer ID in dict");
- goto out;
- }
-
- GF_PROTOCOL_DICT_SERIALIZE (this, req_dict, (&req.hndsk.hndsk_val),
- req.hndsk.hndsk_len, ret, out);
-
- rcu_read_lock ();
-
- peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
- if (!peerinfo) {
- gf_msg_debug (THIS->name, 0, "Could not find peer %s(%s)",
- peerctx->peername, uuid_utoa (peerctx->peerid));
- goto unlock;
- }
-
- ret = glusterd_submit_request (peerinfo->rpc, &req, frame,
- &gd_clnt_mgmt_hndsk_prog,
- GD_MGMT_HNDSK_VERSIONS, NULL, this,
- glusterd_mgmt_hndsk_version_cbk,
- (xdrproc_t)xdr_gf_mgmt_hndsk_req);
- ret = 0;
+ call_frame_t *frame = NULL;
+ gf_mgmt_hndsk_req req = {
+ {
+ 0,
+ },
+ };
+ glusterd_peerinfo_t *peerinfo = NULL;
+ dict_t *req_dict = NULL;
+ int ret = -1;
+
+ frame = create_frame(this, this->ctx->pool);
+ if (!frame)
+ goto out;
+
+ frame->local = peerctx;
+
+ req_dict = dict_new();
+ if (!req_dict)
+ goto out;
+
+ ret = dict_set_dynstr(req_dict, GD_PEER_ID_KEY,
+ gf_strdup(uuid_utoa(MY_UUID)));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "failed to set peer ID in dict");
+ goto out;
+ }
+
+ GF_PROTOCOL_DICT_SERIALIZE(this, req_dict, (&req.hndsk.hndsk_val),
+ req.hndsk.hndsk_len, ret, out);
+
+ rcu_read_lock();
+
+ peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
+ if (!peerinfo) {
+ gf_msg_debug(THIS->name, 0, "Could not find peer %s(%s)",
+ peerctx->peername, uuid_utoa(peerctx->peerid));
+ goto unlock;
+ }
+
+ ret = glusterd_submit_request(
+ peerinfo->rpc, &req, frame, &gd_clnt_mgmt_hndsk_prog,
+ GD_MGMT_HNDSK_VERSIONS, NULL, this, glusterd_mgmt_hndsk_version_cbk,
+ (xdrproc_t)xdr_gf_mgmt_hndsk_req);
+ ret = 0;
unlock:
- rcu_read_unlock ();
+ rcu_read_unlock();
out:
- if (ret && frame)
- STACK_DESTROY (frame->root);
+ if (ret && frame)
+ STACK_DESTROY(frame->root);
- return ret;
+ return ret;
}
int
-glusterd_set_clnt_mgmt_program (glusterd_peerinfo_t *peerinfo,
- gf_prog_detail *prog)
+glusterd_set_clnt_mgmt_program(glusterd_peerinfo_t *peerinfo,
+ gf_prog_detail *prog)
{
- gf_prog_detail *trav = NULL;
- int ret = -1;
-
- if (!peerinfo || !prog)
- goto out;
-
- trav = prog;
-
- while (trav) {
- ret = -1;
- if ((gd_mgmt_prog.prognum == trav->prognum) &&
- (gd_mgmt_prog.progver == trav->progver)) {
- peerinfo->mgmt = &gd_mgmt_prog;
- ret = 0;
- }
-
- if ((gd_peer_prog.prognum == trav->prognum) &&
- (gd_peer_prog.progver == trav->progver)) {
- peerinfo->peer = &gd_peer_prog;
- ret = 0;
- }
-
- if (ret) {
- gf_msg_debug ("glusterd", 0,
- "%s (%"PRId64":%"PRId64") not supported",
- trav->progname, trav->prognum,
- trav->progver);
- }
+ gf_prog_detail *trav = NULL;
+ int ret = -1;
- trav = trav->next;
- }
+ if (!peerinfo || !prog)
+ goto out;
- if (peerinfo->mgmt) {
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_VERS_INFO,
- "Using Program %s, Num (%d), Version (%d)",
- peerinfo->mgmt->progname, peerinfo->mgmt->prognum,
- peerinfo->mgmt->progver);
- }
+ trav = prog;
- if (peerinfo->peer) {
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_VERS_INFO,
- "Using Program %s, Num (%d), Version (%d)",
- peerinfo->peer->progname, peerinfo->peer->prognum,
- peerinfo->peer->progver);
+ while (trav) {
+ ret = -1;
+ if ((gd_mgmt_prog.prognum == trav->prognum) &&
+ (gd_mgmt_prog.progver == trav->progver)) {
+ peerinfo->mgmt = &gd_mgmt_prog;
+ ret = 0;
}
- if (peerinfo->mgmt_v3) {
- gf_msg ("glusterd", GF_LOG_INFO, 0,
- GD_MSG_VERS_INFO,
- "Using Program %s, Num (%d), Version (%d)",
- peerinfo->mgmt_v3->progname,
- peerinfo->mgmt_v3->prognum,
- peerinfo->mgmt_v3->progver);
+ if ((gd_peer_prog.prognum == trav->prognum) &&
+ (gd_peer_prog.progver == trav->progver)) {
+ peerinfo->peer = &gd_peer_prog;
+ ret = 0;
}
- ret = 0;
+ if (ret) {
+ gf_msg_debug("glusterd", 0,
+ "%s (%" PRId64 ":%" PRId64 ") not supported",
+ trav->progname, trav->prognum, trav->progver);
+ }
+
+ trav = trav->next;
+ }
+
+ if (peerinfo->mgmt) {
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_VERS_INFO,
+ "Using Program %s, Num (%d), Version (%d)",
+ peerinfo->mgmt->progname, peerinfo->mgmt->prognum,
+ peerinfo->mgmt->progver);
+ }
+
+ if (peerinfo->peer) {
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_VERS_INFO,
+ "Using Program %s, Num (%d), Version (%d)",
+ peerinfo->peer->progname, peerinfo->peer->prognum,
+ peerinfo->peer->progver);
+ }
+
+ if (peerinfo->mgmt_v3) {
+ gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_VERS_INFO,
+ "Using Program %s, Num (%d), Version (%d)",
+ peerinfo->mgmt_v3->progname, peerinfo->mgmt_v3->prognum,
+ peerinfo->mgmt_v3->progver);
+ }
+
+ ret = 0;
out:
- return ret;
-
+ return ret;
}
static gf_boolean_t
-_mgmt_hndsk_prog_present (gf_prog_detail *prog) {
- gf_boolean_t ret = _gf_false;
- gf_prog_detail *trav = NULL;
+_mgmt_hndsk_prog_present(gf_prog_detail *prog)
+{
+ gf_boolean_t ret = _gf_false;
+ gf_prog_detail *trav = NULL;
- GF_ASSERT (prog);
+ GF_ASSERT(prog);
- trav = prog;
+ trav = prog;
- while (trav) {
- if ((trav->prognum == GD_MGMT_HNDSK_PROGRAM) &&
- (trav->progver == GD_MGMT_HNDSK_VERSION)) {
- ret = _gf_true;
- goto out;
- }
- trav = trav->next;
+ while (trav) {
+ if ((trav->prognum == GD_MGMT_HNDSK_PROGRAM) &&
+ (trav->progver == GD_MGMT_HNDSK_VERSION)) {
+ ret = _gf_true;
+ goto out;
}
+ trav = trav->next;
+ }
out:
- return ret;
+ return ret;
}
int
-__glusterd_peer_dump_version_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+__glusterd_peer_dump_version_cbk(struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
- int ret = -1;
- gf_dump_rsp rsp = {0,};
- xlator_t *this = NULL;
- gf_prog_detail *trav = NULL;
- gf_prog_detail *next = NULL;
- call_frame_t *frame = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- glusterd_peerctx_t *peerctx = NULL;
- glusterd_conf_t *conf = NULL;
- char msg[1024] = {0,};
-
- this = THIS;
- conf = this->private;
- frame = myframe;
- peerctx = frame->local;
-
- rcu_read_lock ();
-
- peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
- if (!peerinfo) {
- gf_msg_debug (this->name, 0, "Couldn't find peer %s(%s)",
- peerctx->peername, uuid_utoa (peerctx->peerid));
- goto out;
- }
-
- if (-1 == req->rpc_status) {
- snprintf (msg, sizeof (msg),
- "Error through RPC layer, retry again later");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_RPC_LAYER_ERROR, "%s", msg);
- peerctx->errstr = gf_strdup (msg);
- goto out;
- }
-
- ret = xdr_to_generic (*iov, &rsp, (xdrproc_t)xdr_gf_dump_rsp);
- if (ret < 0) {
- snprintf (msg, sizeof (msg), "Failed to decode XDR");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "%s", msg);
- peerctx->errstr = gf_strdup (msg);
- goto out;
- }
- if (-1 == rsp.op_ret) {
- snprintf (msg, sizeof (msg),
- "Failed to get the 'versions' from remote server");
- gf_msg (frame->this->name, GF_LOG_ERROR, 0,
- GD_MSG_VERS_GET_FAIL, "%s", msg);
- peerctx->errstr = gf_strdup (msg);
- goto out;
- }
-
- if (_mgmt_hndsk_prog_present (rsp.prog)) {
- gf_msg_debug (this->name, 0,
- "Proceeding to op-version handshake with peer %s",
- peerinfo->hostname);
- ret = glusterd_mgmt_handshake (this, peerctx);
- goto out;
- } else if (conf->op_version > 1) {
- ret = -1;
- snprintf (msg, sizeof (msg),
- "Peer %s does not support required op-version",
- peerinfo->hostname);
- peerctx->errstr = gf_strdup (msg);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VERSION_UNSUPPORTED, "%s", msg);
- goto out;
- }
-
- /* Make sure we assign the proper program to peer */
- ret = glusterd_set_clnt_mgmt_program (peerinfo, rsp.prog);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_MGMT_PGM_SET_FAIL,
- "failed to set the mgmt program");
- goto out;
- }
+ int ret = -1;
+ gf_dump_rsp rsp = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ gf_prog_detail *trav = NULL;
+ gf_prog_detail *next = NULL;
+ call_frame_t *frame = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_peerctx_t *peerctx = NULL;
+ glusterd_conf_t *conf = NULL;
+ char msg[1024] = {
+ 0,
+ };
+
+ this = THIS;
+ conf = this->private;
+ frame = myframe;
+ peerctx = frame->local;
+
+ rcu_read_lock();
+
+ peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
+ if (!peerinfo) {
+ gf_msg_debug(this->name, 0, "Couldn't find peer %s(%s)",
+ peerctx->peername, uuid_utoa(peerctx->peerid));
+ goto out;
+ }
+
+ if (-1 == req->rpc_status) {
+ snprintf(msg, sizeof(msg),
+ "Error through RPC layer, retry again later");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_LAYER_ERROR, "%s", msg);
+ peerctx->errstr = gf_strdup(msg);
+ goto out;
+ }
+
+ ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_dump_rsp);
+ if (ret < 0) {
+ snprintf(msg, sizeof(msg), "Failed to decode XDR");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s", msg);
+ peerctx->errstr = gf_strdup(msg);
+ goto out;
+ }
+ if (-1 == rsp.op_ret) {
+ snprintf(msg, sizeof(msg),
+ "Failed to get the 'versions' from remote server");
+ gf_msg(frame->this->name, GF_LOG_ERROR, 0, GD_MSG_VERS_GET_FAIL, "%s",
+ msg);
+ peerctx->errstr = gf_strdup(msg);
+ goto out;
+ }
+
+ if (_mgmt_hndsk_prog_present(rsp.prog)) {
+ gf_msg_debug(this->name, 0,
+ "Proceeding to op-version handshake with peer %s",
+ peerinfo->hostname);
+ ret = glusterd_mgmt_handshake(this, peerctx);
+ goto out;
+ } else if (conf->op_version > 1) {
+ ret = -1;
+ snprintf(msg, sizeof(msg),
+ "Peer %s does not support required op-version",
+ peerinfo->hostname);
+ peerctx->errstr = gf_strdup(msg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED, "%s",
+ msg);
+ goto out;
+ }
+
+ /* Make sure we assign the proper program to peer */
+ ret = glusterd_set_clnt_mgmt_program(peerinfo, rsp.prog);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_MGMT_PGM_SET_FAIL,
+ "failed to set the mgmt program");
+ goto out;
+ }
+
+ ret = default_notify(this, GF_EVENT_CHILD_UP, NULL);
+
+ if (GD_MODE_ON == peerctx->args.mode) {
+ (void)glusterd_event_connected_inject(peerctx);
+ peerctx->args.req = NULL;
+ } else if (GD_MODE_SWITCH_ON == peerctx->args.mode) {
+ peerctx->args.mode = GD_MODE_ON;
+ } else {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_UNKNOWN_MODE,
+ "unknown mode %d", peerctx->args.mode);
+ }
+
+ ret = 0;
- ret = default_notify (this, GF_EVENT_CHILD_UP, NULL);
+out:
+ if (ret != 0 && peerinfo)
+ rpc_transport_disconnect(peerinfo->rpc->conn.trans, _gf_false);
- if (GD_MODE_ON == peerctx->args.mode) {
- (void) glusterd_event_connected_inject (peerctx);
- peerctx->args.req = NULL;
- } else if (GD_MODE_SWITCH_ON == peerctx->args.mode) {
- peerctx->args.mode = GD_MODE_ON;
- } else {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_UNKNOWN_MODE, "unknown mode %d",
- peerctx->args.mode);
- }
+ rcu_read_unlock();
- ret = 0;
+ glusterd_friend_sm();
+ glusterd_op_sm();
-out:
- if (ret != 0 && peerinfo)
- rpc_transport_disconnect (peerinfo->rpc->conn.trans, _gf_false);
-
- rcu_read_unlock ();
-
- glusterd_friend_sm ();
- glusterd_op_sm ();
-
- /* don't use GF_FREE, buffer was allocated by libc */
- if (rsp.prog) {
- trav = rsp.prog;
- while (trav) {
- next = trav->next;
- free (trav->progname);
- free (trav);
- trav = next;
- }
+ /* don't use GF_FREE, buffer was allocated by libc */
+ if (rsp.prog) {
+ trav = rsp.prog;
+ while (trav) {
+ next = trav->next;
+ free(trav->progname);
+ free(trav);
+ trav = next;
}
+ }
- frame->local = NULL;
- STACK_DESTROY (frame->root);
+ frame->local = NULL;
+ STACK_DESTROY(frame->root);
- return 0;
+ return 0;
}
-
int
-glusterd_peer_dump_version_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+glusterd_peer_dump_version_cbk(struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
- return glusterd_big_locked_cbk (req, iov, count, myframe,
- __glusterd_peer_dump_version_cbk);
+ return glusterd_big_locked_cbk(req, iov, count, myframe,
+ __glusterd_peer_dump_version_cbk);
}
int
-glusterd_peer_dump_version (xlator_t *this, struct rpc_clnt *rpc,
- glusterd_peerctx_t *peerctx)
+glusterd_peer_dump_version(xlator_t *this, struct rpc_clnt *rpc,
+ glusterd_peerctx_t *peerctx)
{
- call_frame_t *frame = NULL;
- gf_dump_req req = {0,};
- glusterd_peerinfo_t *peerinfo = NULL;
- int ret = -1;
-
- frame = create_frame (this, this->ctx->pool);
- if (!frame)
- goto out;
-
- frame->local = peerctx;
- if (!peerctx)
- goto out;
-
- rcu_read_lock ();
-
- peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
- if (!peerinfo) {
- gf_msg_debug (this->name, 0, "Couldn't find peer %s(%s)",
- peerctx->peername, uuid_utoa (peerctx->peerid));
- goto unlock;
- }
-
- req.gfs_id = 0xcafe;
-
- ret = glusterd_submit_request (peerinfo->rpc, &req, frame,
- &glusterd_dump_prog, GF_DUMP_DUMP,
- NULL, this,
- glusterd_peer_dump_version_cbk,
- (xdrproc_t)xdr_gf_dump_req);
+ call_frame_t *frame = NULL;
+ gf_dump_req req = {
+ 0,
+ };
+ glusterd_peerinfo_t *peerinfo = NULL;
+ int ret = -1;
+
+ frame = create_frame(this, this->ctx->pool);
+ if (!frame)
+ goto out;
+
+ frame->local = peerctx;
+ if (!peerctx)
+ goto out;
+
+ rcu_read_lock();
+
+ peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
+ if (!peerinfo) {
+ gf_msg_debug(this->name, 0, "Couldn't find peer %s(%s)",
+ peerctx->peername, uuid_utoa(peerctx->peerid));
+ goto unlock;
+ }
+
+ req.gfs_id = 0xcafe;
+
+ ret = glusterd_submit_request(
+ peerinfo->rpc, &req, frame, &glusterd_dump_prog, GF_DUMP_DUMP, NULL,
+ this, glusterd_peer_dump_version_cbk, (xdrproc_t)xdr_gf_dump_req);
unlock:
- rcu_read_unlock ();
+ rcu_read_unlock();
out:
- if (ret && frame)
- STACK_DESTROY (frame->root);
+ if (ret && frame)
+ STACK_DESTROY(frame->root);
- return ret;
+ return ret;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-hooks.c b/xlators/mgmt/glusterd/src/glusterd-hooks.c
index 914d029ed39..4a482d5cfb7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-hooks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-hooks.c
@@ -28,578 +28,562 @@
#include <fnmatch.h>
#define EMPTY ""
-char glusterd_hook_dirnames[GD_OP_MAX][256] =
-{
- [GD_OP_NONE] = EMPTY,
- [GD_OP_CREATE_VOLUME] = "create",
- [GD_OP_START_BRICK] = EMPTY,
- [GD_OP_STOP_BRICK] = EMPTY,
- [GD_OP_DELETE_VOLUME] = "delete",
- [GD_OP_START_VOLUME] = "start",
- [GD_OP_STOP_VOLUME] = "stop",
- [GD_OP_DEFRAG_VOLUME] = EMPTY,
- [GD_OP_ADD_BRICK] = "add-brick",
- [GD_OP_REMOVE_BRICK] = "remove-brick",
- [GD_OP_REPLACE_BRICK] = EMPTY,
- [GD_OP_SET_VOLUME] = "set",
- [GD_OP_RESET_VOLUME] = "reset",
- [GD_OP_SYNC_VOLUME] = EMPTY,
- [GD_OP_LOG_ROTATE] = EMPTY,
- [GD_OP_GSYNC_CREATE] = "gsync-create",
- [GD_OP_GSYNC_SET] = EMPTY,
- [GD_OP_PROFILE_VOLUME] = EMPTY,
- [GD_OP_QUOTA] = EMPTY,
- [GD_OP_STATUS_VOLUME] = EMPTY,
- [GD_OP_REBALANCE] = EMPTY,
- [GD_OP_HEAL_VOLUME] = EMPTY,
- [GD_OP_STATEDUMP_VOLUME] = EMPTY,
- [GD_OP_LIST_VOLUME] = EMPTY,
- [GD_OP_CLEARLOCKS_VOLUME] = EMPTY,
- [GD_OP_DEFRAG_BRICK_VOLUME] = EMPTY,
- [GD_OP_RESET_BRICK] = EMPTY,
+char glusterd_hook_dirnames[GD_OP_MAX][256] = {
+ [GD_OP_NONE] = EMPTY,
+ [GD_OP_CREATE_VOLUME] = "create",
+ [GD_OP_START_BRICK] = EMPTY,
+ [GD_OP_STOP_BRICK] = EMPTY,
+ [GD_OP_DELETE_VOLUME] = "delete",
+ [GD_OP_START_VOLUME] = "start",
+ [GD_OP_STOP_VOLUME] = "stop",
+ [GD_OP_DEFRAG_VOLUME] = EMPTY,
+ [GD_OP_ADD_BRICK] = "add-brick",
+ [GD_OP_REMOVE_BRICK] = "remove-brick",
+ [GD_OP_REPLACE_BRICK] = EMPTY,
+ [GD_OP_SET_VOLUME] = "set",
+ [GD_OP_RESET_VOLUME] = "reset",
+ [GD_OP_SYNC_VOLUME] = EMPTY,
+ [GD_OP_LOG_ROTATE] = EMPTY,
+ [GD_OP_GSYNC_CREATE] = "gsync-create",
+ [GD_OP_GSYNC_SET] = EMPTY,
+ [GD_OP_PROFILE_VOLUME] = EMPTY,
+ [GD_OP_QUOTA] = EMPTY,
+ [GD_OP_STATUS_VOLUME] = EMPTY,
+ [GD_OP_REBALANCE] = EMPTY,
+ [GD_OP_HEAL_VOLUME] = EMPTY,
+ [GD_OP_STATEDUMP_VOLUME] = EMPTY,
+ [GD_OP_LIST_VOLUME] = EMPTY,
+ [GD_OP_CLEARLOCKS_VOLUME] = EMPTY,
+ [GD_OP_DEFRAG_BRICK_VOLUME] = EMPTY,
+ [GD_OP_RESET_BRICK] = EMPTY,
};
#undef EMPTY
static gf_boolean_t
-glusterd_is_hook_enabled (char *script)
+glusterd_is_hook_enabled(char *script)
{
- return (script[0] == 'S' && (fnmatch ("*.rpmsave", script, 0) != 0)
- && (fnmatch ("*.rpmnew", script, 0) != 0));
+ return (script[0] == 'S' && (fnmatch("*.rpmsave", script, 0) != 0) &&
+ (fnmatch("*.rpmnew", script, 0) != 0));
}
int
-glusterd_hooks_create_hooks_directory (char *basedir)
+glusterd_hooks_create_hooks_directory(char *basedir)
{
- int ret = -1;
- int op = GD_OP_NONE;
- int type = GD_COMMIT_HOOK_NONE;
- char version_dir[PATH_MAX] = {0, };
- char path[PATH_MAX] = {0, };
- char *cmd_subdir = NULL;
- char type_subdir[GD_COMMIT_HOOK_MAX][256] = {{0, },
- "pre",
- "post"};
- glusterd_conf_t *priv = NULL;
- int32_t len = 0;
-
- priv = THIS->private;
-
- snprintf (path, sizeof (path), "%s/hooks", basedir);
- ret = mkdir_p (path, 0777, _gf_true);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_CRITICAL, errno,
- GD_MSG_CREATE_DIR_FAILED, "Unable to create %s",
- path);
- goto out;
+ int ret = -1;
+ int op = GD_OP_NONE;
+ int type = GD_COMMIT_HOOK_NONE;
+ char version_dir[PATH_MAX] = {
+ 0,
+ };
+ char path[PATH_MAX] = {
+ 0,
+ };
+ char *cmd_subdir = NULL;
+ char type_subdir[GD_COMMIT_HOOK_MAX][256] = {{
+ 0,
+ },
+ "pre",
+ "post"};
+ glusterd_conf_t *priv = NULL;
+ int32_t len = 0;
+
+ priv = THIS->private;
+
+ snprintf(path, sizeof(path), "%s/hooks", basedir);
+ ret = mkdir_p(path, 0777, _gf_true);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
+ "Unable to create %s", path);
+ goto out;
+ }
+
+ GLUSTERD_GET_HOOKS_DIR(version_dir, GLUSTERD_HOOK_VER, priv);
+ ret = mkdir_p(version_dir, 0777, _gf_true);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
+ "Unable to create %s", version_dir);
+ goto out;
+ }
+
+ for (op = GD_OP_NONE + 1; op < GD_OP_MAX; op++) {
+ cmd_subdir = glusterd_hooks_get_hooks_cmd_subdir(op);
+ if (strlen(cmd_subdir) == 0)
+ continue;
+
+ len = snprintf(path, sizeof(path), "%s/%s", version_dir, cmd_subdir);
+ if ((len < 0) || (len >= sizeof(path))) {
+ ret = -1;
+ goto out;
}
-
- GLUSTERD_GET_HOOKS_DIR (version_dir, GLUSTERD_HOOK_VER, priv);
- ret = mkdir_p (version_dir, 0777, _gf_true);
+ ret = mkdir_p(path, 0777, _gf_true);
if (ret) {
- gf_msg (THIS->name, GF_LOG_CRITICAL, errno,
- GD_MSG_CREATE_DIR_FAILED, "Unable to create %s",
- version_dir);
- goto out;
+ gf_msg(THIS->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED,
+ "Unable to create %s", path);
+ goto out;
}
- for (op = GD_OP_NONE+1; op < GD_OP_MAX; op++) {
- cmd_subdir = glusterd_hooks_get_hooks_cmd_subdir (op);
- if (strlen (cmd_subdir) == 0)
- continue;
-
- len = snprintf (path, sizeof (path), "%s/%s", version_dir,
- cmd_subdir);
- if ((len < 0) || (len >= sizeof(path))) {
- ret = -1;
- goto out;
- }
- ret = mkdir_p (path, 0777, _gf_true);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_CRITICAL, errno,
- GD_MSG_CREATE_DIR_FAILED,
- "Unable to create %s",
- path);
- goto out;
- }
-
- for (type = GD_COMMIT_HOOK_PRE; type < GD_COMMIT_HOOK_MAX;
- type++) {
- len = snprintf (path, sizeof (path), "%s/%s/%s",
- version_dir, cmd_subdir,
- type_subdir[type]);
- if ((len < 0) || (len >= sizeof(path))) {
- ret = -1;
- goto out;
- }
- ret = mkdir_p (path, 0777, _gf_true);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_CRITICAL, errno,
- GD_MSG_CREATE_DIR_FAILED,
- "Unable to create %s",
- path);
- goto out;
- }
- }
+ for (type = GD_COMMIT_HOOK_PRE; type < GD_COMMIT_HOOK_MAX; type++) {
+ len = snprintf(path, sizeof(path), "%s/%s/%s", version_dir,
+ cmd_subdir, type_subdir[type]);
+ if ((len < 0) || (len >= sizeof(path))) {
+ ret = -1;
+ goto out;
+ }
+ ret = mkdir_p(path, 0777, _gf_true);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_CRITICAL, errno,
+ GD_MSG_CREATE_DIR_FAILED, "Unable to create %s", path);
+ goto out;
+ }
}
+ }
- ret = 0;
+ ret = 0;
out:
- return ret;
+ return ret;
}
-char*
-glusterd_hooks_get_hooks_cmd_subdir (glusterd_op_t op)
+char *
+glusterd_hooks_get_hooks_cmd_subdir(glusterd_op_t op)
{
- GF_ASSERT ((op > GD_OP_NONE) && (op < GD_OP_MAX));
+ GF_ASSERT((op > GD_OP_NONE) && (op < GD_OP_MAX));
- return glusterd_hook_dirnames[op];
+ return glusterd_hook_dirnames[op];
}
void
-glusterd_hooks_add_working_dir (runner_t *runner, glusterd_conf_t *priv)
+glusterd_hooks_add_working_dir(runner_t *runner, glusterd_conf_t *priv)
{
- runner_argprintf (runner, "--gd-workdir=%s", priv->workdir);
+ runner_argprintf(runner, "--gd-workdir=%s", priv->workdir);
}
void
-glusterd_hooks_add_op (runner_t *runner, char *op)
+glusterd_hooks_add_op(runner_t *runner, char *op)
{
- runner_argprintf (runner, "--volume-op=%s", op);
+ runner_argprintf(runner, "--volume-op=%s", op);
}
void
-glusterd_hooks_add_hooks_version (runner_t* runner)
+glusterd_hooks_add_hooks_version(runner_t *runner)
{
- runner_argprintf (runner, "--version=%d", GLUSTERD_HOOK_VER);
+ runner_argprintf(runner, "--version=%d", GLUSTERD_HOOK_VER);
}
static void
-glusterd_hooks_add_custom_args (dict_t *dict, runner_t *runner)
+glusterd_hooks_add_custom_args(dict_t *dict, runner_t *runner)
{
- char *hooks_args = NULL;
- int32_t ret = -1;
- xlator_t *this = NULL;
+ char *hooks_args = NULL;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
- GF_VALIDATE_OR_GOTO (this->name, dict, out);
- GF_VALIDATE_OR_GOTO (this->name, runner, out);
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, dict, out);
+ GF_VALIDATE_OR_GOTO(this->name, runner, out);
- ret = dict_get_str (dict, "hooks_args", &hooks_args);
- if (ret)
- gf_msg_debug (this->name, 0,
- "No Hooks Arguments.");
- else
- gf_msg_debug (this->name, 0,
- "Hooks Args = %s", hooks_args);
+ ret = dict_get_str(dict, "hooks_args", &hooks_args);
+ if (ret)
+ gf_msg_debug(this->name, 0, "No Hooks Arguments.");
+ else
+ gf_msg_debug(this->name, 0, "Hooks Args = %s", hooks_args);
- if (hooks_args)
- runner_argprintf (runner, "%s", hooks_args);
+ if (hooks_args)
+ runner_argprintf(runner, "%s", hooks_args);
out:
- return;
+ return;
}
-
int
-glusterd_hooks_set_volume_args (dict_t *dict, runner_t *runner)
+glusterd_hooks_set_volume_args(dict_t *dict, runner_t *runner)
{
- int i = 0;
- int count = 0;
- int ret = -1;
- char query[1024] = {0,};
- char *key = NULL;
- char *value = NULL;
-
- ret = dict_get_int32 (dict, "count", &count);
+ int i = 0;
+ int count = 0;
+ int ret = -1;
+ char query[1024] = {
+ 0,
+ };
+ char *key = NULL;
+ char *value = NULL;
+
+ ret = dict_get_int32(dict, "count", &count);
+ if (ret)
+ goto out;
+
+ /* This will not happen unless op_ctx
+ * is corrupted*/
+ if (!count)
+ goto out;
+
+ runner_add_arg(runner, "-o");
+ for (i = 1; ret == 0; i++) {
+ snprintf(query, sizeof(query), "key%d", i);
+ ret = dict_get_str(dict, query, &key);
if (ret)
- goto out;
-
- /* This will not happen unless op_ctx
- * is corrupted*/
- if (!count)
- goto out;
+ continue;
- runner_add_arg (runner, "-o");
- for (i = 1; ret == 0; i++) {
- snprintf (query, sizeof (query), "key%d", i);
- ret = dict_get_str (dict, query, &key);
- if (ret)
- continue;
-
- snprintf (query, sizeof (query), "value%d", i);
- ret = dict_get_str (dict, query, &value);
- if (ret)
- continue;
+ snprintf(query, sizeof(query), "value%d", i);
+ ret = dict_get_str(dict, query, &value);
+ if (ret)
+ continue;
- runner_argprintf (runner, "%s=%s", key, value);
- }
+ runner_argprintf(runner, "%s=%s", key, value);
+ }
- glusterd_hooks_add_custom_args (dict, runner);
+ glusterd_hooks_add_custom_args(dict, runner);
- ret = 0;
+ ret = 0;
out:
- return ret;
+ return ret;
}
static int
-glusterd_hooks_add_op_args (runner_t *runner, glusterd_op_t op,
- dict_t *op_ctx, glusterd_commit_hook_type_t type)
+glusterd_hooks_add_op_args(runner_t *runner, glusterd_op_t op, dict_t *op_ctx,
+ glusterd_commit_hook_type_t type)
{
- int vol_count = 0;
- gf_boolean_t truth = _gf_false;
- glusterd_volinfo_t *voliter = NULL;
- glusterd_conf_t *priv = NULL;
- int ret = -1;
-
- priv = THIS->private;
- cds_list_for_each_entry (voliter, &priv->volumes, vol_list) {
- if (glusterd_is_volume_started (voliter))
- vol_count++;
- }
-
- ret = 0;
- switch (op) {
- case GD_OP_START_VOLUME:
- if (type == GD_COMMIT_HOOK_PRE &&
- vol_count == 0)
- truth = _gf_true;
-
- else if (type == GD_COMMIT_HOOK_POST &&
- vol_count == 1)
- truth = _gf_true;
-
- else
- truth = _gf_false;
-
- runner_argprintf (runner, "--first=%s",
- truth? "yes":"no");
-
- glusterd_hooks_add_hooks_version (runner);
- glusterd_hooks_add_op (runner, "start");
- glusterd_hooks_add_working_dir (runner, priv);
-
- break;
-
- case GD_OP_STOP_VOLUME:
- if (type == GD_COMMIT_HOOK_PRE &&
- vol_count == 1)
- truth = _gf_true;
-
- else if (type == GD_COMMIT_HOOK_POST &&
- vol_count == 0)
- truth = _gf_true;
-
- else
- truth = _gf_false;
-
- runner_argprintf (runner, "--last=%s",
- truth? "yes":"no");
- break;
-
- case GD_OP_SET_VOLUME:
- ret = glusterd_hooks_set_volume_args (op_ctx, runner);
- glusterd_hooks_add_working_dir (runner, priv);
- break;
-
- case GD_OP_GSYNC_CREATE:
- glusterd_hooks_add_custom_args (op_ctx, runner);
- break;
-
- case GD_OP_ADD_BRICK:
- glusterd_hooks_add_hooks_version (runner);
- glusterd_hooks_add_op (runner, "add-brick");
- glusterd_hooks_add_working_dir (runner, priv);
- break;
-
- case GD_OP_RESET_VOLUME:
- glusterd_hooks_add_hooks_version (runner);
- glusterd_hooks_add_op (runner, "reset");
- glusterd_hooks_add_working_dir (runner, priv);
- break;
-
- default:
- break;
-
- }
-
- return ret;
+ int vol_count = 0;
+ gf_boolean_t truth = _gf_false;
+ glusterd_volinfo_t *voliter = NULL;
+ glusterd_conf_t *priv = NULL;
+ int ret = -1;
+
+ priv = THIS->private;
+ cds_list_for_each_entry(voliter, &priv->volumes, vol_list)
+ {
+ if (glusterd_is_volume_started(voliter))
+ vol_count++;
+ }
+
+ ret = 0;
+ switch (op) {
+ case GD_OP_START_VOLUME:
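+ /* A pre-hook with no started volumes, or a post-hook with exactly
+ * one, means this start brings up the first running volume. */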
+ if (type == GD_COMMIT_HOOK_PRE && vol_count == 0)
+ truth = _gf_true;
+
+ else if (type == GD_COMMIT_HOOK_POST && vol_count == 1)
+ truth = _gf_true;
+
+ else
+ truth = _gf_false;
+
+ runner_argprintf(runner, "--first=%s", truth ? "yes" : "no");
+
+ glusterd_hooks_add_hooks_version(runner);
+ glusterd_hooks_add_op(runner, "start");
+ glusterd_hooks_add_working_dir(runner, priv);
+
+ break;
+
+ case GD_OP_STOP_VOLUME:
+ if (type == GD_COMMIT_HOOK_PRE && vol_count == 1)
+ truth = _gf_true;
+
+ else if (type == GD_COMMIT_HOOK_POST && vol_count == 0)
+ truth = _gf_true;
+
+ else
+ truth = _gf_false;
+
+ runner_argprintf(runner, "--last=%s", truth ? "yes" : "no");
+ break;
+
+ case GD_OP_SET_VOLUME:
+ ret = glusterd_hooks_set_volume_args(op_ctx, runner);
+ glusterd_hooks_add_working_dir(runner, priv);
+ break;
+
+ case GD_OP_GSYNC_CREATE:
+ glusterd_hooks_add_custom_args(op_ctx, runner);
+ break;
+
+ case GD_OP_ADD_BRICK:
+ glusterd_hooks_add_hooks_version(runner);
+ glusterd_hooks_add_op(runner, "add-brick");
+ glusterd_hooks_add_working_dir(runner, priv);
+ break;
+
+ case GD_OP_RESET_VOLUME:
+ glusterd_hooks_add_hooks_version(runner);
+ glusterd_hooks_add_op(runner, "reset");
+ glusterd_hooks_add_working_dir(runner, priv);
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
}
int
-glusterd_hooks_run_hooks (char *hooks_path, glusterd_op_t op, dict_t *op_ctx,
- glusterd_commit_hook_type_t type)
+glusterd_hooks_run_hooks(char *hooks_path, glusterd_op_t op, dict_t *op_ctx,
+ glusterd_commit_hook_type_t type)
{
- xlator_t *this = NULL;
- runner_t runner = {0,};
- DIR *hookdir = NULL;
- struct dirent *entry = NULL;
- struct dirent scratch[2] = {{0,},};
- char *volname = NULL;
- char **lines = NULL;
- int N = 8; /*arbitrary*/
- int lineno = 0;
- int line_count = 0;
- int ret = -1;
-
- this = THIS;
-
- ret = dict_get_str (op_ctx, "volname", &volname);
- if (ret) {
- gf_msg (this->name, GF_LOG_CRITICAL, errno,
- GD_MSG_DICT_GET_FAILED, "Failed to get volname "
- "from operation context");
- goto out;
- }
+ xlator_t *this = NULL;
+ runner_t runner = {
+ 0,
+ };
+ DIR *hookdir = NULL;
+ struct dirent *entry = NULL;
+ struct dirent scratch[2] = {
+ {
+ 0,
+ },
+ };
+ char *volname = NULL;
+ char **lines = NULL;
+ int N = 8; /*arbitrary*/
+ int lineno = 0;
+ int line_count = 0;
+ int ret = -1;
+
+ this = THIS;
+
+ ret = dict_get_str(op_ctx, "volname", &volname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_CRITICAL, errno, GD_MSG_DICT_GET_FAILED,
+ "Failed to get volname "
+ "from operation context");
+ goto out;
+ }
+
+ hookdir = sys_opendir(hooks_path);
+ if (!hookdir) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DIR_OP_FAILED,
+ "Failed to open dir %s", hooks_path);
+ goto out;
+ }
- hookdir = sys_opendir (hooks_path);
- if (!hookdir) {
- ret = -1;
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DIR_OP_FAILED,
- "Failed to open dir %s",
- hooks_path);
+ lines = GF_CALLOC(1, N * sizeof(*lines), gf_gld_mt_charptr);
+ if (!lines) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = -1;
+ line_count = 0;
+ GF_SKIP_IRRELEVANT_ENTRIES(entry, hookdir, scratch);
+ while (entry) {
+ if (line_count == N - 1) {
+ N *= 2;
+ lines = GF_REALLOC(lines, N * sizeof(char *));
+ if (!lines)
goto out;
}
- lines = GF_CALLOC (1, N * sizeof (*lines), gf_gld_mt_charptr);
- if (!lines) {
- ret = -1;
- goto out;
+ if (glusterd_is_hook_enabled(entry->d_name)) {
+ lines[line_count] = gf_strdup(entry->d_name);
+ line_count++;
}
- ret = -1;
- line_count = 0;
- GF_SKIP_IRRELEVANT_ENTRIES (entry, hookdir, scratch);
- while (entry) {
- if (line_count == N-1) {
- N *= 2;
- lines = GF_REALLOC (lines, N * sizeof (char *));
- if (!lines)
- goto out;
- }
-
- if (glusterd_is_hook_enabled (entry->d_name)) {
- lines[line_count] = gf_strdup (entry->d_name);
- line_count++;
- }
-
- GF_SKIP_IRRELEVANT_ENTRIES (entry, hookdir, scratch);
- }
+ GF_SKIP_IRRELEVANT_ENTRIES(entry, hookdir, scratch);
+ }
- lines[line_count] = NULL;
- lines = GF_REALLOC (lines, (line_count + 1) * sizeof (char *));
- if (!lines)
- goto out;
+ lines[line_count] = NULL;
+ lines = GF_REALLOC(lines, (line_count + 1) * sizeof(char *));
+ if (!lines)
+ goto out;
- qsort (lines, line_count, sizeof (*lines), glusterd_compare_lines);
-
- for (lineno = 0; lineno < line_count; lineno++) {
-
- runinit (&runner);
- runner_argprintf (&runner, "%s/%s", hooks_path, lines[lineno]);
- /*Add future command line arguments to hook scripts below*/
- runner_argprintf (&runner, "--volname=%s", volname);
- ret = glusterd_hooks_add_op_args (&runner, op, op_ctx, type);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_ADD_OP_ARGS_FAIL, "Failed to add "
- "command specific arguments");
- goto out;
- }
-
- ret = runner_run_reuse (&runner);
- if (ret) {
- runner_log (&runner, this->name, GF_LOG_ERROR,
- "Failed to execute script");
- } else {
- runner_log (&runner, this->name, GF_LOG_INFO,
- "Ran script");
- }
- runner_end (&runner);
+ qsort(lines, line_count, sizeof(*lines), glusterd_compare_lines);
+
+ for (lineno = 0; lineno < line_count; lineno++) {
+ runinit(&runner);
+ runner_argprintf(&runner, "%s/%s", hooks_path, lines[lineno]);
+ /*Add future command line arguments to hook scripts below*/
+ runner_argprintf(&runner, "--volname=%s", volname);
+ ret = glusterd_hooks_add_op_args(&runner, op, op_ctx, type);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_ADD_OP_ARGS_FAIL,
+ "Failed to add "
+ "command specific arguments");
+ goto out;
+ }
+
+ ret = runner_run_reuse(&runner);
+ if (ret) {
+ runner_log(&runner, this->name, GF_LOG_ERROR,
+ "Failed to execute script");
+ } else {
+ runner_log(&runner, this->name, GF_LOG_INFO, "Ran script");
}
+ runner_end(&runner);
+ }
- ret = 0;
+ ret = 0;
out:
- if (lines) {
- for (lineno = 0; lineno < line_count+1; lineno++)
- GF_FREE (lines[lineno]);
+ if (lines) {
+ for (lineno = 0; lineno < line_count + 1; lineno++)
+ GF_FREE(lines[lineno]);
- GF_FREE (lines);
- }
+ GF_FREE(lines);
+ }
- if (hookdir)
- sys_closedir (hookdir);
+ if (hookdir)
+ sys_closedir(hookdir);
- return ret;
+ return ret;
}
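
glusterd_hooks_run_hooks above follows a collect-grow-sort shape: enabled script names are gathered into an array that doubles as it fills (keeping one slot for a NULL sentinel), the array is shrunk to fit, and qsort puts the scripts into a deterministic run order. A self-contained sketch of the same pattern in plain libc, with the enabled-check and script execution elided:

    #include <dirent.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int
    cmp_names(const void *a, const void *b)
    {
        return strcmp(*(char *const *)a, *(char *const *)b);
    }

    int
    main(int argc, char **argv)
    {
        const char *path = argc > 1 ? argv[1] : ".";
        DIR *dir = opendir(path);
        if (!dir)
            return 1;

        size_t cap = 8, n = 0;            /* start small, double on demand */
        char **names = calloc(cap, sizeof(*names));
        if (!names) {
            closedir(dir);
            return 1;
        }

        struct dirent *de;
        while ((de = readdir(dir)) != NULL) {
            if (de->d_name[0] == '.')     /* skip ".", ".." and dotfiles */
                continue;
            if (n == cap - 1) {           /* keep one slot for a NULL sentinel */
                cap *= 2;
                char **grown = realloc(names, cap * sizeof(*names));
                if (!grown)
                    break;                /* out of memory: stop collecting */
                names = grown;
            }
            names[n++] = strdup(de->d_name);
        }
        names[n] = NULL;
        closedir(dir);

        qsort(names, n, sizeof(*names), cmp_names);  /* deterministic order */
        for (size_t i = 0; i < n; i++) {
            printf("%s\n", names[i]);     /* the hooks runner exec()s these */
            free(names[i]);
        }
        free(names);
        return 0;
    }
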
int
-glusterd_hooks_post_stub_enqueue (char *scriptdir, glusterd_op_t op,
- dict_t *op_ctx)
+glusterd_hooks_post_stub_enqueue(char *scriptdir, glusterd_op_t op,
+ dict_t *op_ctx)
{
- int ret = -1;
- glusterd_hooks_stub_t *stub = NULL;
- glusterd_hooks_private_t *hooks_priv = NULL;
- glusterd_conf_t *conf = NULL;
-
- conf = THIS->private;
- hooks_priv = conf->hooks_priv;
-
- ret = glusterd_hooks_stub_init (&stub, scriptdir, op, op_ctx);
- if (ret)
- goto out;
-
- pthread_mutex_lock (&hooks_priv->mutex);
- {
- hooks_priv->waitcount++;
- cds_list_add_tail (&stub->all_hooks, &hooks_priv->list);
- pthread_cond_signal (&hooks_priv->cond);
- }
- pthread_mutex_unlock (&hooks_priv->mutex);
-
- ret = 0;
+ int ret = -1;
+ glusterd_hooks_stub_t *stub = NULL;
+ glusterd_hooks_private_t *hooks_priv = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ conf = THIS->private;
+ hooks_priv = conf->hooks_priv;
+
+ ret = glusterd_hooks_stub_init(&stub, scriptdir, op, op_ctx);
+ if (ret)
+ goto out;
+
+ pthread_mutex_lock(&hooks_priv->mutex);
+ {
+ hooks_priv->waitcount++;
+ cds_list_add_tail(&stub->all_hooks, &hooks_priv->list);
+ pthread_cond_signal(&hooks_priv->cond);
+ }
+ pthread_mutex_unlock(&hooks_priv->mutex);
+
+ ret = 0;
out:
- return ret;
+ return ret;
}
int
-glusterd_hooks_stub_init (glusterd_hooks_stub_t **stub, char *scriptdir,
- glusterd_op_t op, dict_t *op_ctx)
+glusterd_hooks_stub_init(glusterd_hooks_stub_t **stub, char *scriptdir,
+ glusterd_op_t op, dict_t *op_ctx)
{
- int ret = -1;
- glusterd_hooks_stub_t *hooks_stub = NULL;
+ int ret = -1;
+ glusterd_hooks_stub_t *hooks_stub = NULL;
- GF_ASSERT (stub);
- if (!stub)
- goto out;
+ GF_ASSERT(stub);
+ if (!stub)
+ goto out;
- hooks_stub = GF_CALLOC (1, sizeof (*hooks_stub),
- gf_gld_mt_hooks_stub_t);
- if (!hooks_stub)
- goto out;
+ hooks_stub = GF_CALLOC(1, sizeof(*hooks_stub), gf_gld_mt_hooks_stub_t);
+ if (!hooks_stub)
+ goto out;
- CDS_INIT_LIST_HEAD (&hooks_stub->all_hooks);
- hooks_stub->op = op;
- hooks_stub->scriptdir = gf_strdup (scriptdir);
- if (!hooks_stub->scriptdir)
- goto out;
+ CDS_INIT_LIST_HEAD(&hooks_stub->all_hooks);
+ hooks_stub->op = op;
+ hooks_stub->scriptdir = gf_strdup(scriptdir);
+ if (!hooks_stub->scriptdir)
+ goto out;
- hooks_stub->op_ctx = dict_copy_with_ref (op_ctx, hooks_stub->op_ctx);
- if (!hooks_stub->op_ctx)
- goto out;
+ hooks_stub->op_ctx = dict_copy_with_ref(op_ctx, hooks_stub->op_ctx);
+ if (!hooks_stub->op_ctx)
+ goto out;
- *stub = hooks_stub;
- ret = 0;
+ *stub = hooks_stub;
+ ret = 0;
out:
- if (ret) {
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_POST_HOOK_STUB_INIT_FAIL, "Failed to initialize "
- "post hooks stub");
- glusterd_hooks_stub_cleanup (hooks_stub);
- }
-
- return ret;
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_POST_HOOK_STUB_INIT_FAIL,
+ "Failed to initialize "
+ "post hooks stub");
+ glusterd_hooks_stub_cleanup(hooks_stub);
+ }
+
+ return ret;
}
void
-glusterd_hooks_stub_cleanup (glusterd_hooks_stub_t *stub)
+glusterd_hooks_stub_cleanup(glusterd_hooks_stub_t *stub)
{
- if (!stub) {
- gf_msg_callingfn (THIS->name, GF_LOG_WARNING, 0,
- GD_MSG_HOOK_STUB_NULL,
- "hooks_stub is NULL");
- return;
- }
+ if (!stub) {
+ gf_msg_callingfn(THIS->name, GF_LOG_WARNING, 0, GD_MSG_HOOK_STUB_NULL,
+ "hooks_stub is NULL");
+ return;
+ }
- if (stub->op_ctx)
- dict_unref (stub->op_ctx);
+ if (stub->op_ctx)
+ dict_unref(stub->op_ctx);
- GF_FREE (stub->scriptdir);
+ GF_FREE(stub->scriptdir);
- GF_FREE (stub);
+ GF_FREE(stub);
}
-static void*
-hooks_worker (void *args)
+static void *
+hooks_worker(void *args)
{
- glusterd_conf_t *conf = NULL;
- glusterd_hooks_private_t *hooks_priv = NULL;
- glusterd_hooks_stub_t *stub = NULL;
-
- THIS = args;
- conf = THIS->private;
- hooks_priv = conf->hooks_priv;
-
- for (;;) {
- pthread_mutex_lock (&hooks_priv->mutex);
- {
- while (cds_list_empty (&hooks_priv->list)) {
- pthread_cond_wait (&hooks_priv->cond,
- &hooks_priv->mutex);
- }
- stub = cds_list_entry (hooks_priv->list.next,
- glusterd_hooks_stub_t,
- all_hooks);
- cds_list_del_init (&stub->all_hooks);
- hooks_priv->waitcount--;
-
- }
- pthread_mutex_unlock (&hooks_priv->mutex);
-
- glusterd_hooks_run_hooks (stub->scriptdir, stub->op,
- stub->op_ctx, GD_COMMIT_HOOK_POST);
- glusterd_hooks_stub_cleanup (stub);
+ glusterd_conf_t *conf = NULL;
+ glusterd_hooks_private_t *hooks_priv = NULL;
+ glusterd_hooks_stub_t *stub = NULL;
+
+ THIS = args;
+ conf = THIS->private;
+ hooks_priv = conf->hooks_priv;
+
+ for (;;) {
+ pthread_mutex_lock(&hooks_priv->mutex);
+ {
+ while (cds_list_empty(&hooks_priv->list)) {
+ pthread_cond_wait(&hooks_priv->cond, &hooks_priv->mutex);
+ }
+ stub = cds_list_entry(hooks_priv->list.next, glusterd_hooks_stub_t,
+ all_hooks);
+ cds_list_del_init(&stub->all_hooks);
+ hooks_priv->waitcount--;
}
+ pthread_mutex_unlock(&hooks_priv->mutex);
+
+ glusterd_hooks_run_hooks(stub->scriptdir, stub->op, stub->op_ctx,
+ GD_COMMIT_HOOK_POST);
+ glusterd_hooks_stub_cleanup(stub);
+ }
- return NULL;
+ return NULL;
}
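
Taken together, glusterd_hooks_post_stub_enqueue and hooks_worker are a textbook producer/consumer: the producer appends under the mutex and signals the condition variable, while the consumer waits in a while loop (which guards against spurious wakeups) and runs each stub outside the lock. A runnable sketch of the same pattern, using illustrative names rather than the glusterd types:

    /* build: cc -pthread queue.c */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct job {
        int id;
        struct job *next;
    } job_t;

    static job_t *head, *tail;
    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

    static void
    enqueue(int id)                       /* producer side */
    {
        job_t *j = calloc(1, sizeof(*j));
        if (!j)
            return;
        j->id = id;
        pthread_mutex_lock(&mtx);
        if (tail)
            tail->next = j;
        else
            head = j;
        tail = j;
        pthread_cond_signal(&cond);       /* wake the sleeping worker */
        pthread_mutex_unlock(&mtx);
    }

    static void *
    worker(void *arg)                     /* consumer side */
    {
        (void)arg;
        for (int done = 0; done < 3; done++) {
            pthread_mutex_lock(&mtx);
            while (!head)                 /* loop guards spurious wakeups */
                pthread_cond_wait(&cond, &mtx);
            job_t *j = head;
            head = j->next;
            if (!head)
                tail = NULL;
            pthread_mutex_unlock(&mtx);

            printf("ran job %d\n", j->id);  /* run work outside the lock */
            free(j);
        }
        return NULL;
    }

    int
    main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, worker, NULL);
        for (int i = 1; i <= 3; i++)
            enqueue(i);
        pthread_join(t, NULL);
        return 0;
    }
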
int
-glusterd_hooks_priv_init (glusterd_hooks_private_t **new)
+glusterd_hooks_priv_init(glusterd_hooks_private_t **new)
{
- int ret = -1;
- glusterd_hooks_private_t *hooks_priv = NULL;
+ int ret = -1;
+ glusterd_hooks_private_t *hooks_priv = NULL;
- if (!new)
- goto out;
+ if (!new)
+ goto out;
- hooks_priv = GF_CALLOC (1, sizeof (*hooks_priv),
- gf_gld_mt_hooks_priv_t);
- if (!hooks_priv)
- goto out;
+ hooks_priv = GF_CALLOC(1, sizeof(*hooks_priv), gf_gld_mt_hooks_priv_t);
+ if (!hooks_priv)
+ goto out;
- pthread_mutex_init (&hooks_priv->mutex, NULL);
- pthread_cond_init (&hooks_priv->cond, NULL);
- CDS_INIT_LIST_HEAD (&hooks_priv->list);
- hooks_priv->waitcount = 0;
+ pthread_mutex_init(&hooks_priv->mutex, NULL);
+ pthread_cond_init(&hooks_priv->cond, NULL);
+ CDS_INIT_LIST_HEAD(&hooks_priv->list);
+ hooks_priv->waitcount = 0;
- *new = hooks_priv;
- ret = 0;
+ *new = hooks_priv;
+ ret = 0;
out:
- return ret;
+ return ret;
}
int
-glusterd_hooks_spawn_worker (xlator_t *this)
+glusterd_hooks_spawn_worker(xlator_t *this)
{
- int ret = -1;
- glusterd_conf_t *conf = NULL;
- glusterd_hooks_private_t *hooks_priv = NULL;
-
-
- ret = glusterd_hooks_priv_init (&hooks_priv);
- if (ret)
- goto out;
-
- conf = this->private;
- conf->hooks_priv = hooks_priv;
- ret = gf_thread_create (&hooks_priv->worker, NULL, hooks_worker,
- (void *)this, "gdhooks");
- if (ret)
- gf_msg (this->name, GF_LOG_CRITICAL, errno,
- GD_MSG_SPAWN_THREADS_FAIL, "Failed to spawn post "
- "hooks worker thread");
+ int ret = -1;
+ glusterd_conf_t *conf = NULL;
+ glusterd_hooks_private_t *hooks_priv = NULL;
+
+ ret = glusterd_hooks_priv_init(&hooks_priv);
+ if (ret)
+ goto out;
+
+ conf = this->private;
+ conf->hooks_priv = hooks_priv;
+ ret = gf_thread_create(&hooks_priv->worker, NULL, hooks_worker,
+ (void *)this, "gdhooks");
+ if (ret)
+ gf_msg(this->name, GF_LOG_CRITICAL, errno, GD_MSG_SPAWN_THREADS_FAIL,
+ "Failed to spawn post "
+ "hooks worker thread");
out:
- return ret;
+ return ret;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.c b/xlators/mgmt/glusterd/src/glusterd-locks.c
index f9b0409b60d..4a7a5ba4479 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.c
@@ -28,205 +28,201 @@
/* Valid entities that the mgmt_v3 lock can hold locks upon *
* To add newer entities to be locked, we can just add more *
* entries to this table along with the type and default value */
-glusterd_valid_entities valid_types[] = {
- { "vol", _gf_true },
- { "snap", _gf_false },
- { "global", _gf_false},
- { NULL },
+glusterd_valid_entities valid_types[] = {
+ {"vol", _gf_true},
+ {"snap", _gf_false},
+ {"global", _gf_false},
+ {NULL},
};
/* Checks if the lock request is for a valid entity */
gf_boolean_t
-glusterd_mgmt_v3_is_type_valid (char *type)
+glusterd_mgmt_v3_is_type_valid(char *type)
{
- int32_t i = 0;
- gf_boolean_t ret = _gf_false;
+ int32_t i = 0;
+ gf_boolean_t ret = _gf_false;
- GF_ASSERT (type);
+ GF_ASSERT(type);
- for (i = 0; valid_types[i].type; i++) {
- if (!strcmp (type, valid_types[i].type)) {
- ret = _gf_true;
- break;
- }
+ for (i = 0; valid_types[i].type; i++) {
+ if (!strcmp(type, valid_types[i].type)) {
+ ret = _gf_true;
+ break;
}
+ }
- return ret;
+ return ret;
}
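
Because valid_types is a NULL-terminated table, adding a lockable entity is a one-row change and the validity check stays a linear scan. The same table-driven lookup in isolation:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct entity {
        const char *type;
        bool default_locked;
    };

    /* NULL-terminated: extend by adding rows, no other code changes. */
    static const struct entity valid_types[] = {
        {"vol", true},
        {"snap", false},
        {"global", false},
        {NULL, false},
    };

    static bool
    is_type_valid(const char *type)
    {
        for (int i = 0; valid_types[i].type; i++)
            if (strcmp(type, valid_types[i].type) == 0)
                return true;
        return false;
    }

    int
    main(void)
    {
        printf("vol:  %s\n", is_type_valid("vol") ? "valid" : "invalid");
        printf("peer: %s\n", is_type_valid("peer") ? "valid" : "invalid");
        return 0;
    }
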
/* Initialize the global mgmt_v3 lock list(dict) when
* glusterd is spawned */
int32_t
-glusterd_mgmt_v3_lock_init ()
+glusterd_mgmt_v3_lock_init()
{
- int32_t ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
- priv->mgmt_v3_lock = dict_new ();
- if (!priv->mgmt_v3_lock)
- goto out;
+ priv->mgmt_v3_lock = dict_new();
+ if (!priv->mgmt_v3_lock)
+ goto out;
- ret = 0;
+ ret = 0;
out:
- return ret;
+ return ret;
}
/* Destroy the global mgmt_v3 lock list(dict) when
* glusterd cleanup is performed */
void
-glusterd_mgmt_v3_lock_fini ()
+glusterd_mgmt_v3_lock_fini()
{
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
- if (priv->mgmt_v3_lock)
- dict_unref (priv->mgmt_v3_lock);
+ if (priv->mgmt_v3_lock)
+ dict_unref(priv->mgmt_v3_lock);
}
/* Initialize the global mgmt_v3_timer lock list(dict) when
* glusterd is spawned */
int32_t
-glusterd_mgmt_v3_lock_timer_init ()
+glusterd_mgmt_v3_lock_timer_init()
{
- int32_t ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
- priv = this->private;
- GF_VALIDATE_OR_GOTO (this->name, priv, out);
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, priv, out);
- priv->mgmt_v3_lock_timer = dict_new ();
- if (!priv->mgmt_v3_lock_timer)
- goto out;
+ priv->mgmt_v3_lock_timer = dict_new();
+ if (!priv->mgmt_v3_lock_timer)
+ goto out;
- ret = 0;
+ ret = 0;
out:
- return ret;
+ return ret;
}
/* Destroy the global mgmt_v3_timer lock list(dict) when
* glusterd cleanup is performed */
void
-glusterd_mgmt_v3_lock_timer_fini ()
+glusterd_mgmt_v3_lock_timer_fini()
{
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
- priv = this->private;
- GF_VALIDATE_OR_GOTO (this->name, priv, out);
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, priv, out);
- if (priv->mgmt_v3_lock_timer)
- dict_unref (priv->mgmt_v3_lock_timer);
+ if (priv->mgmt_v3_lock_timer)
+ dict_unref(priv->mgmt_v3_lock_timer);
out:
- return;
+ return;
}
int32_t
-glusterd_get_mgmt_v3_lock_owner (char *key, uuid_t *uuid)
+glusterd_get_mgmt_v3_lock_owner(char *key, uuid_t *uuid)
{
- int32_t ret = -1;
- glusterd_mgmt_v3_lock_obj *lock_obj = NULL;
- glusterd_conf_t *priv = NULL;
- uuid_t no_owner = {0,};
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- if (!key || !uuid) {
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "key or uuid is null.");
- ret = -1;
- goto out;
- }
+ int32_t ret = -1;
+ glusterd_mgmt_v3_lock_obj *lock_obj = NULL;
+ glusterd_conf_t *priv = NULL;
+ uuid_t no_owner = {
+ 0,
+ };
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ if (!key || !uuid) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "key or uuid is null.");
+ ret = -1;
+ goto out;
+ }
- ret = dict_get_bin (priv->mgmt_v3_lock, key, (void **) &lock_obj);
- if (!ret)
- gf_uuid_copy (*uuid, lock_obj->lock_owner);
- else
- gf_uuid_copy (*uuid, no_owner);
+ ret = dict_get_bin(priv->mgmt_v3_lock, key, (void **)&lock_obj);
+ if (!ret)
+ gf_uuid_copy(*uuid, lock_obj->lock_owner);
+ else
+ gf_uuid_copy(*uuid, no_owner);
- ret = 0;
+ ret = 0;
out:
- gf_msg_trace (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_trace(this->name, 0, "Returning %d", ret);
+ return ret;
}
/* This function is called with the locked_count and type, to *
* release all the acquired locks. */
static int32_t
-glusterd_release_multiple_locks_per_entity (dict_t *dict, uuid_t uuid,
- int32_t locked_count,
- char *type)
+glusterd_release_multiple_locks_per_entity(dict_t *dict, uuid_t uuid,
+ int32_t locked_count, char *type)
{
- char name_buf[PATH_MAX] = "";
- char *name = NULL;
- int32_t i = -1;
- int32_t op_ret = 0;
- int32_t ret = -1;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT(this);
- GF_ASSERT (dict);
- GF_ASSERT (type);
-
- if (locked_count == 0) {
- gf_msg_debug (this->name, 0,
- "No %s locked as part of this transaction",
- type);
- goto out;
+ char name_buf[PATH_MAX] = "";
+ char *name = NULL;
+ int32_t i = -1;
+ int32_t op_ret = 0;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(dict);
+ GF_ASSERT(type);
+
+ if (locked_count == 0) {
+ gf_msg_debug(this->name, 0, "No %s locked as part of this transaction",
+ type);
+ goto out;
+ }
+
+ /* Release all the locks held */
+ for (i = 0; i < locked_count; i++) {
+ snprintf(name_buf, sizeof(name_buf), "%sname%d", type, i + 1);
+
+        /* Looking for volname1, volname2 or snapname1, *
+         * snapname2 etc. as the key in the dict */
+ ret = dict_get_str(dict, name_buf, &name);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get %s locked_count = %d", name_buf,
+ locked_count);
+ op_ret = ret;
+ continue;
}
- /* Release all the locks held */
- for (i = 0; i < locked_count; i++) {
- snprintf (name_buf, sizeof(name_buf),
- "%sname%d", type, i+1);
-
- /* Looking for volname1, volname2 or snapname1, *
- * as key in the dict snapname2 */
- ret = dict_get_str (dict, name_buf, &name);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Unable to get %s locked_count = %d",
- name_buf, locked_count);
- op_ret = ret;
- continue;
- }
-
- ret = glusterd_mgmt_v3_unlock (name, uuid, type);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_UNLOCK_FAIL,
- "Failed to release lock for %s.",
- name);
- op_ret = ret;
- }
+ ret = glusterd_mgmt_v3_unlock(name, uuid, type);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
+ "Failed to release lock for %s.", name);
+ op_ret = ret;
}
+ }
out:
- gf_msg_trace (this->name, 0, "Returning %d", op_ret);
- return op_ret;
+ gf_msg_trace(this->name, 0, "Returning %d", op_ret);
+ return op_ret;
}
/* Given the count and type of the entity this function acquires *
@@ -234,71 +230,64 @@ out:
* If type is "vol" this function tries to acquire locks on multiple *
* volumes */
static int32_t
-glusterd_acquire_multiple_locks_per_entity (dict_t *dict, uuid_t uuid,
- uint32_t *op_errno,
- int32_t count, char *type)
+glusterd_acquire_multiple_locks_per_entity(dict_t *dict, uuid_t uuid,
+ uint32_t *op_errno, int32_t count,
+ char *type)
{
- char name_buf[PATH_MAX] = "";
- char *name = NULL;
- int32_t i = -1;
- int32_t ret = -1;
- int32_t locked_count = 0;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT(this);
- GF_ASSERT (dict);
- GF_ASSERT (type);
-
- /* Locking one element after other */
- for (i = 0; i < count; i++) {
- snprintf (name_buf, sizeof(name_buf),
- "%sname%d", type, i+1);
-
- /* Looking for volname1, volname2 or snapname1, *
- * as key in the dict snapname2 */
- ret = dict_get_str (dict, name_buf, &name);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Unable to get %s count = %d",
- name_buf, count);
- break;
- }
-
- ret = glusterd_mgmt_v3_lock (name, uuid, op_errno, type);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_LOCK_GET_FAIL,
- "Failed to acquire lock for %s %s "
- "on behalf of %s. Reversing "
- "this transaction", type, name,
- uuid_utoa(uuid));
- break;
- }
- locked_count++;
- }
-
- if (count == locked_count) {
- /* If all locking ops went successfully, return as success */
- ret = 0;
- goto out;
+ char name_buf[PATH_MAX] = "";
+ char *name = NULL;
+ int32_t i = -1;
+ int32_t ret = -1;
+ int32_t locked_count = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(dict);
+ GF_ASSERT(type);
+
+    /* Locking one element after the other */
+ for (i = 0; i < count; i++) {
+ snprintf(name_buf, sizeof(name_buf), "%sname%d", type, i + 1);
+
+        /* Looking for volname1, volname2 or snapname1, *
+         * snapname2 etc. as the key in the dict */
+ ret = dict_get_str(dict, name_buf, &name);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get %s count = %d", name_buf, count);
+ break;
}
- /* If we failed to lock one element, unlock others and return failure */
- ret = glusterd_release_multiple_locks_per_entity (dict, uuid,
- locked_count,
- type);
+ ret = glusterd_mgmt_v3_lock(name, uuid, op_errno, type);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MULTIPLE_LOCK_RELEASE_FAIL,
- "Failed to release multiple %s locks",
- type);
- }
- ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
+ "Failed to acquire lock for %s %s "
+ "on behalf of %s. Reversing "
+ "this transaction",
+ type, name, uuid_utoa(uuid));
+ break;
+ }
+ locked_count++;
+ }
+
+ if (count == locked_count) {
+ /* If all locking ops went successfully, return as success */
+ ret = 0;
+ goto out;
+ }
+
+ /* If we failed to lock one element, unlock others and return failure */
+ ret = glusterd_release_multiple_locks_per_entity(dict, uuid, locked_count,
+ type);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MULTIPLE_LOCK_RELEASE_FAIL,
+ "Failed to release multiple %s locks", type);
+ }
+ ret = -1;
out:
- gf_msg_trace (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_trace(this->name, 0, "Returning %d", ret);
+ return ret;
}
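
The acquire helper above is all-or-nothing: it locks entities in order and, on the first failure, releases exactly the locked_count it managed to take, so a transaction never leaks a half-held lock set. A generic sketch of that acquire-with-rollback loop; try_lock and unlock below are placeholders for the real per-entity calls:

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholders for the real per-entity lock/unlock calls. */
    static bool try_lock(int id) { return id != 3; }   /* id 3 fails, for demo */
    static void unlock(int id)   { printf("released %d\n", id); }

    /* Lock ids [0, count); on failure roll back whatever was taken. */
    static int
    lock_all(int count)
    {
        int locked = 0;
        for (int i = 0; i < count; i++) {
            if (!try_lock(i))
                break;                 /* stop at the first failure */
            locked++;
        }
        if (locked == count)
            return 0;                  /* all-or-nothing: success */
        while (locked-- > 0)           /* reverse the partial transaction */
            unlock(locked);
        return -1;
    }

    int
    main(void)
    {
        printf("lock_all(2) -> %d\n", lock_all(2));  /* succeeds */
        printf("lock_all(5) -> %d\n", lock_all(5));  /* fails at 3, rolls back */
        return 0;
    }
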
/* Given the type of entity, this function figures out if it should unlock a *
@@ -306,74 +295,69 @@ out:
* if the type is "vol", this function will accordingly unlock a single volume *
* or multiple volumes */
static int32_t
-glusterd_mgmt_v3_unlock_entity (dict_t *dict, uuid_t uuid, char *type,
- gf_boolean_t default_value)
+glusterd_mgmt_v3_unlock_entity(dict_t *dict, uuid_t uuid, char *type,
+ gf_boolean_t default_value)
{
- char name_buf[PATH_MAX] = "";
- char *name = NULL;
- int32_t count = -1;
- int32_t ret = -1;
- gf_boolean_t hold_locks = _gf_false;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT(this);
- GF_ASSERT (dict);
- GF_ASSERT (type);
-
- snprintf (name_buf, sizeof(name_buf), "hold_%s_locks", type);
- hold_locks = dict_get_str_boolean (dict, name_buf, default_value);
-
- if (hold_locks == _gf_false) {
- /* Locks were not held for this particular entity *
- * Hence nothing to release */
- ret = 0;
- goto out;
+ char name_buf[PATH_MAX] = "";
+ char *name = NULL;
+ int32_t count = -1;
+ int32_t ret = -1;
+ gf_boolean_t hold_locks = _gf_false;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(dict);
+ GF_ASSERT(type);
+
+ snprintf(name_buf, sizeof(name_buf), "hold_%s_locks", type);
+ hold_locks = dict_get_str_boolean(dict, name_buf, default_value);
+
+ if (hold_locks == _gf_false) {
+ /* Locks were not held for this particular entity *
+ * Hence nothing to release */
+ ret = 0;
+ goto out;
+ }
+
+ /* Looking for volcount or snapcount in the dict */
+ snprintf(name_buf, sizeof(name_buf), "%scount", type);
+ ret = dict_get_int32(dict, name_buf, &count);
+ if (ret) {
+ /* count is not present. Only one *
+ * element name needs to be unlocked */
+ snprintf(name_buf, sizeof(name_buf), "%sname", type);
+ ret = dict_get_str(dict, name_buf, &name);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to fetch %sname", type);
+ goto out;
}
- /* Looking for volcount or snapcount in the dict */
- snprintf (name_buf, sizeof(name_buf), "%scount", type);
- ret = dict_get_int32 (dict, name_buf, &count);
+ ret = glusterd_mgmt_v3_unlock(name, uuid, type);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
+ "Failed to release lock for %s %s "
+ "on behalf of %s.",
+ type, name, uuid_utoa(uuid));
+ goto out;
+ }
+ } else {
+ /* Unlocking one element name after another */
+ ret = glusterd_release_multiple_locks_per_entity(dict, uuid, count,
+ type);
if (ret) {
- /* count is not present. Only one *
- * element name needs to be unlocked */
- snprintf (name_buf, sizeof(name_buf), "%sname",
- type);
- ret = dict_get_str (dict, name_buf, &name);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Unable to fetch %sname", type);
- goto out;
- }
-
- ret = glusterd_mgmt_v3_unlock (name, uuid, type);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_UNLOCK_FAIL,
- "Failed to release lock for %s %s "
- "on behalf of %s.", type, name,
- uuid_utoa(uuid));
- goto out;
- }
- } else {
- /* Unlocking one element name after another */
- ret = glusterd_release_multiple_locks_per_entity (dict,
- uuid,
- count,
- type);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MULTIPLE_LOCK_RELEASE_FAIL,
- "Failed to release all %s locks", type);
- goto out;
- }
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_MULTIPLE_LOCK_RELEASE_FAIL,
+ "Failed to release all %s locks", type);
+ goto out;
}
+ }
- ret = 0;
+ ret = 0;
out:
- gf_msg_trace (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_trace(this->name, 0, "Returning %d", ret);
+ return ret;
}
/* Given the type of entity, this function figures out if it should lock a *
@@ -381,541 +365,500 @@ out:
* if the type is "vol", this function will accordingly lock a single volume *
* or multiple volumes */
static int32_t
-glusterd_mgmt_v3_lock_entity (dict_t *dict, uuid_t uuid, uint32_t *op_errno,
- char *type, gf_boolean_t default_value)
+glusterd_mgmt_v3_lock_entity(dict_t *dict, uuid_t uuid, uint32_t *op_errno,
+ char *type, gf_boolean_t default_value)
{
- char name_buf[PATH_MAX] = "";
- char *name = NULL;
- int32_t count = -1;
- int32_t ret = -1;
- gf_boolean_t hold_locks = _gf_false;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT(this);
- GF_ASSERT (dict);
- GF_ASSERT (type);
-
- snprintf (name_buf, sizeof(name_buf), "hold_%s_locks", type);
- hold_locks = dict_get_str_boolean (dict, name_buf, default_value);
-
- if (hold_locks == _gf_false) {
- /* Not holding locks for this particular entity */
- ret = 0;
- goto out;
+ char name_buf[PATH_MAX] = "";
+ char *name = NULL;
+ int32_t count = -1;
+ int32_t ret = -1;
+ gf_boolean_t hold_locks = _gf_false;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(dict);
+ GF_ASSERT(type);
+
+ snprintf(name_buf, sizeof(name_buf), "hold_%s_locks", type);
+ hold_locks = dict_get_str_boolean(dict, name_buf, default_value);
+
+ if (hold_locks == _gf_false) {
+ /* Not holding locks for this particular entity */
+ ret = 0;
+ goto out;
+ }
+
+ /* Looking for volcount or snapcount in the dict */
+ snprintf(name_buf, sizeof(name_buf), "%scount", type);
+ ret = dict_get_int32(dict, name_buf, &count);
+ if (ret) {
+ /* count is not present. Only one *
+ * element name needs to be locked */
+ snprintf(name_buf, sizeof(name_buf), "%sname", type);
+ ret = dict_get_str(dict, name_buf, &name);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to fetch %sname", type);
+ goto out;
}
- /* Looking for volcount or snapcount in the dict */
- snprintf (name_buf, sizeof(name_buf), "%scount", type);
- ret = dict_get_int32 (dict, name_buf, &count);
+ ret = glusterd_mgmt_v3_lock(name, uuid, op_errno, type);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
+ "Failed to acquire lock for %s %s "
+ "on behalf of %s.",
+ type, name, uuid_utoa(uuid));
+ goto out;
+ }
+ } else {
+ /* Locking one element name after another */
+ ret = glusterd_acquire_multiple_locks_per_entity(dict, uuid, op_errno,
+ count, type);
if (ret) {
- /* count is not present. Only one *
- * element name needs to be locked */
- snprintf (name_buf, sizeof(name_buf), "%sname",
- type);
- ret = dict_get_str (dict, name_buf, &name);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Unable to fetch %sname", type);
- goto out;
- }
-
- ret = glusterd_mgmt_v3_lock (name, uuid, op_errno, type);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_LOCK_GET_FAIL,
- "Failed to acquire lock for %s %s "
- "on behalf of %s.", type, name,
- uuid_utoa(uuid));
- goto out;
- }
- } else {
- /* Locking one element name after another */
- ret = glusterd_acquire_multiple_locks_per_entity (dict,
- uuid,
- op_errno,
- count,
- type);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MULTIPLE_LOCK_ACQUIRE_FAIL,
- "Failed to acquire all %s locks", type);
- goto out;
- }
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_MULTIPLE_LOCK_ACQUIRE_FAIL,
+ "Failed to acquire all %s locks", type);
+ goto out;
}
+ }
- ret = 0;
+ ret = 0;
out:
- gf_msg_trace (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_trace(this->name, 0, "Returning %d", ret);
+ return ret;
}
/* Try to release locks of multiple entities like *
* volume, snaps etc. */
int32_t
-glusterd_multiple_mgmt_v3_unlock (dict_t *dict, uuid_t uuid)
+glusterd_multiple_mgmt_v3_unlock(dict_t *dict, uuid_t uuid)
{
- int32_t i = -1;
- int32_t ret = -1;
- int32_t op_ret = 0;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT(this);
-
- if (!dict) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_EMPTY, "dict is null.");
- ret = -1;
- goto out;
- }
+ int32_t i = -1;
+ int32_t ret = -1;
+ int32_t op_ret = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ if (!dict) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_EMPTY, "dict is null.");
+ ret = -1;
+ goto out;
+ }
- for (i = 0; valid_types[i].type; i++) {
- ret = glusterd_mgmt_v3_unlock_entity
- (dict, uuid,
- valid_types[i].type,
+ for (i = 0; valid_types[i].type; i++) {
+ ret = glusterd_mgmt_v3_unlock_entity(dict, uuid, valid_types[i].type,
valid_types[i].default_value);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MULTIPLE_LOCK_RELEASE_FAIL,
- "Unable to unlock all %s",
- valid_types[i].type);
- op_ret = ret;
- }
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_MULTIPLE_LOCK_RELEASE_FAIL, "Unable to unlock all %s",
+ valid_types[i].type);
+ op_ret = ret;
}
+ }
- ret = op_ret;
+ ret = op_ret;
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
/* Try to acquire locks on multiple entities like *
* volume, snaps etc. */
int32_t
-glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid, uint32_t *op_errno)
+glusterd_multiple_mgmt_v3_lock(dict_t *dict, uuid_t uuid, uint32_t *op_errno)
{
- int32_t i = -1;
- int32_t ret = -1;
- int32_t locked_count = 0;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT(this);
-
- if (!dict) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_EMPTY, "dict is null.");
- ret = -1;
- goto out;
- }
+ int32_t i = -1;
+ int32_t ret = -1;
+ int32_t locked_count = 0;
+ xlator_t *this = NULL;
- /* Locking one entity after other */
- for (i = 0; valid_types[i].type; i++) {
- ret = glusterd_mgmt_v3_lock_entity
- (dict, uuid, op_errno,
- valid_types[i].type,
- valid_types[i].default_value);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MULTIPLE_LOCK_ACQUIRE_FAIL,
- "Unable to lock all %s",
- valid_types[i].type);
- break;
- }
- locked_count++;
- }
-
- if (locked_count == GF_MAX_LOCKING_ENTITIES) {
- /* If all locking ops went successfully, return as success */
- ret = 0;
- goto out;
- }
+ this = THIS;
+ GF_ASSERT(this);
- /* If we failed to lock one entity, unlock others and return failure */
- for (i = 0; i < locked_count; i++) {
- ret = glusterd_mgmt_v3_unlock_entity
- (dict, uuid,
- valid_types[i].type,
- valid_types[i].default_value);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MULTIPLE_LOCK_RELEASE_FAIL,
- "Unable to unlock all %s",
- valid_types[i].type);
- }
- }
+ if (!dict) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_EMPTY, "dict is null.");
ret = -1;
-out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
-}
-
-
-int32_t
-glusterd_mgmt_v3_lock (const char *name, uuid_t uuid, uint32_t *op_errno,
- char *type)
-{
- char key[PATH_MAX] = "";
- int32_t ret = -1;
- glusterd_mgmt_v3_lock_obj *lock_obj = NULL;
- glusterd_mgmt_v3_lock_timer *mgmt_lock_timer = NULL;
- glusterd_conf_t *priv = NULL;
- gf_boolean_t is_valid = _gf_true;
- uuid_t owner = {0};
- xlator_t *this = NULL;
- char *bt = NULL;
- struct timespec delay = {0};
- char *key_dup = NULL;
- glusterfs_ctx_t *mgmt_lock_timer_ctx = NULL;
- xlator_t *mgmt_lock_timer_xl = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- priv = this->private;
- GF_ASSERT (priv);
-
- if (!name || !type) {
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "name or type is null.");
- ret = -1;
- goto out;
- }
-
- is_valid = glusterd_mgmt_v3_is_type_valid (type);
- if (is_valid != _gf_true) {
- gf_msg_callingfn (this->name, GF_LOG_ERROR,
- EINVAL, GD_MSG_INVALID_ENTRY,
- "Invalid entity. Cannot perform locking "
- "operation on %s types", type);
- ret = -1;
- goto out;
- }
-
- ret = snprintf (key, sizeof(key), "%s_%s", name, type);
- if (ret != strlen(name) + 1 + strlen(type)) {
- ret = -1;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_CREATE_KEY_FAIL, "Unable to create key");
- goto out;
- }
-
- gf_msg_debug (this->name, 0,
- "Trying to acquire lock of %s %s for %s as %s",
- type, name, uuid_utoa (uuid), key);
-
- ret = glusterd_get_mgmt_v3_lock_owner (key, &owner);
+ goto out;
+ }
+
+    /* Locking one entity after the other */
+ for (i = 0; valid_types[i].type; i++) {
+ ret = glusterd_mgmt_v3_lock_entity(dict, uuid, op_errno,
+ valid_types[i].type,
+ valid_types[i].default_value);
if (ret) {
- gf_msg_debug (this->name, 0,
- "Unable to get mgmt_v3 lock owner");
- goto out;
- }
-
- /* If the lock has already been held for the given volume
- * we fail */
- if (!gf_uuid_is_null (owner)) {
- gf_msg_callingfn (this->name, GF_LOG_WARNING,
- 0, GD_MSG_LOCK_ALREADY_HELD,
- "Lock for %s held by %s",
- name, uuid_utoa (owner));
- ret = -1;
- *op_errno = EG_ANOTRANS;
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_MULTIPLE_LOCK_ACQUIRE_FAIL, "Unable to lock all %s",
+ valid_types[i].type);
+ break;
}
+ locked_count++;
+ }
- lock_obj = GF_CALLOC (1, sizeof(glusterd_mgmt_v3_lock_obj),
- gf_common_mt_mgmt_v3_lock_obj_t);
- if (!lock_obj) {
- ret = -1;
- goto out;
- }
-
- gf_uuid_copy (lock_obj->lock_owner, uuid);
+ if (locked_count == GF_MAX_LOCKING_ENTITIES) {
+ /* If all locking ops went successfully, return as success */
+ ret = 0;
+ goto out;
+ }
- ret = dict_set_bin (priv->mgmt_v3_lock, key, lock_obj,
- sizeof(glusterd_mgmt_v3_lock_obj));
+ /* If we failed to lock one entity, unlock others and return failure */
+ for (i = 0; i < locked_count; i++) {
+ ret = glusterd_mgmt_v3_unlock_entity(dict, uuid, valid_types[i].type,
+ valid_types[i].default_value);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Unable to set lock owner in mgmt_v3 lock");
- GF_FREE (lock_obj);
- goto out;
- }
-
- mgmt_lock_timer = GF_CALLOC (1, sizeof(glusterd_mgmt_v3_lock_timer),
- gf_common_mt_mgmt_v3_lock_timer_t);
-
- if (!mgmt_lock_timer) {
- ret = -1;
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_MULTIPLE_LOCK_RELEASE_FAIL, "Unable to unlock all %s",
+ valid_types[i].type);
}
+ }
+ ret = -1;
+out:
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
+}
- mgmt_lock_timer->xl = THIS;
- key_dup = gf_strdup (key);
- delay.tv_sec = priv->mgmt_v3_lock_timeout;
- delay.tv_nsec = 0;
- /*changing to default timeout value*/
- priv->mgmt_v3_lock_timeout = GF_LOCK_TIMER;
+int32_t
+glusterd_mgmt_v3_lock(const char *name, uuid_t uuid, uint32_t *op_errno,
+ char *type)
+{
+ char key[PATH_MAX] = "";
+ int32_t ret = -1;
+ glusterd_mgmt_v3_lock_obj *lock_obj = NULL;
+ glusterd_mgmt_v3_lock_timer *mgmt_lock_timer = NULL;
+ glusterd_conf_t *priv = NULL;
+ gf_boolean_t is_valid = _gf_true;
+ uuid_t owner = {0};
+ xlator_t *this = NULL;
+ char *bt = NULL;
+ struct timespec delay = {0};
+ char *key_dup = NULL;
+ glusterfs_ctx_t *mgmt_lock_timer_ctx = NULL;
+ xlator_t *mgmt_lock_timer_xl = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ if (!name || !type) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "name or type is null.");
+ ret = -1;
+ goto out;
+ }
+
+ is_valid = glusterd_mgmt_v3_is_type_valid(type);
+ if (is_valid != _gf_true) {
+ gf_msg_callingfn(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "Invalid entity. Cannot perform locking "
+ "operation on %s types",
+ type);
+ ret = -1;
+ goto out;
+ }
+ ret = snprintf(key, sizeof(key), "%s_%s", name, type);
+ if (ret != strlen(name) + 1 + strlen(type)) {
ret = -1;
- mgmt_lock_timer_xl = mgmt_lock_timer->xl;
- GF_VALIDATE_OR_GOTO (this->name, mgmt_lock_timer_xl, out);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CREATE_KEY_FAIL,
+ "Unable to create key");
+ goto out;
+ }
+
+ gf_msg_debug(this->name, 0, "Trying to acquire lock of %s %s for %s as %s",
+ type, name, uuid_utoa(uuid), key);
+
+ ret = glusterd_get_mgmt_v3_lock_owner(key, &owner);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "Unable to get mgmt_v3 lock owner");
+ goto out;
+ }
+
+ /* If the lock has already been held for the given volume
+ * we fail */
+ if (!gf_uuid_is_null(owner)) {
+ gf_msg_callingfn(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_LOCK_ALREADY_HELD, "Lock for %s held by %s",
+ name, uuid_utoa(owner));
+ ret = -1;
+ *op_errno = EG_ANOTRANS;
+ goto out;
+ }
- mgmt_lock_timer_ctx = mgmt_lock_timer_xl->ctx;
- GF_VALIDATE_OR_GOTO (this->name, mgmt_lock_timer_ctx, out);
+ lock_obj = GF_CALLOC(1, sizeof(glusterd_mgmt_v3_lock_obj),
+ gf_common_mt_mgmt_v3_lock_obj_t);
+ if (!lock_obj) {
+ ret = -1;
+ goto out;
+ }
- mgmt_lock_timer->timer = gf_timer_call_after
- (mgmt_lock_timer_ctx, delay,
- gd_mgmt_v3_unlock_timer_cbk,
- key_dup);
+ gf_uuid_copy(lock_obj->lock_owner, uuid);
- ret = dict_set_bin (priv->mgmt_v3_lock_timer, key, mgmt_lock_timer,
- sizeof (glusterd_mgmt_v3_lock_timer));
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Unable to set timer in mgmt_v3 lock");
- GF_FREE (mgmt_lock_timer);
- goto out;
- }
+ ret = dict_set_bin(priv->mgmt_v3_lock, key, lock_obj,
+ sizeof(glusterd_mgmt_v3_lock_obj));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to set lock owner in mgmt_v3 lock");
+ GF_FREE(lock_obj);
+ goto out;
+ }
+ mgmt_lock_timer = GF_CALLOC(1, sizeof(glusterd_mgmt_v3_lock_timer),
+ gf_common_mt_mgmt_v3_lock_timer_t);
- /* Saving the backtrace into the pre-allocated buffer, ctx->btbuf*/
- if ((bt = gf_backtrace_save (NULL))) {
- snprintf (key, sizeof (key), "debug.last-success-bt-%s-%s",
- name, type);
- ret = dict_set_dynstr_with_alloc (priv->mgmt_v3_lock, key, bt);
- if (ret)
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_DICT_SET_FAILED, "Failed to save "
- "the back trace for lock %s-%s granted to %s",
- name, type, uuid_utoa (uuid));
- ret = 0;
- }
+ if (!mgmt_lock_timer) {
+ ret = -1;
+ goto out;
+ }
+
+ mgmt_lock_timer->xl = THIS;
+ key_dup = gf_strdup(key);
+ delay.tv_sec = priv->mgmt_v3_lock_timeout;
+ delay.tv_nsec = 0;
+ /*changing to default timeout value*/
+ priv->mgmt_v3_lock_timeout = GF_LOCK_TIMER;
+
+ ret = -1;
+ mgmt_lock_timer_xl = mgmt_lock_timer->xl;
+ GF_VALIDATE_OR_GOTO(this->name, mgmt_lock_timer_xl, out);
+
+ mgmt_lock_timer_ctx = mgmt_lock_timer_xl->ctx;
+ GF_VALIDATE_OR_GOTO(this->name, mgmt_lock_timer_ctx, out);
+
+ mgmt_lock_timer->timer = gf_timer_call_after(
+ mgmt_lock_timer_ctx, delay, gd_mgmt_v3_unlock_timer_cbk, key_dup);
+
+ ret = dict_set_bin(priv->mgmt_v3_lock_timer, key, mgmt_lock_timer,
+ sizeof(glusterd_mgmt_v3_lock_timer));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to set timer in mgmt_v3 lock");
+ GF_FREE(mgmt_lock_timer);
+ goto out;
+ }
+
+ /* Saving the backtrace into the pre-allocated buffer, ctx->btbuf*/
+ if ((bt = gf_backtrace_save(NULL))) {
+ snprintf(key, sizeof(key), "debug.last-success-bt-%s-%s", name, type);
+ ret = dict_set_dynstr_with_alloc(priv->mgmt_v3_lock, key, bt);
+ if (ret)
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to save "
+ "the back trace for lock %s-%s granted to %s",
+ name, type, uuid_utoa(uuid));
+ ret = 0;
+ }
- gf_msg_debug (this->name, 0,
- "Lock for %s %s successfully held by %s",
- type, name, uuid_utoa (uuid));
+ gf_msg_debug(this->name, 0, "Lock for %s %s successfully held by %s", type,
+ name, uuid_utoa(uuid));
- ret = 0;
+ ret = 0;
out:
- gf_msg_trace (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_trace(this->name, 0, "Returning %d", ret);
+ return ret;
}
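
Worth noting in the lock path: the key is "<name>_<type>", and the snprintf return value is compared against strlen(name) + 1 + strlen(type). Since C99 snprintf returns the length it would have written, here is a sketch of a slightly stricter variant that also treats a result >= the buffer size as truncation:

    #include <stdio.h>
    #include <string.h>

    /* Build "<name>_<type>" into key; fail on error or truncation. */
    static int
    make_lock_key(char *key, size_t keylen, const char *name, const char *type)
    {
        int ret = snprintf(key, keylen, "%s_%s", name, type);
        if (ret < 0 || (size_t)ret >= keylen)      /* error or truncated */
            return -1;
        if ((size_t)ret != strlen(name) + 1 + strlen(type))
            return -1;                             /* sanity: unexpected length */
        return 0;
    }

    int
    main(void)
    {
        char key[16];
        printf("%d\n", make_lock_key(key, sizeof(key), "myvol", "vol"));  /* 0 */
        printf("%d\n", make_lock_key(key, sizeof(key),
                                     "averylongvolumename", "vol"));      /* -1 */
        return 0;
    }
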
/*
 * This callback ensures the lock_obj gets unlocked if we ever hit a situation
 * where unlocking failed and a stale lock exists */
void
-gd_mgmt_v3_unlock_timer_cbk (void *data)
+gd_mgmt_v3_unlock_timer_cbk(void *data)
{
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- glusterd_mgmt_v3_lock_timer *mgmt_lock_timer = NULL;
- char *key = NULL;
- char *type = NULL;
- char bt_key[PATH_MAX] = "";
- char name[PATH_MAX] = "";
- int32_t ret = -1;
- glusterfs_ctx_t *mgmt_lock_timer_ctx = NULL;
- xlator_t *mgmt_lock_timer_xl = NULL;
- gf_timer_t *timer = NULL;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
-
- conf = this->private;
- GF_VALIDATE_OR_GOTO (this->name, conf, out);
-
- gf_log (THIS->name, GF_LOG_INFO, "In gd_mgmt_v3_unlock_timer_cbk");
- GF_ASSERT (NULL != data);
- key = (char *)data;
-
- dict_del (conf->mgmt_v3_lock, key);
-
- type = strrchr (key, '_');
- strncpy (name, key, strlen (key) - strlen (type) - 1);
-
- ret = snprintf (bt_key, PATH_MAX, "debug.last-success-bt-%s-%s",
- name, type + 1);
- if (ret != SLEN ("debug.last-success-bt-") + strlen (name) +
- strlen (type)) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_CREATE_KEY_FAIL, "Unable to create backtrace "
- "key");
- goto out;
- }
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ glusterd_mgmt_v3_lock_timer *mgmt_lock_timer = NULL;
+ char *key = NULL;
+ char *type = NULL;
+ char bt_key[PATH_MAX] = "";
+ char name[PATH_MAX] = "";
+ int32_t ret = -1;
+ glusterfs_ctx_t *mgmt_lock_timer_ctx = NULL;
+ xlator_t *mgmt_lock_timer_xl = NULL;
+ gf_timer_t *timer = NULL;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+
+ gf_log(THIS->name, GF_LOG_INFO, "In gd_mgmt_v3_unlock_timer_cbk");
+ GF_ASSERT(NULL != data);
+ key = (char *)data;
+
+ dict_del(conf->mgmt_v3_lock, key);
+
+ type = strrchr(key, '_');
+ strncpy(name, key, strlen(key) - strlen(type) - 1);
+
+ ret = snprintf(bt_key, PATH_MAX, "debug.last-success-bt-%s-%s", name,
+ type + 1);
+ if (ret != SLEN("debug.last-success-bt-") + strlen(name) + strlen(type)) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CREATE_KEY_FAIL,
+ "Unable to create backtrace "
+ "key");
+ goto out;
+ }
+
+ dict_del(conf->mgmt_v3_lock, bt_key);
+
+ ret = dict_get_bin(conf->mgmt_v3_lock_timer, key,
+ (void **)&mgmt_lock_timer);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to get lock owner in mgmt_v3 lock");
+ }
- dict_del (conf->mgmt_v3_lock, bt_key);
+out:
+ if (mgmt_lock_timer && mgmt_lock_timer->timer) {
+ mgmt_lock_timer_xl = mgmt_lock_timer->xl;
+ GF_VALIDATE_OR_GOTO(this->name, mgmt_lock_timer_xl, ret_function);
- ret = dict_get_bin (conf->mgmt_v3_lock_timer, key,
- (void **)&mgmt_lock_timer);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Unable to get lock owner in mgmt_v3 lock");
- }
+ mgmt_lock_timer_ctx = mgmt_lock_timer_xl->ctx;
+ GF_VALIDATE_OR_GOTO(this->name, mgmt_lock_timer_ctx, ret_function);
-out:
- if (mgmt_lock_timer && mgmt_lock_timer->timer) {
- mgmt_lock_timer_xl = mgmt_lock_timer->xl;
- GF_VALIDATE_OR_GOTO (this->name, mgmt_lock_timer_xl,
- ret_function);
-
- mgmt_lock_timer_ctx = mgmt_lock_timer_xl->ctx;
- GF_VALIDATE_OR_GOTO (this->name, mgmt_lock_timer_ctx,
- ret_function);
-
- timer = mgmt_lock_timer->timer;
- GF_FREE (timer->data);
- gf_timer_call_cancel (mgmt_lock_timer_ctx,
- mgmt_lock_timer->timer);
- dict_del (conf->mgmt_v3_lock_timer, bt_key);
- mgmt_lock_timer->timer = NULL;
- }
+ timer = mgmt_lock_timer->timer;
+ GF_FREE(timer->data);
+ gf_timer_call_cancel(mgmt_lock_timer_ctx, mgmt_lock_timer->timer);
+ dict_del(conf->mgmt_v3_lock_timer, bt_key);
+ mgmt_lock_timer->timer = NULL;
+ }
ret_function:
- return;
-
+ return;
}
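
The timer callback recovers name and type from the composed key by splitting at the last '_' via strrchr, so names that themselves contain underscores still split correctly. A standalone version of that split (buffer sizes are illustrative):

    #include <stdio.h>
    #include <string.h>

    /* Split "name_type" at the last '_' into name and type. */
    static int
    split_lock_key(const char *key, char *name, size_t namelen,
                   const char **type)
    {
        const char *sep = strrchr(key, '_');
        if (!sep || sep == key)
            return -1;                    /* no separator, or empty name */
        size_t n = (size_t)(sep - key);   /* bytes before the '_' */
        if (n >= namelen)
            return -1;
        memcpy(name, key, n);
        name[n] = '\0';
        *type = sep + 1;                  /* points into key, after '_' */
        return 0;
    }

    int
    main(void)
    {
        char name[64];
        const char *type = NULL;
        if (split_lock_key("my_vol_name_vol", name, sizeof(name), &type) == 0)
            printf("name=%s type=%s\n", name, type);  /* my_vol_name / vol */
        return 0;
    }
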
int32_t
-glusterd_mgmt_v3_unlock (const char *name, uuid_t uuid, char *type)
+glusterd_mgmt_v3_unlock(const char *name, uuid_t uuid, char *type)
{
- char key[PATH_MAX] = "";
- char key_dup[PATH_MAX] = "";
- int32_t ret = -1;
- gf_boolean_t is_valid = _gf_true;
- glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_mgmt_v3_lock_timer *mgmt_lock_timer = NULL;
- uuid_t owner = {0};
- xlator_t *this = NULL;
- glusterfs_ctx_t *mgmt_lock_timer_ctx = NULL;
- xlator_t *mgmt_lock_timer_xl = NULL;
- gf_timer_t *timer = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- priv = this->private;
- GF_ASSERT (priv);
-
- if (!name || !type) {
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "name is null.");
- ret = -1;
- goto out;
- }
-
- is_valid = glusterd_mgmt_v3_is_type_valid (type);
- if (is_valid != _gf_true) {
- gf_msg_callingfn (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY,
- "Invalid entity. Cannot perform unlocking "
- "operation on %s types", type);
- ret = -1;
- goto out;
- }
-
- ret = snprintf (key, sizeof(key), "%s_%s",
- name, type);
- if (ret != strlen(name) + 1 + strlen(type)) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_CREATE_KEY_FAIL, "Unable to create key");
- ret = -1;
- goto out;
- }
- strncpy (key_dup, key, strlen(key));
-
- gf_msg_debug (this->name, 0,
- "Trying to release lock of %s %s for %s as %s",
- type, name, uuid_utoa (uuid), key);
-
- ret = glusterd_get_mgmt_v3_lock_owner (key, &owner);
- if (ret) {
- gf_msg_debug (this->name, 0,
- "Unable to get mgmt_v3 lock owner");
- goto out;
- }
-
- if (gf_uuid_is_null (owner)) {
- gf_msg_callingfn (this->name, GF_LOG_WARNING,
- 0, GD_MSG_LOCK_NOT_HELD,
- "Lock for %s %s not held", type, name);
- ret = -1;
- goto out;
- }
+ char key[PATH_MAX] = "";
+ char key_dup[PATH_MAX] = "";
+ int32_t ret = -1;
+ gf_boolean_t is_valid = _gf_true;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_mgmt_v3_lock_timer *mgmt_lock_timer = NULL;
+ uuid_t owner = {0};
+ xlator_t *this = NULL;
+ glusterfs_ctx_t *mgmt_lock_timer_ctx = NULL;
+ xlator_t *mgmt_lock_timer_xl = NULL;
+ gf_timer_t *timer = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ if (!name || !type) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "name is null.");
+ ret = -1;
+ goto out;
+ }
+
+ is_valid = glusterd_mgmt_v3_is_type_valid(type);
+ if (is_valid != _gf_true) {
+ gf_msg_callingfn(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "Invalid entity. Cannot perform unlocking "
+ "operation on %s types",
+ type);
+ ret = -1;
+ goto out;
+ }
- ret = gf_uuid_compare (uuid, owner);
- if (ret) {
- gf_msg_callingfn (this->name, GF_LOG_WARNING,
- 0, GD_MSG_LOCK_OWNER_MISMATCH,
- "Lock owner mismatch. "
- "Lock for %s %s held by %s",
- type, name, uuid_utoa (owner));
- goto out;
- }
+ ret = snprintf(key, sizeof(key), "%s_%s", name, type);
+ if (ret != strlen(name) + 1 + strlen(type)) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CREATE_KEY_FAIL,
+ "Unable to create key");
+ ret = -1;
+ goto out;
+ }
+ strncpy(key_dup, key, strlen(key));
+
+ gf_msg_debug(this->name, 0, "Trying to release lock of %s %s for %s as %s",
+ type, name, uuid_utoa(uuid), key);
+
+ ret = glusterd_get_mgmt_v3_lock_owner(key, &owner);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "Unable to get mgmt_v3 lock owner");
+ goto out;
+ }
+
+ if (gf_uuid_is_null(owner)) {
+ gf_msg_callingfn(this->name, GF_LOG_WARNING, 0, GD_MSG_LOCK_NOT_HELD,
+ "Lock for %s %s not held", type, name);
+ ret = -1;
+ goto out;
+ }
+
+ ret = gf_uuid_compare(uuid, owner);
+ if (ret) {
+ gf_msg_callingfn(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_LOCK_OWNER_MISMATCH,
+ "Lock owner mismatch. "
+ "Lock for %s %s held by %s",
+ type, name, uuid_utoa(owner));
+ goto out;
+ }
+
+ /* Removing the mgmt_v3 lock from the global list */
+ dict_del(priv->mgmt_v3_lock, key);
+
+ ret = dict_get_bin(priv->mgmt_v3_lock_timer, key,
+ (void **)&mgmt_lock_timer);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to get mgmt lock key in mgmt_v3 lock");
+ goto out;
+ }
+
+ /* Remove the backtrace key as well */
+ ret = snprintf(key, sizeof(key), "debug.last-success-bt-%s-%s", name, type);
+ if (ret !=
+ SLEN("debug.last-success-bt-") + strlen(name) + strlen(type) + 1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CREATE_KEY_FAIL,
+ "Unable to create backtrace "
+ "key");
+ ret = -1;
+ goto out;
+ }
+ dict_del(priv->mgmt_v3_lock, key);
- /* Removing the mgmt_v3 lock from the global list */
- dict_del (priv->mgmt_v3_lock, key);
+ gf_msg_debug(this->name, 0, "Lock for %s %s successfully released", type,
+ name);
- ret = dict_get_bin (priv->mgmt_v3_lock_timer, key,
- (void **)&mgmt_lock_timer);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Unable to get mgmt lock key in mgmt_v3 lock");
- goto out;
- }
+ /* Release owner reference which was held during lock */
+ if (mgmt_lock_timer->timer) {
+ ret = -1;
+ mgmt_lock_timer_xl = mgmt_lock_timer->xl;
+ GF_VALIDATE_OR_GOTO(this->name, mgmt_lock_timer_xl, out);
- /* Remove the backtrace key as well */
- ret = snprintf (key, sizeof(key), "debug.last-success-bt-%s-%s", name,
- type);
- if (ret != SLEN ("debug.last-success-bt-") + strlen (name) +
- strlen (type) + 1) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_CREATE_KEY_FAIL, "Unable to create backtrace "
- "key");
- ret = -1;
- goto out;
- }
- dict_del (priv->mgmt_v3_lock, key);
-
- gf_msg_debug (this->name, 0,
- "Lock for %s %s successfully released",
- type, name);
-
- /* Release owner reference which was held during lock */
- if (mgmt_lock_timer->timer) {
- ret = -1;
- mgmt_lock_timer_xl = mgmt_lock_timer->xl;
- GF_VALIDATE_OR_GOTO (this->name, mgmt_lock_timer_xl, out);
-
- mgmt_lock_timer_ctx = mgmt_lock_timer_xl->ctx;
- GF_VALIDATE_OR_GOTO (this->name, mgmt_lock_timer_ctx, out);
- ret = 0;
-
- timer = mgmt_lock_timer->timer;
- GF_FREE (timer->data);
- gf_timer_call_cancel (mgmt_lock_timer_ctx,
- mgmt_lock_timer->timer);
- dict_del (priv->mgmt_v3_lock_timer, key_dup);
- }
- ret = glusterd_volinfo_find (name, &volinfo);
- if (volinfo && volinfo->stage_deleted) {
- /* this indicates a volume still exists and the volume delete
- * operation has failed in some of the phases, need to ensure
- * stage_deleted flag is set back to false
- */
- volinfo->stage_deleted = _gf_false;
- }
+ mgmt_lock_timer_ctx = mgmt_lock_timer_xl->ctx;
+ GF_VALIDATE_OR_GOTO(this->name, mgmt_lock_timer_ctx, out);
ret = 0;
+
+ timer = mgmt_lock_timer->timer;
+ GF_FREE(timer->data);
+ gf_timer_call_cancel(mgmt_lock_timer_ctx, mgmt_lock_timer->timer);
+ dict_del(priv->mgmt_v3_lock_timer, key_dup);
+ }
+ ret = glusterd_volinfo_find(name, &volinfo);
+ if (volinfo && volinfo->stage_deleted) {
+ /* this indicates a volume still exists and the volume delete
+ * operation has failed in some of the phases, need to ensure
+ * stage_deleted flag is set back to false
+ */
+ volinfo->stage_deleted = _gf_false;
+ }
+ ret = 0;
out:
- gf_msg_trace (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_trace(this->name, 0, "Returning %d", ret);
+ return ret;
}
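
Unlock above is owner-verified: the stored uuid must match the caller's before the entry is removed, so one transaction cannot release another's lock. A compact sketch of just the ownership test, assuming the comparison reduces to byte equality over the raw 16-byte uuid (uuid_t16 is a stand-in for uuid_t, not a glusterfs type):

    #include <stdio.h>
    #include <string.h>

    typedef unsigned char uuid_t16[16];   /* stand-in for uuid_t */

    /* Release only when the caller actually owns the lock. */
    static int
    owner_checked_unlock(const uuid_t16 owner, const uuid_t16 caller)
    {
        static const uuid_t16 null_uuid;  /* all zeroes */
        if (memcmp(owner, null_uuid, sizeof(uuid_t16)) == 0)
            return -1;                    /* lock not held at all */
        if (memcmp(owner, caller, sizeof(uuid_t16)) != 0)
            return -1;                    /* owner mismatch: refuse */
        /* ... delete the lock entry here ... */
        return 0;
    }

    int
    main(void)
    {
        uuid_t16 a = {1}, b = {2};
        printf("self unlock:  %d\n", owner_checked_unlock(a, a));  /* 0 */
        printf("other unlock: %d\n", owner_checked_unlock(a, b));  /* -1 */
        return 0;
    }
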
diff --git a/xlators/mgmt/glusterd/src/glusterd-log-ops.c b/xlators/mgmt/glusterd/src/glusterd-log-ops.c
index 0a811db9ebc..4742225beb5 100644
--- a/xlators/mgmt/glusterd/src/glusterd-log-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-log-ops.c
@@ -21,266 +21,266 @@
#include <signal.h>
int
-__glusterd_handle_log_rotate (rpcsvc_request_t *req)
+__glusterd_handle_log_rotate(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf_cli_req cli_req = {{0,}};
- dict_t *dict = NULL;
- glusterd_op_t cli_op = GD_OP_LOG_ROTATE;
- char *volname = NULL;
- char msg[64] = {0,};
- xlator_t *this = NULL;
-
- GF_ASSERT (req);
- this = THIS;
- GF_ASSERT (this);
-
- ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{
+ 0,
+ }};
+ dict_t *dict = NULL;
+ glusterd_op_t cli_op = GD_OP_LOG_ROTATE;
+ char *volname = NULL;
+ char msg[64] = {
+ 0,
+ };
+ xlator_t *this = NULL;
+
+ GF_ASSERT(req);
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ // failed to decode msg;
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new();
+
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
if (ret < 0) {
- //failed to decode msg;
- req->rpc_err = GARBAGE_ARGS;
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf(msg, sizeof(msg),
+ "Unable to decode the "
+ "command");
+ goto out;
}
+ }
- if (cli_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new ();
-
- ret = dict_unserialize (cli_req.dict.dict_val,
- cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to "
- "unserialize req-buffer to dictionary");
- snprintf (msg, sizeof (msg), "Unable to decode the "
- "command");
- goto out;
- }
- }
+ ret = dict_get_str(dict, "volname", &volname);
+ if (ret) {
+ snprintf(msg, sizeof(msg), "Failed to get volume name");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s", msg);
+ goto out;
+ }
- ret = dict_get_str (dict, "volname", &volname);
- if (ret) {
- snprintf (msg, sizeof (msg), "Failed to get volume name");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "%s", msg);
- goto out;
- }
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_LOG_ROTATE_REQ_RECVD,
+ "Received log rotate req "
+ "for volume %s",
+ volname);
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_LOG_ROTATE_REQ_RECVD,
- "Received log rotate req "
- "for volume %s", volname);
-
- ret = dict_set_uint64 (dict, "rotate-key", (uint64_t)time (NULL));
- if (ret)
- goto out;
+ ret = dict_set_uint64(dict, "rotate-key", (uint64_t)time(NULL));
+ if (ret)
+ goto out;
- ret = glusterd_op_begin_synctask (req, GD_OP_LOG_ROTATE, dict);
+ ret = glusterd_op_begin_synctask(req, GD_OP_LOG_ROTATE, dict);
out:
- if (ret) {
- if (msg[0] == '\0')
- snprintf (msg, sizeof (msg), "Operation failed");
- ret = glusterd_op_send_cli_response (cli_op, ret, 0, req,
- dict, msg);
- }
-
- free (cli_req.dict.dict_val);
- return ret;
+ if (ret) {
+ if (msg[0] == '\0')
+ snprintf(msg, sizeof(msg), "Operation failed");
+ ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, msg);
+ }
+
+ free(cli_req.dict.dict_val);
+ return ret;
}
int
-glusterd_handle_log_rotate (rpcsvc_request_t *req)
+glusterd_handle_log_rotate(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- __glusterd_handle_log_rotate);
+ return glusterd_big_locked_handler(req, __glusterd_handle_log_rotate);
}
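
The handler stamps the op dict with a "rotate-key" set to time(NULL) before the op fans out, so every brick sees the same timestamp. A sketch of one plausible use of that value, suffixing the rotated log name; the exact rename target lives further down in glusterd_op_log_rotate, which this hunk truncates, so the "<logfile>.<epoch>" format here is an assumption:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    int
    main(void)
    {
        /* One timestamp chosen by the originator, shared by every brick. */
        uint64_t rotate_key = (uint64_t)time(NULL);

        char rotated[256];
        /* Hypothetical rotated name: "<logfile>.<epoch>" */
        snprintf(rotated, sizeof(rotated), "%s.%" PRIu64,
                 "mybrick.log", rotate_key);
        printf("rename log to: %s\n", rotated);
        return 0;
    }
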
/* op-sm */
int
-glusterd_op_stage_log_rotate (dict_t *dict, char **op_errstr)
+glusterd_op_stage_log_rotate(dict_t *dict, char **op_errstr)
{
- int ret = -1;
- char *volname = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- gf_boolean_t exists = _gf_false;
- char msg[2048] = {0};
- char *brick = NULL;
-
- ret = dict_get_str (dict, "volname", &volname);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to get volume name");
- goto out;
- }
-
- exists = glusterd_check_volume_exists (volname);
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (!exists) {
- snprintf (msg, sizeof (msg), "Volume %s does not exist",
- volname);
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, "%s", msg);
- *op_errstr = gf_strdup (msg);
- ret = -1;
- goto out;
- }
-
- if (_gf_false == glusterd_is_volume_started (volinfo)) {
- snprintf (msg, sizeof (msg), "Volume %s needs to be started before"
- " log rotate.", volname);
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_STARTED, "%s", msg);
- *op_errstr = gf_strdup (msg);
- ret = -1;
- goto out;
- }
-
- ret = dict_get_str (dict, "brick", &brick);
- /* If no brick is specified, do log-rotate for
- all the bricks in the volume */
- if (ret) {
- ret = 0;
- goto out;
- }
-
- ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo, NULL,
- _gf_false);
- if (ret) {
- snprintf (msg, sizeof (msg), "Incorrect brick %s "
- "for volume %s", brick, volname);
- gf_msg ("glusterd", GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "%s", msg);
- *op_errstr = gf_strdup (msg);
- goto out;
- }
+ int ret = -1;
+ char *volname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ gf_boolean_t exists = _gf_false;
+ char msg[2048] = {0};
+ char *brick = NULL;
+
+ ret = dict_get_str(dict, "volname", &volname);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+
+ exists = glusterd_check_volume_exists(volname);
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (!exists) {
+ snprintf(msg, sizeof(msg), "Volume %s does not exist", volname);
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
+ *op_errstr = gf_strdup(msg);
+ ret = -1;
+ goto out;
+ }
+
+ if (_gf_false == glusterd_is_volume_started(volinfo)) {
+ snprintf(msg, sizeof(msg),
+ "Volume %s needs to be started before"
+ " log rotate.",
+ volname);
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_STARTED, "%s", msg);
+ *op_errstr = gf_strdup(msg);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "brick", &brick);
+ /* If no brick is specified, do log-rotate for
+ all the bricks in the volume */
+ if (ret) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, NULL,
+ _gf_false);
+ if (ret) {
+ snprintf(msg, sizeof(msg),
+ "Incorrect brick %s "
+ "for volume %s",
+ brick, volname);
+ gf_msg("glusterd", GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s",
+ msg);
+ *op_errstr = gf_strdup(msg);
+ goto out;
+ }
out:
- gf_msg_debug ("glusterd", 0, "Returning %d", ret);
+ gf_msg_debug("glusterd", 0, "Returning %d", ret);
- return ret;
+ return ret;
}
-
int
-glusterd_op_log_rotate (dict_t *dict)
+glusterd_op_log_rotate(dict_t *dict)
{
- int ret = -1;
- glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- xlator_t *this = NULL;
- char *volname = NULL;
- char *brick = NULL;
- char logfile[PATH_MAX] = {0,};
- char pidfile[PATH_MAX] = {0,};
- FILE *file = NULL;
- pid_t pid = 0;
- uint64_t key = 0;
- int valid_brick = 0;
- glusterd_brickinfo_t *tmpbrkinfo = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- ret = dict_get_str (dict, "volname", &volname);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "volname not found");
- goto out;
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ xlator_t *this = NULL;
+ char *volname = NULL;
+ char *brick = NULL;
+ char logfile[PATH_MAX] = {
+ 0,
+ };
+ char pidfile[PATH_MAX] = {
+ 0,
+ };
+ FILE *file = NULL;
+ pid_t pid = 0;
+ uint64_t key = 0;
+ int valid_brick = 0;
+ glusterd_brickinfo_t *tmpbrkinfo = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = dict_get_str(dict, "volname", &volname);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "volname not found");
+ goto out;
+ }
+
+ ret = dict_get_uint64(dict, "rotate-key", &key);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "rotate key not found");
+ goto out;
+ }
+
+ ret = dict_get_str(dict, "brick", &brick);
+ /* If no brick is specified, do log-rotate for
+ all the bricks in the volume */
+ if (ret)
+ goto cont;
+
+ ret = glusterd_brickinfo_new_from_brick(brick, &tmpbrkinfo, _gf_false,
+ NULL);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_BRICK_NOT_FOUND,
+ "cannot get brickinfo from brick");
+ goto out;
+ }
+
+cont:
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret)
+ goto out;
+
+ ret = -1;
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ if (gf_uuid_compare(brickinfo->uuid, MY_UUID))
+ continue;
+
+ if (brick && (strcmp(tmpbrkinfo->hostname, brickinfo->hostname) ||
+ strcmp(tmpbrkinfo->path, brickinfo->path)))
+ continue;
+
+ valid_brick = 1;
+
+ GLUSTERD_GET_BRICK_PIDFILE(pidfile, volinfo, brickinfo, priv);
+ file = fopen(pidfile, "r+");
+ if (!file) {
+ gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
+ "Unable to open pidfile: %s", pidfile);
+ ret = -1;
+ goto out;
}
- ret = dict_get_uint64 (dict, "rotate-key", &key);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "rotate key not found");
- goto out;
+ ret = fscanf(file, "%d", &pid);
+ if (ret <= 0) {
+ fclose(file);
+ gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
+ "Unable to read pidfile: %s", pidfile);
+ ret = -1;
+ goto out;
}
+ fclose(file);
+ file = NULL;
- ret = dict_get_str (dict, "brick", &brick);
- /* If no brick is specified, do log-rotate for
- all the bricks in the volume */
+ snprintf(logfile, PATH_MAX, "%s.%" PRIu64, brickinfo->logfile, key);
+
+ ret = sys_rename(brickinfo->logfile, logfile);
if (ret)
- goto cont;
+ gf_msg("glusterd", GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
+ "rename failed");
- ret = glusterd_brickinfo_new_from_brick (brick, &tmpbrkinfo,
- _gf_false, NULL);
+ ret = kill(pid, SIGHUP);
if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_BRICK_NOT_FOUND,
- "cannot get brickinfo from brick");
- goto out;
+ gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_PID_KILL_FAIL,
+ "Unable to SIGHUP to %d", pid);
+ goto out;
}
+ ret = 0;
-cont:
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret)
- goto out;
-
- ret = -1;
- cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- if (gf_uuid_compare (brickinfo->uuid, MY_UUID))
- continue;
-
- if (brick &&
- (strcmp (tmpbrkinfo->hostname, brickinfo->hostname) ||
- strcmp (tmpbrkinfo->path,brickinfo->path)))
- continue;
-
- valid_brick = 1;
-
- GLUSTERD_GET_BRICK_PIDFILE (pidfile, volinfo, brickinfo, priv);
- file = fopen (pidfile, "r+");
- if (!file) {
- gf_msg ("glusterd", GF_LOG_ERROR, errno,
- GD_MSG_FILE_OP_FAILED, "Unable to open pidfile: %s",
- pidfile);
- ret = -1;
- goto out;
- }
-
- ret = fscanf (file, "%d", &pid);
- if (ret <= 0) {
- fclose (file);
- gf_msg ("glusterd", GF_LOG_ERROR, errno,
- GD_MSG_FILE_OP_FAILED, "Unable to read pidfile: %s",
- pidfile);
- ret = -1;
- goto out;
- }
- fclose (file);
- file = NULL;
-
- snprintf (logfile, PATH_MAX, "%s.%"PRIu64,
- brickinfo->logfile, key);
-
- ret = sys_rename (brickinfo->logfile, logfile);
- if (ret)
- gf_msg ("glusterd", GF_LOG_WARNING, errno,
- GD_MSG_FILE_OP_FAILED, "rename failed");
-
- ret = kill (pid, SIGHUP);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, errno,
- GD_MSG_PID_KILL_FAIL, "Unable to SIGHUP to %d", pid);
- goto out;
- }
- ret = 0;
-
- /* If request was for brick, only one iteration is enough */
- if (brick)
- break;
- }
+ /* If request was for brick, only one iteration is enough */
+ if (brick)
+ break;
+ }
- if (ret && !valid_brick)
- ret = 0;
+ if (ret && !valid_brick)
+ ret = 0;
out:
- if (tmpbrkinfo)
- glusterd_brickinfo_delete (tmpbrkinfo);
+ if (tmpbrkinfo)
+ glusterd_brickinfo_delete(tmpbrkinfo);
- return ret;
+ return ret;
}
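The rotation itself, as the hunk above shows, is rename-plus-SIGHUP: read the brick pid from its pidfile, move the live log aside under a rotate-key suffix, and signal the brick process so it reopens its log. A simplified, self-contained sketch of that mechanic, with the real glusterd paths and logging omitted:

#include <inttypes.h>
#include <limits.h>
#include <signal.h>
#include <stdio.h>

static int
rotate_log(const char *pidfile, const char *logfile, uint64_t key)
{
    char rotated[PATH_MAX];
    FILE *fp;
    int pid = 0;

    fp = fopen(pidfile, "r");
    if (!fp)
        return -1;
    if (fscanf(fp, "%d", &pid) != 1) { /* pidfile holds one decimal pid */
        fclose(fp);
        return -1;
    }
    fclose(fp);

    /* move the live log aside; the brick still holds the old fd */
    snprintf(rotated, sizeof(rotated), "%s.%" PRIu64, logfile, key);
    if (rename(logfile, rotated) != 0)
        return -1;

    /* SIGHUP asks the process to reopen its log at the original path */
    return kill(pid, SIGHUP);
}

Because the rename happens before the signal, no log lines are lost: the brick keeps writing to the renamed file until it handles the SIGHUP and reopens the original path.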
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
index 993f12af103..c8b080cc0ca 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
@@ -22,1007 +22,976 @@
#include "glusterd-messages.h"
static int
-glusterd_mgmt_v3_null (rpcsvc_request_t *req)
+glusterd_mgmt_v3_null(rpcsvc_request_t *req)
{
- return 0;
+ return 0;
}
static int
-glusterd_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, int32_t status,
- uint32_t op_errno)
+glusterd_mgmt_v3_lock_send_resp(rpcsvc_request_t *req, int32_t status,
+ uint32_t op_errno)
{
+ gd1_mgmt_v3_lock_rsp rsp = {
+ {0},
+ };
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+
+ rsp.op_ret = status;
+ if (rsp.op_ret)
+ rsp.op_errno = op_errno;
- gd1_mgmt_v3_lock_rsp rsp = {{0},};
- int ret = -1;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
-
- rsp.op_ret = status;
- if (rsp.op_ret)
- rsp.op_errno = op_errno;
-
- glusterd_get_uuid (&rsp.uuid);
+ glusterd_get_uuid(&rsp.uuid);
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
+ ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
- gf_msg_debug (this->name, 0,
- "Responded to mgmt_v3 lock, ret: %d", ret);
+ gf_msg_debug(this->name, 0, "Responded to mgmt_v3 lock, ret: %d", ret);
- return ret;
+ return ret;
}
static int
-glusterd_synctasked_mgmt_v3_lock (rpcsvc_request_t *req,
- gd1_mgmt_v3_lock_req *lock_req,
- glusterd_op_lock_ctx_t *ctx)
+glusterd_synctasked_mgmt_v3_lock(rpcsvc_request_t *req,
+ gd1_mgmt_v3_lock_req *lock_req,
+ glusterd_op_lock_ctx_t *ctx)
{
- int32_t ret = -1;
- xlator_t *this = NULL;
- uint32_t op_errno = 0;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
- GF_ASSERT (ctx);
- GF_ASSERT (ctx->dict);
-
- /* Trying to acquire multiple mgmt_v3 locks */
- ret = glusterd_multiple_mgmt_v3_lock (ctx->dict, ctx->uuid, &op_errno);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_LOCK_GET_FAIL,
- "Failed to acquire mgmt_v3 locks for %s",
- uuid_utoa (ctx->uuid));
-
- ret = glusterd_mgmt_v3_lock_send_resp (req, ret, op_errno);
-
- gf_msg_trace (this->name, 0, "Returning %d", ret);
- return ret;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ uint32_t op_errno = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+ GF_ASSERT(ctx);
+ GF_ASSERT(ctx->dict);
+
+ /* Trying to acquire multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_lock(ctx->dict, ctx->uuid, &op_errno);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
+ "Failed to acquire mgmt_v3 locks for %s", uuid_utoa(ctx->uuid));
+
+ ret = glusterd_mgmt_v3_lock_send_resp(req, ret, op_errno);
+
+ gf_msg_trace(this->name, 0, "Returning %d", ret);
+ return ret;
}
static int
-glusterd_op_state_machine_mgmt_v3_lock (rpcsvc_request_t *req,
+glusterd_op_state_machine_mgmt_v3_lock(rpcsvc_request_t *req,
gd1_mgmt_v3_lock_req *lock_req,
glusterd_op_lock_ctx_t *ctx)
{
- int32_t ret = -1;
- xlator_t *this = NULL;
- glusterd_op_info_t txn_op_info = {{0},};
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
-
- glusterd_txn_opinfo_init (&txn_op_info, NULL, &lock_req->op, ctx->dict,
- req);
-
- ret = glusterd_set_txn_opinfo (&lock_req->txn_id, &txn_op_info);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_OPINFO_SET_FAIL,
- "Unable to set transaction's opinfo");
- goto out;
- }
-
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_LOCK,
- &lock_req->txn_id, ctx);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_OP_EVENT_LOCK_FAIL,
- "Failed to inject event GD_OP_EVENT_LOCK");
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ glusterd_op_info_t txn_op_info = {
+ {0},
+ };
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+
+ glusterd_txn_opinfo_init(&txn_op_info, NULL, &lock_req->op, ctx->dict, req);
+
+ ret = glusterd_set_txn_opinfo(&lock_req->txn_id, &txn_op_info);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OPINFO_SET_FAIL,
+ "Unable to set transaction's opinfo");
+ goto out;
+ }
+
+ ret = glusterd_op_sm_inject_event(GD_OP_EVENT_LOCK, &lock_req->txn_id, ctx);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_EVENT_LOCK_FAIL,
+ "Failed to inject event GD_OP_EVENT_LOCK");
out:
- glusterd_friend_sm ();
- glusterd_op_sm ();
+ glusterd_friend_sm();
+ glusterd_op_sm();
- gf_msg_trace (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_trace(this->name, 0, "Returning %d", ret);
+ return ret;
}
static int
-glusterd_handle_mgmt_v3_lock_fn (rpcsvc_request_t *req)
+glusterd_handle_mgmt_v3_lock_fn(rpcsvc_request_t *req)
{
- gd1_mgmt_v3_lock_req lock_req = {{0},};
- int32_t ret = -1;
- glusterd_op_lock_ctx_t *ctx = NULL;
- xlator_t *this = NULL;
- gf_boolean_t is_synctasked = _gf_false;
- gf_boolean_t free_ctx = _gf_false;
- glusterd_conf_t *conf = NULL;
- uint32_t timeout = 0;
-
- this = THIS;
- conf = this->private;
- GF_ASSERT (conf);
- GF_ASSERT (this);
- GF_ASSERT (req);
-
- ret = xdr_to_generic (req->msg[0], &lock_req,
- (xdrproc_t)xdr_gd1_mgmt_v3_lock_req);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode lock "
- "request received from peer");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- gf_msg_debug (this->name, 0, "Received mgmt_v3 lock req "
- "from uuid: %s", uuid_utoa (lock_req.uuid));
-
- if (glusterd_peerinfo_find_by_uuid (lock_req.uuid) == NULL) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_PEER_NOT_FOUND, "%s doesn't "
- "belong to the cluster. Ignoring request.",
- uuid_utoa (lock_req.uuid));
- ret = -1;
- goto out;
- }
-
- ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
- if (!ctx) {
- ret = -1;
- goto out;
- }
-
- gf_uuid_copy (ctx->uuid, lock_req.uuid);
- ctx->req = req;
-
- ctx->dict = dict_new ();
- if (!ctx->dict) {
- ret = -1;
- goto out;
- }
-
- ret = dict_unserialize (lock_req.dict.dict_val,
- lock_req.dict.dict_len, &ctx->dict);
+ gd1_mgmt_v3_lock_req lock_req = {
+ {0},
+ };
+ int32_t ret = -1;
+ glusterd_op_lock_ctx_t *ctx = NULL;
+ xlator_t *this = NULL;
+ gf_boolean_t is_synctasked = _gf_false;
+ gf_boolean_t free_ctx = _gf_false;
+ glusterd_conf_t *conf = NULL;
+ uint32_t timeout = 0;
+
+ this = THIS;
+ conf = this->private;
+ GF_ASSERT(conf);
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+
+ ret = xdr_to_generic(req->msg[0], &lock_req,
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_req);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode lock "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_msg_debug(this->name, 0,
+ "Received mgmt_v3 lock req "
+ "from uuid: %s",
+ uuid_utoa(lock_req.uuid));
+
+ if (glusterd_peerinfo_find_by_uuid(lock_req.uuid) == NULL) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
+ "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa(lock_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_op_lock_ctx_t);
+ if (!ctx) {
+ ret = -1;
+ goto out;
+ }
+
+ gf_uuid_copy(ctx->uuid, lock_req.uuid);
+ ctx->req = req;
+
+ ctx->dict = dict_new();
+ if (!ctx->dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize(lock_req.dict.dict_val, lock_req.dict.dict_len,
+ &ctx->dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ /* Cli will add timeout key to dict if the default timeout is
+ * other than 2 minutes. Here we use this value to check whether
+ * mgmt_v3_lock_timeout should be set to default value or we
+ * need to change the value according to timeout value
+     * i.e., timeout + 120 seconds. */
+ ret = dict_get_uint32(ctx->dict, "timeout", &timeout);
+ if (!ret)
+ conf->mgmt_v3_lock_timeout = timeout + 120;
+
+ is_synctasked = dict_get_str_boolean(ctx->dict, "is_synctasked", _gf_false);
+ if (is_synctasked) {
+ ret = glusterd_synctasked_mgmt_v3_lock(req, &lock_req, ctx);
if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to unserialize the dictionary");
- goto out;
- }
-
- /* Cli will add timeout key to dict if the default timeout is
- * other than 2 minutes. Here we use this value to check whether
- * mgmt_v3_lock_timeout should be set to default value or we
- * need to change the value according to timeout value
- * i.e, timeout + 120 seconds. */
- ret = dict_get_uint32 (ctx->dict, "timeout", &timeout);
- if (!ret)
- conf->mgmt_v3_lock_timeout = timeout + 120;
-
- is_synctasked = dict_get_str_boolean (ctx->dict,
- "is_synctasked", _gf_false);
- if (is_synctasked) {
- ret = glusterd_synctasked_mgmt_v3_lock (req, &lock_req, ctx);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_LOCK_GET_FAIL,
- "Failed to acquire mgmt_v3_locks");
- /* Ignore the return code, as it shouldn't be propagated
- * from the handler function so as to avoid double
- * deletion of the req
- */
- ret = 0;
- }
-
- /* The above function does not take ownership of ctx.
- * Therefore we need to free the ctx explicitly. */
- free_ctx = _gf_true;
- }
- else {
- /* Shouldn't ignore the return code here, and it should
- * be propagated from the handler function as in failure
- * case it doesn't delete the req object
- */
- ret = glusterd_op_state_machine_mgmt_v3_lock (req, &lock_req,
- ctx);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_LOCK_GET_FAIL,
- "Failed to acquire mgmt_v3_locks");
- }
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
+ "Failed to acquire mgmt_v3_locks");
+ /* Ignore the return code, as it shouldn't be propagated
+ * from the handler function so as to avoid double
+ * deletion of the req
+ */
+ ret = 0;
+ }
+
+ /* The above function does not take ownership of ctx.
+ * Therefore we need to free the ctx explicitly. */
+ free_ctx = _gf_true;
+ } else {
+ /* Shouldn't ignore the return code here, and it should
+ * be propagated from the handler function as in failure
+ * case it doesn't delete the req object
+ */
+ ret = glusterd_op_state_machine_mgmt_v3_lock(req, &lock_req, ctx);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
+ "Failed to acquire mgmt_v3_locks");
+ }
out:
- if (ctx && (ret || free_ctx)) {
- if (ctx->dict)
- dict_unref (ctx->dict);
+ if (ctx && (ret || free_ctx)) {
+ if (ctx->dict)
+ dict_unref(ctx->dict);
- GF_FREE (ctx);
- }
+ GF_FREE(ctx);
+ }
- free (lock_req.dict.dict_val);
+ free(lock_req.dict.dict_val);
- gf_msg_trace (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_trace(this->name, 0, "Returning %d", ret);
+ return ret;
}
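The timeout handling in this handler follows the comment above: when the CLI ships a non-default "timeout" key in the request dictionary, the effective mgmt_v3 lock timeout becomes that value plus 120 seconds; otherwise the 2-minute default stands. A small sketch under those assumptions, with the dict reduced to a hypothetical struct rather than the real dict_t API:

#include <stdint.h>

/* hypothetical stand-in for the unserialized request dict */
typedef struct {
    int has_timeout;  /* set when the CLI added a "timeout" key */
    uint32_t timeout; /* CLI-chosen operation timeout, in seconds */
} lock_req_dict_t;

static uint32_t
effective_lock_timeout(const lock_req_dict_t *d)
{
    if (d->has_timeout)
        return d->timeout + 120; /* lock must outlive the op by 2 min */

    return 120; /* assumed default: 2-minute mgmt_v3 lock timeout */
}

The extra 120 seconds mirrors the default, so the lock held on each peer always outlives the CLI's own wait for the operation.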
static int
-glusterd_mgmt_v3_pre_validate_send_resp (rpcsvc_request_t *req,
- int32_t op, int32_t status,
- char *op_errstr, dict_t *rsp_dict,
- uint32_t op_errno)
+glusterd_mgmt_v3_pre_validate_send_resp(rpcsvc_request_t *req, int32_t op,
+ int32_t status, char *op_errstr,
+ dict_t *rsp_dict, uint32_t op_errno)
{
- gd1_mgmt_v3_pre_val_rsp rsp = {{0},};
- int ret = -1;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
-
- rsp.op_ret = status;
- glusterd_get_uuid (&rsp.uuid);
- rsp.op = op;
- rsp.op_errno = op_errno;
- if (op_errstr)
- rsp.op_errstr = op_errstr;
- else
- rsp.op_errstr = "";
-
- ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
- &rsp.dict.dict_len);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "failed to get serialized length of dict");
- goto out;
- }
-
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_v3_pre_val_rsp);
-
- GF_FREE (rsp.dict.dict_val);
+ gd1_mgmt_v3_pre_val_rsp rsp = {
+ {0},
+ };
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+
+ rsp.op_ret = status;
+ glusterd_get_uuid(&rsp.uuid);
+ rsp.op = op;
+ rsp.op_errno = op_errno;
+ if (op_errstr)
+ rsp.op_errstr = op_errstr;
+ else
+ rsp.op_errstr = "";
+
+ ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
+ "failed to get serialized length of dict");
+ goto out;
+ }
+
+ ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_pre_val_rsp);
+
+ GF_FREE(rsp.dict.dict_val);
out:
- gf_msg_debug (this->name, 0,
- "Responded to pre validation, ret: %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Responded to pre validation, ret: %d", ret);
+ return ret;
}
static int
-glusterd_handle_pre_validate_fn (rpcsvc_request_t *req)
+glusterd_handle_pre_validate_fn(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gd1_mgmt_v3_pre_val_req op_req = {{0},};
- xlator_t *this = NULL;
- char *op_errstr = NULL;
- dict_t *dict = NULL;
- dict_t *rsp_dict = NULL;
- uint32_t op_errno = 0;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
-
- ret = xdr_to_generic (req->msg[0], &op_req,
- (xdrproc_t)xdr_gd1_mgmt_v3_pre_val_req);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL,
- "Failed to decode pre validation "
- "request received from peer");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- if (glusterd_peerinfo_find_by_uuid (op_req.uuid) == NULL) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_PEER_NOT_FOUND, "%s doesn't "
- "belong to the cluster. Ignoring request.",
- uuid_utoa (op_req.uuid));
- ret = -1;
- goto out;
- }
-
- dict = dict_new ();
- if (!dict)
- goto out;
-
- ret = dict_unserialize (op_req.dict.dict_val,
- op_req.dict.dict_len, &dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to unserialize the dictionary");
- goto out;
- }
-
- rsp_dict = dict_new ();
- if (!rsp_dict) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_CREATE_FAIL,
- "Failed to get new dictionary");
- return -1;
- }
-
- ret = gd_mgmt_v3_pre_validate_fn (op_req.op, dict, &op_errstr,
- rsp_dict, &op_errno);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PRE_VALIDATION_FAIL,
- "Pre Validation failed on operation %s",
- gd_op_list[op_req.op]);
- }
-
- ret = glusterd_mgmt_v3_pre_validate_send_resp (req, op_req.op,
- ret, op_errstr,
- rsp_dict, op_errno);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_OP_RESP_FAIL,
- "Failed to send Pre Validation "
- "response for operation %s",
- gd_op_list[op_req.op]);
- goto out;
- }
+ int32_t ret = -1;
+ gd1_mgmt_v3_pre_val_req op_req = {
+ {0},
+ };
+ xlator_t *this = NULL;
+ char *op_errstr = NULL;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+ uint32_t op_errno = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+
+ ret = xdr_to_generic(req->msg[0], &op_req,
+ (xdrproc_t)xdr_gd1_mgmt_v3_pre_val_req);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode pre validation "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
+ "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa(op_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ dict = dict_new();
+ if (!dict)
+ goto out;
+
+ ret = dict_unserialize(op_req.dict.dict_val, op_req.dict.dict_len, &dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ rsp_dict = dict_new();
+ if (!rsp_dict) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
+ "Failed to get new dictionary");
+ return -1;
+ }
+
+ ret = gd_mgmt_v3_pre_validate_fn(op_req.op, dict, &op_errstr, rsp_dict,
+ &op_errno);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
+ "Pre Validation failed on operation %s", gd_op_list[op_req.op]);
+ }
+
+ ret = glusterd_mgmt_v3_pre_validate_send_resp(
+ req, op_req.op, ret, op_errstr, rsp_dict, op_errno);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_OP_RESP_FAIL,
+ "Failed to send Pre Validation "
+ "response for operation %s",
+ gd_op_list[op_req.op]);
+ goto out;
+ }
out:
- if (op_errstr && (strcmp (op_errstr, "")))
- GF_FREE (op_errstr);
+ if (op_errstr && (strcmp(op_errstr, "")))
+ GF_FREE(op_errstr);
- free (op_req.dict.dict_val);
+ free(op_req.dict.dict_val);
- if (dict)
- dict_unref (dict);
+ if (dict)
+ dict_unref(dict);
- if (rsp_dict)
- dict_unref (rsp_dict);
+ if (rsp_dict)
+ dict_unref(rsp_dict);
- /* Return 0 from handler to avoid double deletion of req obj */
- return 0;
+ /* Return 0 from handler to avoid double deletion of req obj */
+ return 0;
}
static int
-glusterd_mgmt_v3_brick_op_send_resp (rpcsvc_request_t *req,
- int32_t op, int32_t status,
- char *op_errstr, dict_t *rsp_dict)
+glusterd_mgmt_v3_brick_op_send_resp(rpcsvc_request_t *req, int32_t op,
+ int32_t status, char *op_errstr,
+ dict_t *rsp_dict)
{
- gd1_mgmt_v3_brick_op_rsp rsp = {{0},};
- int ret = -1;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
-
- rsp.op_ret = status;
- glusterd_get_uuid (&rsp.uuid);
- rsp.op = op;
- if (op_errstr)
- rsp.op_errstr = op_errstr;
- else
- rsp.op_errstr = "";
-
- ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
- &rsp.dict.dict_len);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "failed to get serialized length of dict");
- goto out;
- }
-
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_v3_brick_op_rsp);
-
- GF_FREE (rsp.dict.dict_val);
+ gd1_mgmt_v3_brick_op_rsp rsp = {
+ {0},
+ };
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+
+ rsp.op_ret = status;
+ glusterd_get_uuid(&rsp.uuid);
+ rsp.op = op;
+ if (op_errstr)
+ rsp.op_errstr = op_errstr;
+ else
+ rsp.op_errstr = "";
+
+ ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
+ "failed to get serialized length of dict");
+ goto out;
+ }
+
+ ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_brick_op_rsp);
+
+ GF_FREE(rsp.dict.dict_val);
out:
- gf_msg_debug (this->name, 0,
- "Responded to brick op, ret: %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Responded to brick op, ret: %d", ret);
+ return ret;
}
static int
-glusterd_handle_brick_op_fn (rpcsvc_request_t *req)
+glusterd_handle_brick_op_fn(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gd1_mgmt_v3_brick_op_req op_req = {{0},};
- xlator_t *this = NULL;
- char *op_errstr = NULL;
- dict_t *dict = NULL;
- dict_t *rsp_dict = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
-
- ret = xdr_to_generic (req->msg[0], &op_req,
- (xdrproc_t)xdr_gd1_mgmt_v3_brick_op_req);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode brick op "
- "request received from peer");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- if (glusterd_peerinfo_find_by_uuid (op_req.uuid) == NULL) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_PEER_NOT_FOUND, "%s doesn't "
- "belong to the cluster. Ignoring request.",
- uuid_utoa (op_req.uuid));
- ret = -1;
- goto out;
- }
-
- dict = dict_new ();
- if (!dict)
- goto out;
-
- ret = dict_unserialize (op_req.dict.dict_val,
- op_req.dict.dict_len, &dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to unserialize the dictionary");
- goto out;
- }
-
- rsp_dict = dict_new ();
- if (!rsp_dict) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_CREATE_FAIL,
- "Failed to get new dictionary");
- return -1;
- }
-
- ret = gd_mgmt_v3_brick_op_fn (op_req.op, dict, &op_errstr,
- rsp_dict);
-
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_OP_FAIL,
- "Brick Op failed on operation %s",
- gd_op_list[op_req.op]);
- }
-
- ret = glusterd_mgmt_v3_brick_op_send_resp (req, op_req.op,
- ret, op_errstr,
- rsp_dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PRE_VALD_RESP_FAIL,
- "Failed to send brick op "
- "response for operation %s",
- gd_op_list[op_req.op]);
- goto out;
- }
+ int32_t ret = -1;
+ gd1_mgmt_v3_brick_op_req op_req = {
+ {0},
+ };
+ xlator_t *this = NULL;
+ char *op_errstr = NULL;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+
+ ret = xdr_to_generic(req->msg[0], &op_req,
+ (xdrproc_t)xdr_gd1_mgmt_v3_brick_op_req);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode brick op "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
+ "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa(op_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ dict = dict_new();
+ if (!dict)
+ goto out;
+
+ ret = dict_unserialize(op_req.dict.dict_val, op_req.dict.dict_len, &dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ rsp_dict = dict_new();
+ if (!rsp_dict) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
+ "Failed to get new dictionary");
+ return -1;
+ }
+
+ ret = gd_mgmt_v3_brick_op_fn(op_req.op, dict, &op_errstr, rsp_dict);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_OP_FAIL,
+ "Brick Op failed on operation %s", gd_op_list[op_req.op]);
+ }
+
+ ret = glusterd_mgmt_v3_brick_op_send_resp(req, op_req.op, ret, op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALD_RESP_FAIL,
+ "Failed to send brick op "
+ "response for operation %s",
+ gd_op_list[op_req.op]);
+ goto out;
+ }
out:
- if (op_errstr && (strcmp (op_errstr, "")))
- GF_FREE (op_errstr);
+ if (op_errstr && (strcmp(op_errstr, "")))
+ GF_FREE(op_errstr);
- free (op_req.dict.dict_val);
+ free(op_req.dict.dict_val);
- if (dict)
- dict_unref (dict);
+ if (dict)
+ dict_unref(dict);
- if (rsp_dict)
- dict_unref (rsp_dict);
+ if (rsp_dict)
+ dict_unref(rsp_dict);
- /* Return 0 from handler to avoid double deletion of req obj */
- return 0;
+ /* Return 0 from handler to avoid double deletion of req obj */
+ return 0;
}
static int
-glusterd_mgmt_v3_commit_send_resp (rpcsvc_request_t *req,
- int32_t op, int32_t status,
- char *op_errstr, uint32_t op_errno,
- dict_t *rsp_dict)
+glusterd_mgmt_v3_commit_send_resp(rpcsvc_request_t *req, int32_t op,
+ int32_t status, char *op_errstr,
+ uint32_t op_errno, dict_t *rsp_dict)
{
- gd1_mgmt_v3_commit_rsp rsp = {{0},};
- int ret = -1;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
-
- rsp.op_ret = status;
- glusterd_get_uuid (&rsp.uuid);
- rsp.op = op;
- rsp.op_errno = op_errno;
- if (op_errstr)
- rsp.op_errstr = op_errstr;
- else
- rsp.op_errstr = "";
-
- ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
- &rsp.dict.dict_len);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "failed to get serialized length of dict");
- goto out;
- }
-
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_v3_commit_rsp);
-
- GF_FREE (rsp.dict.dict_val);
+ gd1_mgmt_v3_commit_rsp rsp = {
+ {0},
+ };
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+
+ rsp.op_ret = status;
+ glusterd_get_uuid(&rsp.uuid);
+ rsp.op = op;
+ rsp.op_errno = op_errno;
+ if (op_errstr)
+ rsp.op_errstr = op_errstr;
+ else
+ rsp.op_errstr = "";
+
+ ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
+ "failed to get serialized length of dict");
+ goto out;
+ }
+
+ ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_commit_rsp);
+
+ GF_FREE(rsp.dict.dict_val);
out:
- gf_msg_debug (this->name, 0, "Responded to commit, ret: %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Responded to commit, ret: %d", ret);
+ return ret;
}
static int
-glusterd_handle_commit_fn (rpcsvc_request_t *req)
+glusterd_handle_commit_fn(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gd1_mgmt_v3_commit_req op_req = {{0},};
- xlator_t *this = NULL;
- char *op_errstr = NULL;
- dict_t *dict = NULL;
- dict_t *rsp_dict = NULL;
- uint32_t op_errno = 0;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
-
- ret = xdr_to_generic (req->msg[0], &op_req,
- (xdrproc_t)xdr_gd1_mgmt_v3_commit_req);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode commit "
- "request received from peer");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- if (glusterd_peerinfo_find_by_uuid (op_req.uuid) == NULL) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_PEER_NOT_FOUND, "%s doesn't "
- "belong to the cluster. Ignoring request.",
- uuid_utoa (op_req.uuid));
- ret = -1;
- goto out;
- }
-
- dict = dict_new ();
- if (!dict)
- goto out;
-
- ret = dict_unserialize (op_req.dict.dict_val,
- op_req.dict.dict_len, &dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to unserialize the dictionary");
- goto out;
- }
-
- rsp_dict = dict_new ();
- if (!rsp_dict) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_CREATE_FAIL,
- "Failed to get new dictionary");
- return -1;
- }
-
- ret = gd_mgmt_v3_commit_fn (op_req.op, dict, &op_errstr,
- &op_errno, rsp_dict);
-
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_COMMIT_OP_FAIL,
- "commit failed on operation %s",
- gd_op_list[op_req.op]);
- }
-
- ret = glusterd_mgmt_v3_commit_send_resp (req, op_req.op,
- ret, op_errstr,
- op_errno, rsp_dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_OP_RESP_FAIL,
- "Failed to send commit "
- "response for operation %s",
- gd_op_list[op_req.op]);
- goto out;
- }
+ int32_t ret = -1;
+ gd1_mgmt_v3_commit_req op_req = {
+ {0},
+ };
+ xlator_t *this = NULL;
+ char *op_errstr = NULL;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+ uint32_t op_errno = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+
+ ret = xdr_to_generic(req->msg[0], &op_req,
+ (xdrproc_t)xdr_gd1_mgmt_v3_commit_req);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode commit "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
+ "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa(op_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ dict = dict_new();
+ if (!dict)
+ goto out;
+
+ ret = dict_unserialize(op_req.dict.dict_val, op_req.dict.dict_len, &dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ rsp_dict = dict_new();
+ if (!rsp_dict) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
+ "Failed to get new dictionary");
+ return -1;
+ }
+
+ ret = gd_mgmt_v3_commit_fn(op_req.op, dict, &op_errstr, &op_errno,
+ rsp_dict);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
+ "commit failed on operation %s", gd_op_list[op_req.op]);
+ }
+
+ ret = glusterd_mgmt_v3_commit_send_resp(req, op_req.op, ret, op_errstr,
+ op_errno, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_OP_RESP_FAIL,
+ "Failed to send commit "
+ "response for operation %s",
+ gd_op_list[op_req.op]);
+ goto out;
+ }
out:
- if (op_errstr && (strcmp (op_errstr, "")))
- GF_FREE (op_errstr);
+ if (op_errstr && (strcmp(op_errstr, "")))
+ GF_FREE(op_errstr);
- free (op_req.dict.dict_val);
+ free(op_req.dict.dict_val);
- if (dict)
- dict_unref (dict);
+ if (dict)
+ dict_unref(dict);
- if (rsp_dict)
- dict_unref (rsp_dict);
+ if (rsp_dict)
+ dict_unref(rsp_dict);
- /* Return 0 from handler to avoid double deletion of req obj */
- return 0;
+ /* Return 0 from handler to avoid double deletion of req obj */
+ return 0;
}
static int
-glusterd_mgmt_v3_post_validate_send_resp (rpcsvc_request_t *req,
- int32_t op, int32_t status,
- char *op_errstr, dict_t *rsp_dict)
+glusterd_mgmt_v3_post_validate_send_resp(rpcsvc_request_t *req, int32_t op,
+ int32_t status, char *op_errstr,
+ dict_t *rsp_dict)
{
- gd1_mgmt_v3_post_val_rsp rsp = {{0},};
- int ret = -1;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
-
- rsp.op_ret = status;
- glusterd_get_uuid (&rsp.uuid);
- rsp.op = op;
- if (op_errstr)
- rsp.op_errstr = op_errstr;
- else
- rsp.op_errstr = "";
-
- ret = dict_allocate_and_serialize (rsp_dict, &rsp.dict.dict_val,
- &rsp.dict.dict_len);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "failed to get serialized length of dict");
- goto out;
- }
-
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_v3_post_val_rsp);
-
- GF_FREE (rsp.dict.dict_val);
+ gd1_mgmt_v3_post_val_rsp rsp = {
+ {0},
+ };
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+
+ rsp.op_ret = status;
+ glusterd_get_uuid(&rsp.uuid);
+ rsp.op = op;
+ if (op_errstr)
+ rsp.op_errstr = op_errstr;
+ else
+ rsp.op_errstr = "";
+
+ ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
+ "failed to get serialized length of dict");
+ goto out;
+ }
+
+ ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_post_val_rsp);
+
+ GF_FREE(rsp.dict.dict_val);
out:
- gf_msg_debug (this->name, 0,
- "Responded to post validation, ret: %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Responded to post validation, ret: %d", ret);
+ return ret;
}
static int
-glusterd_handle_post_validate_fn (rpcsvc_request_t *req)
+glusterd_handle_post_validate_fn(rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gd1_mgmt_v3_post_val_req op_req = {{0},};
- xlator_t *this = NULL;
- char *op_errstr = NULL;
- dict_t *dict = NULL;
- dict_t *rsp_dict = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
-
- ret = xdr_to_generic (req->msg[0], &op_req,
- (xdrproc_t)xdr_gd1_mgmt_v3_post_val_req);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL,
- "Failed to decode post validation "
- "request received from peer");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- if (glusterd_peerinfo_find_by_uuid (op_req.uuid) == NULL) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_PEER_NOT_FOUND, "%s doesn't "
- "belong to the cluster. Ignoring request.",
- uuid_utoa (op_req.uuid));
- ret = -1;
- goto out;
- }
-
- dict = dict_new ();
- if (!dict)
- goto out;
-
- ret = dict_unserialize (op_req.dict.dict_val,
- op_req.dict.dict_len, &dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to unserialize the dictionary");
- goto out;
- }
-
- rsp_dict = dict_new ();
- if (!rsp_dict) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_CREATE_FAIL,
- "Failed to get new dictionary");
- return -1;
- }
-
- ret = gd_mgmt_v3_post_validate_fn (op_req.op, op_req.op_ret, dict,
- &op_errstr, rsp_dict);
-
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_POST_VALIDATION_FAIL,
- "Post Validation failed on operation %s",
- gd_op_list[op_req.op]);
- }
-
- ret = glusterd_mgmt_v3_post_validate_send_resp (req, op_req.op,
- ret, op_errstr,
- rsp_dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_OP_RESP_FAIL,
- "Failed to send Post Validation "
- "response for operation %s",
- gd_op_list[op_req.op]);
- goto out;
- }
+ int32_t ret = -1;
+ gd1_mgmt_v3_post_val_req op_req = {
+ {0},
+ };
+ xlator_t *this = NULL;
+ char *op_errstr = NULL;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+
+ ret = xdr_to_generic(req->msg[0], &op_req,
+ (xdrproc_t)xdr_gd1_mgmt_v3_post_val_req);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode post validation "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
+ "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa(op_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ dict = dict_new();
+ if (!dict)
+ goto out;
+
+ ret = dict_unserialize(op_req.dict.dict_val, op_req.dict.dict_len, &dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ rsp_dict = dict_new();
+ if (!rsp_dict) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
+ "Failed to get new dictionary");
+ return -1;
+ }
+
+ ret = gd_mgmt_v3_post_validate_fn(op_req.op, op_req.op_ret, dict,
+ &op_errstr, rsp_dict);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_VALIDATION_FAIL,
+ "Post Validation failed on operation %s", gd_op_list[op_req.op]);
+ }
+
+ ret = glusterd_mgmt_v3_post_validate_send_resp(req, op_req.op, ret,
+ op_errstr, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_OP_RESP_FAIL,
+ "Failed to send Post Validation "
+ "response for operation %s",
+ gd_op_list[op_req.op]);
+ goto out;
+ }
out:
- if (op_errstr && (strcmp (op_errstr, "")))
- GF_FREE (op_errstr);
+ if (op_errstr && (strcmp(op_errstr, "")))
+ GF_FREE(op_errstr);
- free (op_req.dict.dict_val);
+ free(op_req.dict.dict_val);
- if (dict)
- dict_unref (dict);
+ if (dict)
+ dict_unref(dict);
- if (rsp_dict)
- dict_unref (rsp_dict);
+ if (rsp_dict)
+ dict_unref(rsp_dict);
- /* Return 0 from handler to avoid double deletion of req obj */
- return 0;
+ /* Return 0 from handler to avoid double deletion of req obj */
+ return 0;
}
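The four phase handlers above (pre-validate, brick-op, commit, post-validate) share one contract, spelled out in their closing comments: once a response has been attempted, the handler returns 0 regardless of the phase function's result, so the RPC layer does not free the request a second time. A sketch of that contract, with hypothetical stand-ins for the request type and the two steps:

typedef struct request request_t; /* opaque RPC request */

static int
do_phase(request_t *req)
{
    (void)req;
    return -1; /* stub: the real work is the gd_mgmt_v3_*_fn call */
}

static int
send_response(request_t *req, int status)
{
    (void)req;
    (void)status;
    return 0; /* stub: the real reply path is glusterd_submit_reply */
}

static int
phase_handler(request_t *req)
{
    int ret = do_phase(req);       /* failure travels in the status */

    (void)send_response(req, ret); /* best-effort reply to the peer */

    return 0; /* never propagate: req ownership moved to the reply path */
}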
static int
-glusterd_mgmt_v3_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
+glusterd_mgmt_v3_unlock_send_resp(rpcsvc_request_t *req, int32_t status)
{
+ gd1_mgmt_v3_unlock_rsp rsp = {
+ {0},
+ };
+ int ret = -1;
+ xlator_t *this = NULL;
- gd1_mgmt_v3_unlock_rsp rsp = {{0},};
- int ret = -1;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
- rsp.op_ret = status;
- if (rsp.op_ret)
- rsp.op_errno = errno;
+ rsp.op_ret = status;
+ if (rsp.op_ret)
+ rsp.op_errno = errno;
- glusterd_get_uuid (&rsp.uuid);
+ glusterd_get_uuid(&rsp.uuid);
- ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
+ ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
- gf_msg_debug (this->name, 0,
- "Responded to mgmt_v3 unlock, ret: %d", ret);
+ gf_msg_debug(this->name, 0, "Responded to mgmt_v3 unlock, ret: %d", ret);
- return ret;
+ return ret;
}
static int
-glusterd_syctasked_mgmt_v3_unlock (rpcsvc_request_t *req,
+glusterd_syctasked_mgmt_v3_unlock(rpcsvc_request_t *req,
gd1_mgmt_v3_unlock_req *unlock_req,
glusterd_op_lock_ctx_t *ctx)
{
- int32_t ret = -1;
- xlator_t *this = NULL;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
- GF_ASSERT (ctx);
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+ GF_ASSERT(ctx);
- /* Trying to release multiple mgmt_v3 locks */
- ret = glusterd_multiple_mgmt_v3_unlock (ctx->dict, ctx->uuid);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_UNLOCK_FAIL,
- "Failed to release mgmt_v3 locks for %s",
- uuid_utoa(ctx->uuid));
- }
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock(ctx->dict, ctx->uuid);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
+ "Failed to release mgmt_v3 locks for %s", uuid_utoa(ctx->uuid));
+ }
- ret = glusterd_mgmt_v3_unlock_send_resp (req, ret);
+ ret = glusterd_mgmt_v3_unlock_send_resp(req, ret);
- gf_msg_trace (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_trace(this->name, 0, "Returning %d", ret);
+ return ret;
}
-
static int
-glusterd_op_state_machine_mgmt_v3_unlock (rpcsvc_request_t *req,
+glusterd_op_state_machine_mgmt_v3_unlock(rpcsvc_request_t *req,
gd1_mgmt_v3_unlock_req *lock_req,
glusterd_op_lock_ctx_t *ctx)
{
- int32_t ret = -1;
- xlator_t *this = NULL;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_UNLOCK,
- &lock_req->txn_id, ctx);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_OP_EVENT_UNLOCK_FAIL,
- "Failed to inject event GD_OP_EVENT_UNLOCK");
+ ret = glusterd_op_sm_inject_event(GD_OP_EVENT_UNLOCK, &lock_req->txn_id,
+ ctx);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_EVENT_UNLOCK_FAIL,
+ "Failed to inject event GD_OP_EVENT_UNLOCK");
- glusterd_friend_sm ();
- glusterd_op_sm ();
+ glusterd_friend_sm();
+ glusterd_op_sm();
- gf_msg_trace (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_trace(this->name, 0, "Returning %d", ret);
+ return ret;
}
static int
-glusterd_handle_mgmt_v3_unlock_fn (rpcsvc_request_t *req)
+glusterd_handle_mgmt_v3_unlock_fn(rpcsvc_request_t *req)
{
- gd1_mgmt_v3_unlock_req lock_req = {{0},};
- int32_t ret = -1;
- glusterd_op_lock_ctx_t *ctx = NULL;
- xlator_t *this = NULL;
- gf_boolean_t is_synctasked = _gf_false;
- gf_boolean_t free_ctx = _gf_false;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
-
- ret = xdr_to_generic (req->msg[0], &lock_req,
- (xdrproc_t)xdr_gd1_mgmt_v3_unlock_req);
- if (ret < 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_REQ_DECODE_FAIL, "Failed to decode unlock "
- "request received from peer");
- req->rpc_err = GARBAGE_ARGS;
- goto out;
- }
-
- gf_msg_debug (this->name, 0, "Received volume unlock req "
- "from uuid: %s", uuid_utoa (lock_req.uuid));
-
- if (glusterd_peerinfo_find_by_uuid (lock_req.uuid) == NULL) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_PEER_NOT_FOUND, "%s doesn't "
- "belong to the cluster. Ignoring request.",
- uuid_utoa (lock_req.uuid));
- ret = -1;
- goto out;
- }
-
- ctx = GF_CALLOC (1, sizeof (*ctx), gf_gld_mt_op_lock_ctx_t);
- if (!ctx) {
- ret = -1;
- goto out;
- }
-
- gf_uuid_copy (ctx->uuid, lock_req.uuid);
- ctx->req = req;
-
- ctx->dict = dict_new ();
- if (!ctx->dict) {
- ret = -1;
- goto out;
- }
-
- ret = dict_unserialize (lock_req.dict.dict_val,
- lock_req.dict.dict_len, &ctx->dict);
+ gd1_mgmt_v3_unlock_req lock_req = {
+ {0},
+ };
+ int32_t ret = -1;
+ glusterd_op_lock_ctx_t *ctx = NULL;
+ xlator_t *this = NULL;
+ gf_boolean_t is_synctasked = _gf_false;
+ gf_boolean_t free_ctx = _gf_false;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+
+ ret = xdr_to_generic(req->msg[0], &lock_req,
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_req);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
+ "Failed to decode unlock "
+ "request received from peer");
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ gf_msg_debug(this->name, 0,
+ "Received volume unlock req "
+ "from uuid: %s",
+ uuid_utoa(lock_req.uuid));
+
+ if (glusterd_peerinfo_find_by_uuid(lock_req.uuid) == NULL) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
+ "%s doesn't "
+ "belong to the cluster. Ignoring request.",
+ uuid_utoa(lock_req.uuid));
+ ret = -1;
+ goto out;
+ }
+
+ ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_op_lock_ctx_t);
+ if (!ctx) {
+ ret = -1;
+ goto out;
+ }
+
+ gf_uuid_copy(ctx->uuid, lock_req.uuid);
+ ctx->req = req;
+
+ ctx->dict = dict_new();
+ if (!ctx->dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize(lock_req.dict.dict_val, lock_req.dict.dict_len,
+ &ctx->dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to unserialize the dictionary");
+ goto out;
+ }
+
+ is_synctasked = dict_get_str_boolean(ctx->dict, "is_synctasked", _gf_false);
+ if (is_synctasked) {
+ ret = glusterd_syctasked_mgmt_v3_unlock(req, &lock_req, ctx);
if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to unserialize the dictionary");
- goto out;
- }
-
- is_synctasked = dict_get_str_boolean (ctx->dict,
- "is_synctasked", _gf_false);
- if (is_synctasked) {
- ret = glusterd_syctasked_mgmt_v3_unlock (req, &lock_req, ctx);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_UNLOCK_FAIL,
- "Failed to release mgmt_v3_locks");
- /* Ignore the return code, as it shouldn't be propagated
- * from the handler function so as to avoid double
- * deletion of the req
- */
- ret = 0;
- }
-
- /* The above function does not take ownership of ctx.
- * Therefore we need to free the ctx explicitly. */
- free_ctx = _gf_true;
- }
- else {
- /* Shouldn't ignore the return code here, and it should
- * be propagated from the handler function as in failure
- * case it doesn't delete the req object
- */
- ret = glusterd_op_state_machine_mgmt_v3_unlock (req, &lock_req,
- ctx);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_UNLOCK_FAIL,
- "Failed to release mgmt_v3_locks");
- }
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
+ "Failed to release mgmt_v3_locks");
+ /* Ignore the return code, as it shouldn't be propagated
+ * from the handler function so as to avoid double
+ * deletion of the req
+ */
+ ret = 0;
+ }
+
+ /* The above function does not take ownership of ctx.
+ * Therefore we need to free the ctx explicitly. */
+ free_ctx = _gf_true;
+ } else {
+ /* Shouldn't ignore the return code here, and it should
+ * be propagated from the handler function as in failure
+ * case it doesn't delete the req object
+ */
+ ret = glusterd_op_state_machine_mgmt_v3_unlock(req, &lock_req, ctx);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
+ "Failed to release mgmt_v3_locks");
+ }
out:
- if (ctx && (ret || free_ctx)) {
- if (ctx->dict)
- dict_unref (ctx->dict);
+ if (ctx && (ret || free_ctx)) {
+ if (ctx->dict)
+ dict_unref(ctx->dict);
- GF_FREE (ctx);
- }
+ GF_FREE(ctx);
+ }
- free (lock_req.dict.dict_val);
+ free(lock_req.dict.dict_val);
- gf_msg_trace (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_trace(this->name, 0, "Returning %d", ret);
+ return ret;
}
int
-glusterd_handle_mgmt_v3_lock (rpcsvc_request_t *req)
+glusterd_handle_mgmt_v3_lock(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- glusterd_handle_mgmt_v3_lock_fn);
+ return glusterd_big_locked_handler(req, glusterd_handle_mgmt_v3_lock_fn);
}
static int
-glusterd_handle_pre_validate (rpcsvc_request_t *req)
+glusterd_handle_pre_validate(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- glusterd_handle_pre_validate_fn);
+ return glusterd_big_locked_handler(req, glusterd_handle_pre_validate_fn);
}
static int
-glusterd_handle_brick_op (rpcsvc_request_t *req)
+glusterd_handle_brick_op(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- glusterd_handle_brick_op_fn);
+ return glusterd_big_locked_handler(req, glusterd_handle_brick_op_fn);
}
static int
-glusterd_handle_commit (rpcsvc_request_t *req)
+glusterd_handle_commit(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- glusterd_handle_commit_fn);
+ return glusterd_big_locked_handler(req, glusterd_handle_commit_fn);
}
static int
-glusterd_handle_post_validate (rpcsvc_request_t *req)
+glusterd_handle_post_validate(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- glusterd_handle_post_validate_fn);
+ return glusterd_big_locked_handler(req, glusterd_handle_post_validate_fn);
}
int
-glusterd_handle_mgmt_v3_unlock (rpcsvc_request_t *req)
+glusterd_handle_mgmt_v3_unlock(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler (req,
- glusterd_handle_mgmt_v3_unlock_fn);
+ return glusterd_big_locked_handler(req, glusterd_handle_mgmt_v3_unlock_fn);
}
rpcsvc_actor_t gd_svc_mgmt_v3_actors[GLUSTERD_MGMT_V3_MAXVALUE] = {
- [GLUSTERD_MGMT_V3_NULL] = { "NULL", GLUSTERD_MGMT_V3_NULL, glusterd_mgmt_v3_null, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_LOCK] = { "MGMT_V3_LOCK", GLUSTERD_MGMT_V3_LOCK, glusterd_handle_mgmt_v3_lock, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_PRE_VALIDATE] = { "PRE_VAL", GLUSTERD_MGMT_V3_PRE_VALIDATE, glusterd_handle_pre_validate, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_BRICK_OP] = { "BRCK_OP", GLUSTERD_MGMT_V3_BRICK_OP, glusterd_handle_brick_op, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_COMMIT] = { "COMMIT", GLUSTERD_MGMT_V3_COMMIT, glusterd_handle_commit, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_POST_VALIDATE] = { "POST_VAL", GLUSTERD_MGMT_V3_POST_VALIDATE, glusterd_handle_post_validate, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_UNLOCK] = { "MGMT_V3_UNLOCK", GLUSTERD_MGMT_V3_UNLOCK, glusterd_handle_mgmt_v3_unlock, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_NULL] = {"NULL", GLUSTERD_MGMT_V3_NULL,
+ glusterd_mgmt_v3_null, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_LOCK] = {"MGMT_V3_LOCK", GLUSTERD_MGMT_V3_LOCK,
+ glusterd_handle_mgmt_v3_lock, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_PRE_VALIDATE] = {"PRE_VAL", GLUSTERD_MGMT_V3_PRE_VALIDATE,
+ glusterd_handle_pre_validate, NULL, 0,
+ DRC_NA},
+ [GLUSTERD_MGMT_V3_BRICK_OP] = {"BRCK_OP", GLUSTERD_MGMT_V3_BRICK_OP,
+ glusterd_handle_brick_op, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_COMMIT] = {"COMMIT", GLUSTERD_MGMT_V3_COMMIT,
+ glusterd_handle_commit, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_POST_VALIDATE] = {"POST_VAL",
+ GLUSTERD_MGMT_V3_POST_VALIDATE,
+ glusterd_handle_post_validate, NULL, 0,
+ DRC_NA},
+ [GLUSTERD_MGMT_V3_UNLOCK] = {"MGMT_V3_UNLOCK", GLUSTERD_MGMT_V3_UNLOCK,
+ glusterd_handle_mgmt_v3_unlock, NULL, 0,
+ DRC_NA},
};
struct rpcsvc_program gd_svc_mgmt_v3_prog = {
- .progname = "GlusterD svc mgmt v3",
- .prognum = GD_MGMT_PROGRAM,
- .progver = GD_MGMT_V3_VERSION,
- .numactors = GLUSTERD_MGMT_V3_MAXVALUE,
- .actors = gd_svc_mgmt_v3_actors,
- .synctask = _gf_true,
+ .progname = "GlusterD svc mgmt v3",
+ .prognum = GD_MGMT_PROGRAM,
+ .progver = GD_MGMT_V3_VERSION,
+ .numactors = GLUSTERD_MGMT_V3_MAXVALUE,
+ .actors = gd_svc_mgmt_v3_actors,
+ .synctask = _gf_true,
};
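The actor table that closes this file maps each GLUSTERD_MGMT_V3_* procedure number to its handler through designated initializers, so dispatch is a bounds-checked array index rather than a switch. A reduced sketch of the same pattern, with illustrative names only, not the rpcsvc API:

enum { PROC_NULL = 0, PROC_LOCK, PROC_UNLOCK, PROC_MAX };

typedef int (*actor_fn)(void *req);

typedef struct {
    const char *name;
    actor_fn fn;
} actor_t;

static int handle_null(void *req)   { (void)req; return 0; }
static int handle_lock(void *req)   { (void)req; return 0; }
static int handle_unlock(void *req) { (void)req; return 0; }

static const actor_t actors[PROC_MAX] = {
    [PROC_NULL]   = {"NULL", handle_null},
    [PROC_LOCK]   = {"LOCK", handle_lock},
    [PROC_UNLOCK] = {"UNLOCK", handle_unlock},
};

static int
dispatch(unsigned proc, void *req)
{
    if (proc >= PROC_MAX || !actors[proc].fn)
        return -1; /* unknown procedure fails closed */
    return actors[proc].fn(req);
}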
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index 2714478cda6..6534530b52f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -28,2637 +28,2515 @@
extern struct rpc_clnt_program gd_mgmt_v3_prog;
-
void
-gd_mgmt_v3_collate_errors (struct syncargs *args, int op_ret, int op_errno,
- char *op_errstr, int op_code, uuid_t peerid,
- u_char *uuid)
+gd_mgmt_v3_collate_errors(struct syncargs *args, int op_ret, int op_errno,
+ char *op_errstr, int op_code, uuid_t peerid,
+ u_char *uuid)
{
- char *peer_str = NULL;
- char err_str[PATH_MAX] = "Please check log file for details.";
- char op_err[PATH_MAX] = "";
- xlator_t *this = NULL;
- int is_operrstr_blk = 0;
- char *err_string = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- int32_t len = 0;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (args);
- GF_ASSERT (uuid);
-
- if (op_ret) {
- args->op_ret = op_ret;
- args->op_errno = op_errno;
-
- rcu_read_lock ();
- peerinfo = glusterd_peerinfo_find (peerid, NULL);
- if (peerinfo)
- peer_str = gf_strdup (peerinfo->hostname);
- else
- peer_str = gf_strdup (uuid_utoa (uuid));
-
- rcu_read_unlock ();
-
- is_operrstr_blk = (op_errstr && strcmp (op_errstr, ""));
- err_string = (is_operrstr_blk) ? op_errstr : err_str;
-
- switch (op_code) {
- case GLUSTERD_MGMT_V3_LOCK:
- {
- snprintf (op_err, sizeof(op_err),
- "Locking failed on %s. %s",
- peer_str, err_string);
- break;
- }
- case GLUSTERD_MGMT_V3_PRE_VALIDATE:
- {
- snprintf (op_err, sizeof(op_err),
- "Pre Validation failed on %s. %s",
- peer_str, err_string);
- break;
- }
- case GLUSTERD_MGMT_V3_BRICK_OP:
- {
- snprintf (op_err, sizeof(op_err),
- "Brick ops failed on %s. %s",
- peer_str, err_string);
- break;
- }
- case GLUSTERD_MGMT_V3_COMMIT:
- {
- snprintf (op_err, sizeof(op_err),
- "Commit failed on %s. %s",
- peer_str, err_string);
- break;
- }
- case GLUSTERD_MGMT_V3_POST_VALIDATE:
- {
- snprintf (op_err, sizeof(op_err),
- "Post Validation failed on %s. %s",
- peer_str, err_string);
- break;
- }
- case GLUSTERD_MGMT_V3_UNLOCK:
- {
- snprintf (op_err, sizeof(op_err),
- "Unlocking failed on %s. %s",
- peer_str, err_string);
- break;
- }
- default :
- snprintf (op_err, sizeof(op_err),
- "Unknown error! on %s. %s",
- peer_str, err_string);
- }
-
- if (args->errstr) {
- len = snprintf (err_str, sizeof(err_str),
- "%s\n%s", args->errstr, op_err);
- if (len < 0) {
- strcpy(err_str, "<error>");
- }
- GF_FREE (args->errstr);
- args->errstr = NULL;
- } else
- snprintf (err_str, sizeof(err_str), "%s", op_err);
-
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_OP_FAIL, "%s", op_err);
- args->errstr = gf_strdup (err_str);
- }
-
- GF_FREE (peer_str);
-
- return;
+ char *peer_str = NULL;
+ char err_str[PATH_MAX] = "Please check log file for details.";
+ char op_err[PATH_MAX] = "";
+ xlator_t *this = NULL;
+ int is_operrstr_blk = 0;
+ char *err_string = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ int32_t len = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(args);
+ GF_ASSERT(uuid);
+
+ if (op_ret) {
+ args->op_ret = op_ret;
+ args->op_errno = op_errno;
+
+ rcu_read_lock();
+ peerinfo = glusterd_peerinfo_find(peerid, NULL);
+ if (peerinfo)
+ peer_str = gf_strdup(peerinfo->hostname);
+ else
+ peer_str = gf_strdup(uuid_utoa(uuid));
+
+ rcu_read_unlock();
+
+ is_operrstr_blk = (op_errstr && strcmp(op_errstr, ""));
+ err_string = (is_operrstr_blk) ? op_errstr : err_str;
+
+ switch (op_code) {
+ case GLUSTERD_MGMT_V3_LOCK: {
+ snprintf(op_err, sizeof(op_err), "Locking failed on %s. %s",
+ peer_str, err_string);
+ break;
+ }
+ case GLUSTERD_MGMT_V3_PRE_VALIDATE: {
+ snprintf(op_err, sizeof(op_err),
+ "Pre Validation failed on %s. %s", peer_str,
+ err_string);
+ break;
+ }
+ case GLUSTERD_MGMT_V3_BRICK_OP: {
+ snprintf(op_err, sizeof(op_err), "Brick ops failed on %s. %s",
+ peer_str, err_string);
+ break;
+ }
+ case GLUSTERD_MGMT_V3_COMMIT: {
+ snprintf(op_err, sizeof(op_err), "Commit failed on %s. %s",
+ peer_str, err_string);
+ break;
+ }
+ case GLUSTERD_MGMT_V3_POST_VALIDATE: {
+ snprintf(op_err, sizeof(op_err),
+ "Post Validation failed on %s. %s", peer_str,
+ err_string);
+ break;
+ }
+ case GLUSTERD_MGMT_V3_UNLOCK: {
+ snprintf(op_err, sizeof(op_err), "Unlocking failed on %s. %s",
+ peer_str, err_string);
+ break;
+ }
+ default:
+ snprintf(op_err, sizeof(op_err), "Unknown error! on %s. %s",
+ peer_str, err_string);
+ }
+
+ if (args->errstr) {
+ len = snprintf(err_str, sizeof(err_str), "%s\n%s", args->errstr,
+ op_err);
+ if (len < 0) {
+ strcpy(err_str, "<error>");
+ }
+ GF_FREE(args->errstr);
+ args->errstr = NULL;
+ } else
+ snprintf(err_str, sizeof(err_str), "%s", op_err);
+
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_OP_FAIL, "%s",
+ op_err);
+ args->errstr = gf_strdup(err_str);
+ }
+
+ GF_FREE(peer_str);
+
+ return;
}
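gd_mgmt_v3_collate_errors grows args->errstr by one line per failed peer: format the new per-peer message, append it under the previous contents, and swap in the new string. A minimal sketch of that append-and-swap pattern, with plain strdup/free standing in for gf_strdup/GF_FREE:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Append `msg` to `*errstr`, separating entries with a newline, as the
 * collation above does with args->errstr. */
static void collate(char **errstr, const char *msg)
{
    char buf[1024];

    if (*errstr) {
        snprintf(buf, sizeof(buf), "%s\n%s", *errstr, msg);
        free(*errstr);
    } else {
        snprintf(buf, sizeof(buf), "%s", msg);
    }
    *errstr = strdup(buf);
}

int main(void)
{
    char *errstr = NULL;

    collate(&errstr, "Locking failed on peer1.");
    collate(&errstr, "Locking failed on peer2.");
    printf("%s\n", errstr);
    free(errstr);
    return 0;
}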
int32_t
-gd_mgmt_v3_pre_validate_fn (glusterd_op_t op, dict_t *dict,
- char **op_errstr, dict_t *rsp_dict,
- uint32_t *op_errno)
+gd_mgmt_v3_pre_validate_fn(glusterd_op_t op, dict_t *dict, char **op_errstr,
+ dict_t *rsp_dict, uint32_t *op_errno)
{
- int32_t ret = -1;
- xlator_t *this = NULL;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (dict);
- GF_ASSERT (op_errstr);
- GF_ASSERT (rsp_dict);
- GF_VALIDATE_OR_GOTO (this->name, op_errno, out);
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(dict);
+ GF_ASSERT(op_errstr);
+ GF_ASSERT(rsp_dict);
+ GF_VALIDATE_OR_GOTO(this->name, op_errno, out);
- switch (op) {
+ switch (op) {
case GD_OP_SNAP:
- ret = glusterd_snapshot_prevalidate (dict, op_errstr,
- rsp_dict, op_errno);
+ ret = glusterd_snapshot_prevalidate(dict, op_errstr, rsp_dict,
+ op_errno);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_PRE_VALIDATION_FAIL,
- "Snapshot Prevalidate Failed");
- goto out;
- }
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_PRE_VALIDATION_FAIL,
+ "Snapshot Prevalidate Failed");
+ goto out;
+ }
- break;
+ break;
case GD_OP_REPLACE_BRICK:
- ret = glusterd_op_stage_replace_brick (dict, op_errstr,
- rsp_dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_PRE_VALIDATION_FAIL,
- "Replace-brick prevalidation failed.");
- goto out;
- }
- break;
+ ret = glusterd_op_stage_replace_brick(dict, op_errstr, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_PRE_VALIDATION_FAIL,
+ "Replace-brick prevalidation failed.");
+ goto out;
+ }
+ break;
case GD_OP_ADD_TIER_BRICK:
case GD_OP_ADD_BRICK:
- ret = glusterd_op_stage_add_brick (dict, op_errstr, rsp_dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_PRE_VALIDATION_FAIL,
- "ADD-brick prevalidation failed.");
- goto out;
- }
- break;
+ ret = glusterd_op_stage_add_brick(dict, op_errstr, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_PRE_VALIDATION_FAIL,
+ "ADD-brick prevalidation failed.");
+ goto out;
+ }
+ break;
case GD_OP_START_VOLUME:
- ret = glusterd_op_stage_start_volume (dict, op_errstr,
- rsp_dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_PRE_VALIDATION_FAIL,
- "Volume start prevalidation failed.");
- goto out;
- }
- break;
+ ret = glusterd_op_stage_start_volume(dict, op_errstr, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_PRE_VALIDATION_FAIL,
+ "Volume start prevalidation failed.");
+ goto out;
+ }
+ break;
case GD_OP_STOP_VOLUME:
- ret = glusterd_op_stage_stop_volume (dict, op_errstr);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_PRE_VALIDATION_FAIL,
- "Volume stop prevalidation failed.");
- goto out;
- }
- break;
+ ret = glusterd_op_stage_stop_volume(dict, op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_PRE_VALIDATION_FAIL,
+ "Volume stop prevalidation failed.");
+ goto out;
+ }
+ break;
case GD_OP_TIER_START_STOP:
case GD_OP_TIER_STATUS:
case GD_OP_DETACH_TIER_STATUS:
case GD_OP_REMOVE_TIER_BRICK:
- ret = glusterd_op_stage_tier (dict, op_errstr, rsp_dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_COMMAND_NOT_FOUND, "tier "
- "prevalidation failed");
- goto out;
- }
- break;
+ ret = glusterd_op_stage_tier(dict, op_errstr, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMAND_NOT_FOUND,
+ "tier "
+ "prevalidation failed");
+ goto out;
+ }
+ break;
case GD_OP_RESET_BRICK:
- ret = glusterd_reset_brick_prevalidate (dict, op_errstr,
- rsp_dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_PRE_VALIDATION_FAIL,
- "Reset brick prevalidation failed.");
- goto out;
- }
- break;
+ ret = glusterd_reset_brick_prevalidate(dict, op_errstr, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_PRE_VALIDATION_FAIL,
+ "Reset brick prevalidation failed.");
+ goto out;
+ }
+ break;
case GD_OP_MAX_OPVERSION:
- ret = 0;
- break;
+ ret = 0;
+ break;
default:
- break;
- }
+ break;
+ }
- ret = 0;
+ ret = 0;
out:
- gf_msg_debug (this->name, 0, "OP = %d. Returning %d", op, ret);
- return ret;
+ gf_msg_debug(this->name, 0, "OP = %d. Returning %d", op, ret);
+ return ret;
}
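gd_mgmt_v3_pre_validate_fn, like every phase function in this file, follows the same control-flow convention: ret starts at -1, each failing step logs and jumps to out, and ret is set to 0 only after the last step succeeds. Distilled to its shape:

#include <stdio.h>

static int step(int fail) { return fail ? -1 : 0; }

static int phase_fn(int fail_first, int fail_second)
{
    int ret = -1; /* pessimistic default, as in the functions above */

    ret = step(fail_first);
    if (ret)
        goto out;

    ret = step(fail_second);
    if (ret)
        goto out;

    ret = 0;
out:
    fprintf(stderr, "Returning %d\n", ret);
    return ret;
}

int main(void)
{
    return phase_fn(0, 0);
}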
int32_t
-gd_mgmt_v3_brick_op_fn (glusterd_op_t op, dict_t *dict,
- char **op_errstr, dict_t *rsp_dict)
+gd_mgmt_v3_brick_op_fn(glusterd_op_t op, dict_t *dict, char **op_errstr,
+ dict_t *rsp_dict)
{
- int32_t ret = -1;
- xlator_t *this = NULL;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (dict);
- GF_ASSERT (op_errstr);
- GF_ASSERT (rsp_dict);
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(dict);
+ GF_ASSERT(op_errstr);
+ GF_ASSERT(rsp_dict);
- switch (op) {
- case GD_OP_SNAP:
- {
- ret = glusterd_snapshot_brickop (dict, op_errstr, rsp_dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_BRICK_OP_FAIL,
- "snapshot brickop failed");
- goto out;
- }
- break;
+ switch (op) {
+ case GD_OP_SNAP: {
+ ret = glusterd_snapshot_brickop(dict, op_errstr, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_BRICK_OP_FAIL,
+ "snapshot brickop failed");
+ goto out;
+ }
+ break;
}
default:
- break;
- }
+ break;
+ }
- ret = 0;
+ ret = 0;
out:
- gf_msg_trace (this->name, 0, "OP = %d. Returning %d", op, ret);
- return ret;
+ gf_msg_trace(this->name, 0, "OP = %d. Returning %d", op, ret);
+ return ret;
}
int32_t
-gd_mgmt_v3_commit_fn (glusterd_op_t op, dict_t *dict,
- char **op_errstr, uint32_t *op_errno,
- dict_t *rsp_dict)
+gd_mgmt_v3_commit_fn(glusterd_op_t op, dict_t *dict, char **op_errstr,
+ uint32_t *op_errno, dict_t *rsp_dict)
{
- int32_t ret = -1;
- xlator_t *this = NULL;
- int32_t cmd = 0;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (dict);
- GF_ASSERT (op_errstr);
- GF_VALIDATE_OR_GOTO (this->name, op_errno, out);
- GF_ASSERT (rsp_dict);
-
- glusterd_op_commit_hook (op, dict, GD_COMMIT_HOOK_PRE);
- switch (op) {
- case GD_OP_SNAP:
- {
- ret = glusterd_snapshot (dict, op_errstr,
- op_errno, rsp_dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_COMMIT_OP_FAIL,
- "Snapshot Commit Failed");
- goto out;
- }
- break;
- }
- case GD_OP_REPLACE_BRICK:
- {
- ret = glusterd_op_replace_brick (dict, rsp_dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_COMMIT_OP_FAIL,
- "Replace-brick commit failed.");
- goto out;
- }
- break;
- }
- case GD_OP_ADD_BRICK:
- {
- ret = glusterd_op_add_brick (dict, op_errstr);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_COMMIT_OP_FAIL,
- "Add-brick commit failed.");
- goto out;
- }
- break;
-
- }
- case GD_OP_START_VOLUME:
- {
- ret = glusterd_op_start_volume (dict, op_errstr);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_COMMIT_OP_FAIL,
- "Volume start commit failed.");
- goto out;
- }
- break;
-
- }
- case GD_OP_STOP_VOLUME:
- {
- ret = glusterd_op_stop_volume (dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_COMMIT_OP_FAIL,
- "Volume stop commit failed.");
- goto out;
- }
- break;
- }
- case GD_OP_RESET_BRICK:
- {
- ret = glusterd_op_reset_brick (dict, rsp_dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_COMMIT_OP_FAIL,
- "Reset-brick commit failed.");
- goto out;
- }
- break;
- }
- case GD_OP_MAX_OPVERSION:
- {
- ret = glusterd_op_get_max_opversion (op_errstr,
- rsp_dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_COMMIT_OP_FAIL,
- "Commit failed.");
- goto out;
- }
- break;
- }
- case GD_OP_TIER_START_STOP:
- {
- ret = glusterd_op_tier_start_stop (dict, op_errstr,
- rsp_dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_COMMIT_OP_FAIL,
- "tier commit failed.");
- goto out;
- }
- break;
- }
- case GD_OP_REMOVE_TIER_BRICK:
- {
- ret = glusterd_op_remove_tier_brick (dict, op_errstr,
- rsp_dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_COMMIT_OP_FAIL,
- "tier detach commit failed.");
- goto out;
- }
- ret = dict_get_int32n (dict, "rebalance-command",
- SLEN ("rebalance-command"),
- &cmd);
- if (ret) {
- gf_msg_debug (this->name, 0, "cmd not found");
- goto out;
- }
-
- if (cmd != GF_DEFRAG_CMD_DETACH_STOP)
- break;
- }
- case GD_OP_DETACH_TIER_STATUS:
- case GD_OP_TIER_STATUS:
- {
- ret = glusterd_op_tier_status (dict, op_errstr,
- rsp_dict, op);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_COMMIT_OP_FAIL,
- "tier status commit failed");
- goto out;
- }
- break;
- }
- case GD_OP_ADD_TIER_BRICK:
- {
- ret = glusterd_op_add_tier_brick (dict, op_errstr);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_COMMIT_OP_FAIL,
- "tier add-brick commit failed.");
- goto out;
- }
- break;
-
- }
-
- default:
- break;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ int32_t cmd = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(dict);
+ GF_ASSERT(op_errstr);
+ GF_VALIDATE_OR_GOTO(this->name, op_errno, out);
+ GF_ASSERT(rsp_dict);
+
+ glusterd_op_commit_hook(op, dict, GD_COMMIT_HOOK_PRE);
+ switch (op) {
+ case GD_OP_SNAP: {
+ ret = glusterd_snapshot(dict, op_errstr, op_errno, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_COMMIT_OP_FAIL,
+ "Snapshot Commit Failed");
+ goto out;
+ }
+ break;
+ }
+ case GD_OP_REPLACE_BRICK: {
+ ret = glusterd_op_replace_brick(dict, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
+ "Replace-brick commit failed.");
+ goto out;
+ }
+ break;
+ }
+ case GD_OP_ADD_BRICK: {
+ ret = glusterd_op_add_brick(dict, op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
+ "Add-brick commit failed.");
+ goto out;
+ }
+ break;
+ }
+ case GD_OP_START_VOLUME: {
+ ret = glusterd_op_start_volume(dict, op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
+ "Volume start commit failed.");
+ goto out;
+ }
+ break;
+ }
+ case GD_OP_STOP_VOLUME: {
+ ret = glusterd_op_stop_volume(dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
+ "Volume stop commit failed.");
+ goto out;
+ }
+ break;
+ }
+ case GD_OP_RESET_BRICK: {
+ ret = glusterd_op_reset_brick(dict, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
+ "Reset-brick commit failed.");
+ goto out;
+ }
+ break;
+ }
+ case GD_OP_MAX_OPVERSION: {
+ ret = glusterd_op_get_max_opversion(op_errstr, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
+ "Commit failed.");
+ goto out;
+ }
+ break;
+ }
+ case GD_OP_TIER_START_STOP: {
+ ret = glusterd_op_tier_start_stop(dict, op_errstr, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
+ "tier commit failed.");
+ goto out;
+ }
+ break;
+ }
+ case GD_OP_REMOVE_TIER_BRICK: {
+ ret = glusterd_op_remove_tier_brick(dict, op_errstr, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
+ "tier detach commit failed.");
+ goto out;
+ }
+ ret = dict_get_int32n(dict, "rebalance-command",
+ SLEN("rebalance-command"), &cmd);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "cmd not found");
+ goto out;
+ }
+
+ if (cmd != GF_DEFRAG_CMD_DETACH_STOP)
+ break;
}
-
- ret = 0;
-out:
- gf_msg_debug (this->name, 0, "OP = %d. Returning %d", op, ret);
- return ret;
-}
-
-int32_t
-gd_mgmt_v3_post_validate_fn (glusterd_op_t op, int32_t op_ret, dict_t *dict,
- char **op_errstr, dict_t *rsp_dict)
-{
- int32_t ret = -1;
- xlator_t *this = NULL;
- char *volname = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_svc_t *svc = NULL;
-
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (dict);
- GF_ASSERT (op_errstr);
- GF_ASSERT (rsp_dict);
-
- if (op_ret == 0)
- glusterd_op_commit_hook (op, dict, GD_COMMIT_HOOK_POST);
-
- switch (op) {
- case GD_OP_SNAP:
- {
- ret = glusterd_snapshot_postvalidate (dict, op_ret,
- op_errstr,
- rsp_dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_POST_VALIDATION_FAIL,
- "postvalidate operation failed");
- goto out;
- }
- break;
- }
- case GD_OP_ADD_BRICK:
- {
- ret = dict_get_strn (dict, "volname",
- SLEN ("volname"), &volname);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to get"
- " volume name");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, EINVAL,
- GD_MSG_VOL_NOT_FOUND, "Unable to "
- "allocate memory");
- goto out;
- }
- ret = glusterd_create_volfiles_and_notify_services (
- volinfo);
- if (ret)
- goto out;
- ret = glusterd_store_volinfo (volinfo,
- GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret)
- goto out;
- break;
-
- }
- case GD_OP_START_VOLUME:
- {
- ret = dict_get_strn (dict, "volname",
- SLEN ("volname"), &volname);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to get"
- " volume name");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, EINVAL,
- GD_MSG_VOL_NOT_FOUND, "Unable to "
- "allocate memory");
- goto out;
- }
-
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- svc = &(volinfo->tierd.svc);
- ret = svc->manager (svc, volinfo,
- PROC_START_NO_WAIT);
- if (ret)
- goto out;
- }
- break;
- }
- case GD_OP_STOP_VOLUME:
- {
- ret = dict_get_strn (dict, "volname",
- SLEN ("volname"), &volname);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to get"
- " volume name");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, EINVAL,
- GD_MSG_VOL_NOT_FOUND, "Unable to "
- "allocate memory");
- goto out;
- }
- break;
- }
- case GD_OP_ADD_TIER_BRICK:
- {
- ret = dict_get_strn (dict, "volname",
- SLEN ("volname"), &volname);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to get"
- " volume name");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, EINVAL,
- GD_MSG_VOL_NOT_FOUND, "Unable to "
- "allocate memory");
- goto out;
- }
- ret = glusterd_create_volfiles_and_notify_services (
- volinfo);
- if (ret)
- goto out;
- ret = glusterd_store_volinfo (volinfo,
- GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret)
- goto out;
- ret = dict_get_strn (dict, "volname",
- SLEN ("volname"), &volname);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to get"
- " volume name");
- goto out;
- }
-
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED, "dict set "
- "failed");
- goto out;
- }
- ret = -1;
- svc = &(volinfo->tierd.svc);
- ret = svc->manager (svc, volinfo,
- PROC_START_NO_WAIT);
- if (ret)
- goto out;
- }
-
- default:
- break;
+ case GD_OP_DETACH_TIER_STATUS:
+ case GD_OP_TIER_STATUS: {
+ ret = glusterd_op_tier_status(dict, op_errstr, rsp_dict, op);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_COMMIT_OP_FAIL,
+ "tier status commit failed");
+ goto out;
+ }
+ break;
+ }
+ case GD_OP_ADD_TIER_BRICK: {
+ ret = glusterd_op_add_tier_brick(dict, op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
+ "tier add-brick commit failed.");
+ goto out;
+ }
+ break;
}
- ret = 0;
+ default:
+ break;
+ }
+ ret = 0;
out:
- gf_msg_trace (this->name, 0, "OP = %d. Returning %d", op, ret);
- return ret;
+ gf_msg_debug(this->name, 0, "OP = %d. Returning %d", op, ret);
+ return ret;
}
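Note the guarded fallthrough in gd_mgmt_v3_commit_fn above: GD_OP_REMOVE_TIER_BRICK breaks out early unless the rebalance command is GF_DEFRAG_CMD_DETACH_STOP, in which case it continues into the tier-status cases. A stripped-down illustration of that conditional fallthrough:

#include <stdio.h>

enum { CMD_DETACH_STOP = 1, CMD_OTHER = 2 };
enum { OP_REMOVE_BRICK, OP_STATUS };

static void commit(int op, int cmd)
{
    switch (op) {
        case OP_REMOVE_BRICK:
            printf("remove-brick commit\n");
            if (cmd != CMD_DETACH_STOP)
                break;
            /* fall through: a detach-stop also needs the status commit */
        case OP_STATUS:
            printf("status commit\n");
            break;
    }
}

int main(void)
{
    commit(OP_REMOVE_BRICK, CMD_DETACH_STOP); /* prints both commits */
    commit(OP_REMOVE_BRICK, CMD_OTHER);       /* remove-brick only */
    return 0;
}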
int32_t
-gd_mgmt_v3_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_post_validate_fn(glusterd_op_t op, int32_t op_ret, dict_t *dict,
+ char **op_errstr, dict_t *rsp_dict)
{
- int32_t ret = -1;
- struct syncargs *args = NULL;
- gd1_mgmt_v3_lock_rsp rsp = {{0},};
- call_frame_t *frame = NULL;
- int32_t op_ret = -1;
- int32_t op_errno = -1;
- xlator_t *this = NULL;
- uuid_t *peerid = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
- GF_ASSERT (myframe);
-
- /* Even though the lock command has failed, while collating the errors
- (gd_mgmt_v3_collate_errors), args->op_ret and args->op_errno will be
- used. @args is obtained from frame->local. So before checking the
- status of the request and going out if its a failure, args should be
- set to frame->local. Otherwise, while collating args will be NULL.
- This applies to other phases such as prevalidate, brickop, commit and
- postvalidate also.
- */
- frame = myframe;
- args = frame->local;
- peerid = frame->cookie;
- frame->local = NULL;
- frame->cookie = NULL;
-
- if (-1 == req->rpc_status) {
- op_errno = ENOTCONN;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ char *volname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_svc_t *svc = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(dict);
+ GF_ASSERT(op_errstr);
+ GF_ASSERT(rsp_dict);
+
+ if (op_ret == 0)
+ glusterd_op_commit_hook(op, dict, GD_COMMIT_HOOK_POST);
+
+ switch (op) {
+ case GD_OP_SNAP: {
+ ret = glusterd_snapshot_postvalidate(dict, op_ret, op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_POST_VALIDATION_FAIL,
+ "postvalidate operation failed");
+ goto out;
+ }
+ break;
+ }
+ case GD_OP_ADD_BRICK: {
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get"
+ " volume name");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND,
+                   "Volume %s not found", volname);
+ goto out;
+ }
+ ret = glusterd_create_volfiles_and_notify_services(volinfo);
+ if (ret)
+ goto out;
+ ret = glusterd_store_volinfo(volinfo,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret)
+ goto out;
+ break;
+ }
+ case GD_OP_START_VOLUME: {
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get"
+ " volume name");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND,
+                   "Volume %s not found", volname);
+ goto out;
+ }
+
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ svc = &(volinfo->tierd.svc);
+ ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
+ if (ret)
+ goto out;
+ }
+ break;
+ }
+ case GD_OP_STOP_VOLUME: {
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get"
+ " volume name");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND,
+                   "Volume %s not found", volname);
+ goto out;
+ }
+ break;
+ }
+ case GD_OP_ADD_TIER_BRICK: {
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get"
+ " volume name");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND,
+                   "Volume %s not found", volname);
+ goto out;
+ }
+ ret = glusterd_create_volfiles_and_notify_services(volinfo);
+ if (ret)
+ goto out;
+ ret = glusterd_store_volinfo(volinfo,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret)
+ goto out;
+ svc = &(volinfo->tierd.svc);
+ ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
+ if (ret)
goto out;
}
- GF_VALIDATE_OR_GOTO_WITH_ERROR (this->name, iov, out, op_errno,
- EINVAL);
+ default:
+ break;
+ }
- ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
- if (ret < 0)
- goto out;
+ ret = 0;
- gf_uuid_copy (args->uuid, rsp.uuid);
+out:
+ gf_msg_trace(this->name, 0, "OP = %d. Returning %d", op, ret);
+ return ret;
+}
- op_ret = rsp.op_ret;
- op_errno = rsp.op_errno;
+int32_t
+gd_mgmt_v3_lock_cbk_fn(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ int32_t ret = -1;
+ struct syncargs *args = NULL;
+ gd1_mgmt_v3_lock_rsp rsp = {
+ {0},
+ };
+ call_frame_t *frame = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ xlator_t *this = NULL;
+ uuid_t *peerid = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+ GF_ASSERT(myframe);
+
+    /* Even though the lock command has failed, args->op_ret and
+       args->op_errno will be used while collating the errors
+       (gd_mgmt_v3_collate_errors). @args is obtained from frame->local, so
+       args must be picked up from frame->local before checking the status
+       of the request and bailing out on failure; otherwise args will be
+       NULL during collation. This applies to the other phases (prevalidate,
+       brickop, commit and postvalidate) as well.
+    */
+ frame = myframe;
+ args = frame->local;
+ peerid = frame->cookie;
+ frame->local = NULL;
+ frame->cookie = NULL;
+
+ if (-1 == req->rpc_status) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+ GF_VALIDATE_OR_GOTO_WITH_ERROR(this->name, iov, out, op_errno, EINVAL);
+
+ ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
+ if (ret < 0)
+ goto out;
+
+ gf_uuid_copy(args->uuid, rsp.uuid);
+
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
out:
- gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
- GLUSTERD_MGMT_V3_LOCK, *peerid, rsp.uuid);
- GF_FREE (peerid);
-
- if (rsp.dict.dict_val)
- free (rsp.dict.dict_val);
- /* req->rpc_status set to -1 means, STACK_DESTROY will be called from
- * the caller function.
- */
- if (req->rpc_status != -1)
- STACK_DESTROY (frame->root);
- synctask_barrier_wake(args);
- return 0;
+ gd_mgmt_v3_collate_errors(args, op_ret, op_errno, NULL,
+ GLUSTERD_MGMT_V3_LOCK, *peerid, rsp.uuid);
+ GF_FREE(peerid);
+
+ if (rsp.dict.dict_val)
+ free(rsp.dict.dict_val);
+    /* req->rpc_status set to -1 means STACK_DESTROY will be called from
+ * the caller function.
+ */
+ if (req->rpc_status != -1)
+ STACK_DESTROY(frame->root);
+ synctask_barrier_wake(args);
+ return 0;
}
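Per the comment at the top of gd_mgmt_v3_lock_cbk_fn, args and peerid must be detached from the frame before any early exit, because the out: path uses them even when the RPC itself failed. A condensed model of that ordering, using hypothetical minimal types rather than the glusterfs call-frame API:

#include <stdio.h>
#include <stdlib.h>

struct args  { int op_ret; };
struct frame { void *local; void *cookie; };

static int callback(struct frame *frame, int rpc_status)
{
    struct args *args = frame->local; /* detach FIRST, before any goto */
    int *peerid = frame->cookie;
    int op_ret = -1;

    frame->local = NULL;
    frame->cookie = NULL;

    if (rpc_status == -1)
        goto out; /* args is already valid here */

    op_ret = 0;
out:
    args->op_ret = op_ret; /* collation always has args to work with */
    free(peerid);
    return 0;
}

int main(void)
{
    struct frame f = {malloc(sizeof(struct args)), malloc(sizeof(int))};
    struct args *a = f.local;

    callback(&f, -1);
    printf("op_ret=%d\n", a->op_ret);
    free(a);
    return 0;
}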
int32_t
-gd_mgmt_v3_lock_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_lock_cbk(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
{
- return glusterd_big_locked_cbk (req, iov, count, myframe,
- gd_mgmt_v3_lock_cbk_fn);
+ return glusterd_big_locked_cbk(req, iov, count, myframe,
+ gd_mgmt_v3_lock_cbk_fn);
}
int
-gd_mgmt_v3_lock (glusterd_op_t op, dict_t *op_ctx,
- glusterd_peerinfo_t *peerinfo,
- struct syncargs *args, uuid_t my_uuid,
- uuid_t recv_uuid)
+gd_mgmt_v3_lock(glusterd_op_t op, dict_t *op_ctx, glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid, uuid_t recv_uuid)
{
- gd1_mgmt_v3_lock_req req = {{0},};
- int32_t ret = -1;
- xlator_t *this = NULL;
- uuid_t *peerid = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (op_ctx);
- GF_ASSERT (peerinfo);
- GF_ASSERT (args);
-
- ret = dict_allocate_and_serialize (op_ctx,
- &req.dict.dict_val,
- &req.dict.dict_len);
- if (ret)
- goto out;
-
- gf_uuid_copy (req.uuid, my_uuid);
- req.op = op;
-
- GD_ALLOC_COPY_UUID (peerid, peerinfo->uuid, ret);
- if (ret)
- goto out;
-
- ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerid,
- &gd_mgmt_v3_prog,
- GLUSTERD_MGMT_V3_LOCK,
- gd_mgmt_v3_lock_cbk,
- (xdrproc_t) xdr_gd1_mgmt_v3_lock_req);
+ gd1_mgmt_v3_lock_req req = {
+ {0},
+ };
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ uuid_t *peerid = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(op_ctx);
+ GF_ASSERT(peerinfo);
+ GF_ASSERT(args);
+
+ ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret)
+ goto out;
+
+ gf_uuid_copy(req.uuid, my_uuid);
+ req.op = op;
+
+ GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
+ if (ret)
+ goto out;
+
+ ret = gd_syncop_submit_request(peerinfo->rpc, &req, args, peerid,
+ &gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_LOCK,
+ gd_mgmt_v3_lock_cbk,
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_req);
out:
- GF_FREE (req.dict.dict_val);
- gf_msg_trace (this->name, 0, "Returning %d", ret);
- return ret;
+ GF_FREE(req.dict.dict_val);
+ gf_msg_trace(this->name, 0, "Returning %d", ret);
+ return ret;
}
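gd_mgmt_v3_lock serializes the op dict into the request, submits it, and frees the serialized buffer at out: on success and failure alike. A small sketch of that own-then-always-free shape, with a stand-in serializer rather than the real dict API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct req { char *buf; size_t len; };

/* Stand-in for dict_allocate_and_serialize(): hands us a heap buffer we own. */
static int serialize(const char *src, char **buf, size_t *len)
{
    *buf = strdup(src);
    if (!*buf)
        return -1;
    *len = strlen(*buf);
    return 0;
}

static int submit(struct req *r) { return r->len ? 0 : -1; }

static int send_lock_req(const char *ctx)
{
    struct req req = {0};
    int ret;

    ret = serialize(ctx, &req.buf, &req.len);
    if (ret)
        goto out;

    ret = submit(&req); /* submission encodes a copy; the buffer stays ours */
out:
    free(req.buf); /* freed on success and failure alike, as at out: above */
    return ret;
}

int main(void)
{
    return send_lock_req("op=lock volname=test");
}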
int
-glusterd_mgmt_v3_initiate_lockdown (glusterd_op_t op, dict_t *dict,
- char **op_errstr, uint32_t *op_errno,
- gf_boolean_t *is_acquired,
- uint32_t txn_generation)
+glusterd_mgmt_v3_initiate_lockdown(glusterd_op_t op, dict_t *dict,
+ char **op_errstr, uint32_t *op_errno,
+ gf_boolean_t *is_acquired,
+ uint32_t txn_generation)
{
- glusterd_peerinfo_t *peerinfo = NULL;
- int32_t ret = -1;
- int32_t peer_cnt = 0;
- struct syncargs args = {0};
- uuid_t peer_uuid = {0};
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- uint32_t timeout = 0;
-
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
- GF_ASSERT (conf);
-
- GF_ASSERT (dict);
- GF_ASSERT (op_errstr);
- GF_ASSERT (is_acquired);
-
- /* Cli will add timeout key to dict if the default timeout is
- * other than 2 minutes. Here we use this value to check whether
- * mgmt_v3_lock_timeout should be set to default value or we
- * need to change the value according to timeout value
- * i.e, timeout + 120 seconds. */
- ret = dict_get_uint32 (dict, "timeout", &timeout);
- if (!ret)
- conf->mgmt_v3_lock_timeout = timeout + 120;
-
- /* Trying to acquire multiple mgmt_v3 locks on local node */
- ret = glusterd_multiple_mgmt_v3_lock (dict, MY_UUID, op_errno);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_LOCK_GET_FAIL,
- "Failed to acquire mgmt_v3 locks on localhost");
- goto out;
- }
+ glusterd_peerinfo_t *peerinfo = NULL;
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
+ struct syncargs args = {0};
+ uuid_t peer_uuid = {0};
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ uint32_t timeout = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ GF_ASSERT(dict);
+ GF_ASSERT(op_errstr);
+ GF_ASSERT(is_acquired);
+
+    /* The CLI adds a "timeout" key to the dict whenever the timeout
+     * differs from the default of 2 minutes. If the key is present,
+     * extend mgmt_v3_lock_timeout to that value plus 120 seconds;
+     * otherwise the default lock timeout is left in place. */
+ ret = dict_get_uint32(dict, "timeout", &timeout);
+ if (!ret)
+ conf->mgmt_v3_lock_timeout = timeout + 120;
+
+ /* Trying to acquire multiple mgmt_v3 locks on local node */
+ ret = glusterd_multiple_mgmt_v3_lock(dict, MY_UUID, op_errno);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
+ "Failed to acquire mgmt_v3 locks on localhost");
+ goto out;
+ }
+
+ *is_acquired = _gf_true;
+
+ /* Sending mgmt_v3 lock req to other nodes in the cluster */
+ gd_syncargs_init(&args, NULL);
+ synctask_barrier_init((&args));
+ peer_cnt = 0;
+
+ rcu_read_lock();
+ cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
+ {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > txn_generation)
+ continue;
- *is_acquired = _gf_true;
-
- /* Sending mgmt_v3 lock req to other nodes in the cluster */
- gd_syncargs_init (&args, NULL);
- synctask_barrier_init((&args));
- peer_cnt = 0;
-
- rcu_read_lock ();
- cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
- /* Only send requests to peers who were available before the
- * transaction started
- */
- if (peerinfo->generation > txn_generation)
- continue;
-
- if (!peerinfo->connected)
- continue;
- if (op != GD_OP_SYNC_VOLUME &&
- peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
- continue;
-
- gd_mgmt_v3_lock (op, dict, peerinfo, &args,
- MY_UUID, peer_uuid);
- peer_cnt++;
- }
- rcu_read_unlock ();
+ if (!peerinfo->connected)
+ continue;
+ if (op != GD_OP_SYNC_VOLUME &&
+ peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
- if (0 == peer_cnt) {
- ret = 0;
- goto out;
- }
+ gd_mgmt_v3_lock(op, dict, peerinfo, &args, MY_UUID, peer_uuid);
+ peer_cnt++;
+ }
+ rcu_read_unlock();
- gd_synctask_barrier_wait((&args), peer_cnt);
+ if (0 == peer_cnt) {
+ ret = 0;
+ goto out;
+ }
- if (args.errstr)
- *op_errstr = gf_strdup (args.errstr);
+ gd_synctask_barrier_wait((&args), peer_cnt);
+
+ if (args.errstr)
+ *op_errstr = gf_strdup(args.errstr);
- ret = args.op_ret;
- *op_errno = args.op_errno;
+ ret = args.op_ret;
+ *op_errno = args.op_errno;
- gf_msg_debug (this->name, 0, "Sent lock op req for %s "
- "to %d peers. Returning %d", gd_op_list[op], peer_cnt, ret);
+ gf_msg_debug(this->name, 0,
+ "Sent lock op req for %s "
+ "to %d peers. Returning %d",
+ gd_op_list[op], peer_cnt, ret);
out:
- if (ret) {
- if (*op_errstr)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_LOCK_GET_FAIL, "%s",
- *op_errstr);
+ if (ret) {
+ if (*op_errstr)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
+ "%s", *op_errstr);
- ret = gf_asprintf (op_errstr,
- "Another transaction is in progress. "
- "Please try again after some time.");
+ ret = gf_asprintf(op_errstr,
+ "Another transaction is in progress. "
+ "Please try again after some time.");
- if (ret == -1)
- *op_errstr = NULL;
+ if (ret == -1)
+ *op_errstr = NULL;
- ret = -1;
- }
+ ret = -1;
+ }
- return ret;
+ return ret;
}
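As the comment in glusterd_mgmt_v3_initiate_lockdown explains, a CLI-supplied "timeout" key stretches the lock timeout to timeout + 120 seconds; without the key the default stands. In sketch form (the dict lookup is mocked, and the 120-second default is an assumption of the sketch):

#include <stdio.h>
#include <stdint.h>

#define DEFAULT_LOCK_TIMEOUT 120 /* seconds; assumed default for the sketch */

/* Stand-in for dict_get_uint32(): returns 0 only when the key was present. */
static int get_timeout(int cli_sent_key, uint32_t cli_value, uint32_t *out)
{
    if (!cli_sent_key)
        return -1;
    *out = cli_value;
    return 0;
}

int main(void)
{
    uint32_t timeout = 0;
    uint32_t lock_timeout = DEFAULT_LOCK_TIMEOUT;

    if (get_timeout(1, 300, &timeout) == 0)
        lock_timeout = timeout + 120; /* extend past the operation's window */

    printf("mgmt_v3_lock_timeout = %u\n", lock_timeout);
    return 0;
}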
int
-glusterd_pre_validate_aggr_rsp_dict (glusterd_op_t op,
- dict_t *aggr, dict_t *rsp)
+glusterd_pre_validate_aggr_rsp_dict(glusterd_op_t op, dict_t *aggr, dict_t *rsp)
{
- int32_t ret = 0;
- xlator_t *this = NULL;
+ int32_t ret = 0;
+ xlator_t *this = NULL;
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (aggr);
- GF_ASSERT (rsp);
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(aggr);
+ GF_ASSERT(rsp);
- switch (op) {
+ switch (op) {
case GD_OP_SNAP:
- ret = glusterd_snap_pre_validate_use_rsp_dict (aggr, rsp);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PRE_VALIDATION_FAIL,
- "Failed to aggregate prevalidate "
- "response dictionaries.");
- goto out;
- }
- break;
+ ret = glusterd_snap_pre_validate_use_rsp_dict(aggr, rsp);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
+ "Failed to aggregate prevalidate "
+ "response dictionaries.");
+ goto out;
+ }
+ break;
case GD_OP_REPLACE_BRICK:
- ret = glusterd_rb_use_rsp_dict (aggr, rsp);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PRE_VALIDATION_FAIL,
- "Failed to aggregate prevalidate "
- "response dictionaries.");
- goto out;
- }
- break;
+ ret = glusterd_rb_use_rsp_dict(aggr, rsp);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
+ "Failed to aggregate prevalidate "
+ "response dictionaries.");
+ goto out;
+ }
+ break;
case GD_OP_START_VOLUME:
case GD_OP_ADD_BRICK:
case GD_OP_ADD_TIER_BRICK:
- ret = glusterd_aggr_brick_mount_dirs (aggr, rsp);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_MOUNDIRS_AGGR_FAIL, "Failed to "
- "aggregate brick mount dirs");
- goto out;
- }
- break;
+ ret = glusterd_aggr_brick_mount_dirs(aggr, rsp);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_BRICK_MOUNDIRS_AGGR_FAIL,
+ "Failed to "
+ "aggregate brick mount dirs");
+ goto out;
+ }
+ break;
case GD_OP_RESET_BRICK:
- ret = glusterd_rb_use_rsp_dict (aggr, rsp);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PRE_VALIDATION_FAIL,
- "Failed to aggregate prevalidate "
- "response dictionaries.");
- goto out;
- }
+ ret = glusterd_rb_use_rsp_dict(aggr, rsp);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
+ "Failed to aggregate prevalidate "
+ "response dictionaries.");
+ goto out;
+ }
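+            /* fall through: the ops below require no rsp-dict aggregation */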
case GD_OP_STOP_VOLUME:
case GD_OP_TIER_STATUS:
case GD_OP_DETACH_TIER_STATUS:
case GD_OP_TIER_START_STOP:
case GD_OP_REMOVE_TIER_BRICK:
- break;
+ break;
case GD_OP_MAX_OPVERSION:
- break;
+ break;
default:
- ret = -1;
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "Invalid op (%s)",
- gd_op_list[op]);
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "Invalid op (%s)", gd_op_list[op]);
- break;
- }
+ break;
+ }
out:
- return ret;
+ return ret;
}
int32_t
-gd_mgmt_v3_pre_validate_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_pre_validate_cbk_fn(struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
- int32_t ret = -1;
- struct syncargs *args = NULL;
- gd1_mgmt_v3_pre_val_rsp rsp = {{0},};
- call_frame_t *frame = NULL;
- int32_t op_ret = -1;
- int32_t op_errno = -1;
- dict_t *rsp_dict = NULL;
- xlator_t *this = NULL;
- uuid_t *peerid = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
- GF_ASSERT (myframe);
-
- frame = myframe;
- args = frame->local;
- peerid = frame->cookie;
- frame->local = NULL;
- frame->cookie = NULL;
-
- if (-1 == req->rpc_status) {
- op_errno = ENOTCONN;
- goto out;
- }
-
- GF_VALIDATE_OR_GOTO_WITH_ERROR (this->name, iov, out, op_errno,
- EINVAL);
-
- ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gd1_mgmt_v3_pre_val_rsp);
- if (ret < 0)
- goto out;
-
- if (rsp.dict.dict_len) {
- /* Unserialize the dictionary */
- rsp_dict = dict_new ();
-
- ret = dict_unserialize (rsp.dict.dict_val,
- rsp.dict.dict_len,
- &rsp_dict);
- if (ret < 0) {
- free (rsp.dict.dict_val);
- goto out;
- } else {
- rsp_dict->extra_stdfree = rsp.dict.dict_val;
- }
- }
-
- gf_uuid_copy (args->uuid, rsp.uuid);
- pthread_mutex_lock (&args->lock_dict);
- {
- ret = glusterd_pre_validate_aggr_rsp_dict (rsp.op, args->dict,
- rsp_dict);
- }
- pthread_mutex_unlock (&args->lock_dict);
-
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_RESP_AGGR_FAIL, "%s",
- "Failed to aggregate response from "
- " node/brick");
- if (!rsp.op_ret)
- op_ret = ret;
- else {
- op_ret = rsp.op_ret;
- op_errno = rsp.op_errno;
- }
+ int32_t ret = -1;
+ struct syncargs *args = NULL;
+ gd1_mgmt_v3_pre_val_rsp rsp = {
+ {0},
+ };
+ call_frame_t *frame = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ dict_t *rsp_dict = NULL;
+ xlator_t *this = NULL;
+ uuid_t *peerid = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+ GF_ASSERT(myframe);
+
+ frame = myframe;
+ args = frame->local;
+ peerid = frame->cookie;
+ frame->local = NULL;
+ frame->cookie = NULL;
+
+ if (-1 == req->rpc_status) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+ GF_VALIDATE_OR_GOTO_WITH_ERROR(this->name, iov, out, op_errno, EINVAL);
+
+ ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_pre_val_rsp);
+ if (ret < 0)
+ goto out;
+
+ if (rsp.dict.dict_len) {
+ /* Unserialize the dictionary */
+ rsp_dict = dict_new();
+
+ ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict);
+ if (ret < 0) {
+ free(rsp.dict.dict_val);
+ goto out;
} else {
- op_ret = rsp.op_ret;
- op_errno = rsp.op_errno;
- }
+ rsp_dict->extra_stdfree = rsp.dict.dict_val;
+ }
+ }
+
+ gf_uuid_copy(args->uuid, rsp.uuid);
+ pthread_mutex_lock(&args->lock_dict);
+ {
+ ret = glusterd_pre_validate_aggr_rsp_dict(rsp.op, args->dict, rsp_dict);
+ }
+ pthread_mutex_unlock(&args->lock_dict);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RESP_AGGR_FAIL, "%s",
+               "Failed to aggregate response from node/brick");
+ if (!rsp.op_ret)
+ op_ret = ret;
+ else {
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+ }
+ } else {
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+ }
out:
- if (rsp_dict)
- dict_unref (rsp_dict);
-
- gd_mgmt_v3_collate_errors (args, op_ret, op_errno, rsp.op_errstr,
- GLUSTERD_MGMT_V3_PRE_VALIDATE,
- *peerid, rsp.uuid);
-
- if (rsp.op_errstr)
- free (rsp.op_errstr);
- GF_FREE (peerid);
- /* req->rpc_status set to -1 means, STACK_DESTROY will be called from
- * the caller function.
- */
- if (req->rpc_status != -1)
- STACK_DESTROY (frame->root);
- synctask_barrier_wake(args);
- return 0;
+ if (rsp_dict)
+ dict_unref(rsp_dict);
+
+ gd_mgmt_v3_collate_errors(args, op_ret, op_errno, rsp.op_errstr,
+ GLUSTERD_MGMT_V3_PRE_VALIDATE, *peerid, rsp.uuid);
+
+ if (rsp.op_errstr)
+ free(rsp.op_errstr);
+ GF_FREE(peerid);
+    /* req->rpc_status set to -1 means STACK_DESTROY will be called from
+ * the caller function.
+ */
+ if (req->rpc_status != -1)
+ STACK_DESTROY(frame->root);
+ synctask_barrier_wake(args);
+ return 0;
}
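The unserialize block in gd_mgmt_v3_pre_validate_cbk_fn either frees the wire buffer itself on failure or parks it in rsp_dict->extra_stdfree so that dict_unref releases it exactly once. A reduced model of that single-owner handoff (simplified dict, not the real one):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dict {
    char *extra_stdfree; /* freed when the dict is released */
};

static void dict_release(struct dict *d)
{
    free(d->extra_stdfree);
    free(d);
}

static struct dict *unserialize(char *wire_buf, int fail)
{
    struct dict *d = calloc(1, sizeof(*d));

    if (!d || fail) {
        free(wire_buf); /* failure path frees the buffer directly */
        free(d);
        return NULL;
    }
    d->extra_stdfree = wire_buf; /* success path: dict now owns the buffer */
    return d;
}

int main(void)
{
    struct dict *d = unserialize(strdup("payload"), 0);

    if (d)
        dict_release(d); /* one owner, one free */
    return 0;
}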
int32_t
-gd_mgmt_v3_pre_validate_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_pre_validate_cbk(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
{
- return glusterd_big_locked_cbk (req, iov, count, myframe,
- gd_mgmt_v3_pre_validate_cbk_fn);
+ return glusterd_big_locked_cbk(req, iov, count, myframe,
+ gd_mgmt_v3_pre_validate_cbk_fn);
}
int
-gd_mgmt_v3_pre_validate_req (glusterd_op_t op, dict_t *op_ctx,
- glusterd_peerinfo_t *peerinfo,
- struct syncargs *args, uuid_t my_uuid,
- uuid_t recv_uuid)
+gd_mgmt_v3_pre_validate_req(glusterd_op_t op, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid)
{
- int32_t ret = -1;
- gd1_mgmt_v3_pre_val_req req = {{0},};
- xlator_t *this = NULL;
- uuid_t *peerid = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (op_ctx);
- GF_ASSERT (peerinfo);
- GF_ASSERT (args);
-
- ret = dict_allocate_and_serialize (op_ctx,
- &req.dict.dict_val,
- &req.dict.dict_len);
- if (ret)
- goto out;
-
- gf_uuid_copy (req.uuid, my_uuid);
- req.op = op;
-
- GD_ALLOC_COPY_UUID (peerid, peerinfo->uuid, ret);
- if (ret)
- goto out;
-
- ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerid,
- &gd_mgmt_v3_prog,
- GLUSTERD_MGMT_V3_PRE_VALIDATE,
- gd_mgmt_v3_pre_validate_cbk,
- (xdrproc_t) xdr_gd1_mgmt_v3_pre_val_req);
+ int32_t ret = -1;
+ gd1_mgmt_v3_pre_val_req req = {
+ {0},
+ };
+ xlator_t *this = NULL;
+ uuid_t *peerid = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(op_ctx);
+ GF_ASSERT(peerinfo);
+ GF_ASSERT(args);
+
+ ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret)
+ goto out;
+
+ gf_uuid_copy(req.uuid, my_uuid);
+ req.op = op;
+
+ GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
+ if (ret)
+ goto out;
+
+ ret = gd_syncop_submit_request(
+ peerinfo->rpc, &req, args, peerid, &gd_mgmt_v3_prog,
+ GLUSTERD_MGMT_V3_PRE_VALIDATE, gd_mgmt_v3_pre_validate_cbk,
+ (xdrproc_t)xdr_gd1_mgmt_v3_pre_val_req);
out:
- GF_FREE (req.dict.dict_val);
- gf_msg_trace (this->name, 0, "Returning %d", ret);
- return ret;
+ GF_FREE(req.dict.dict_val);
+ gf_msg_trace(this->name, 0, "Returning %d", ret);
+ return ret;
}
int
-glusterd_mgmt_v3_pre_validate (glusterd_op_t op, dict_t *req_dict,
- char **op_errstr, uint32_t *op_errno,
- uint32_t txn_generation)
+glusterd_mgmt_v3_pre_validate(glusterd_op_t op, dict_t *req_dict,
+ char **op_errstr, uint32_t *op_errno,
+ uint32_t txn_generation)
{
- int32_t ret = -1;
- int32_t peer_cnt = 0;
- dict_t *rsp_dict = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- struct syncargs args = {0};
- uuid_t peer_uuid = {0};
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
- GF_ASSERT (conf);
-
- GF_ASSERT (req_dict);
- GF_ASSERT (op_errstr);
- GF_VALIDATE_OR_GOTO (this->name, op_errno, out);
-
- rsp_dict = dict_new ();
- if (!rsp_dict) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_CREATE_FAIL,
- "Failed to create response dictionary");
- goto out;
- }
-
- /* Pre Validation on local node */
- ret = gd_mgmt_v3_pre_validate_fn (op, req_dict, op_errstr,
- rsp_dict, op_errno);
-
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
+ dict_t *rsp_dict = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ struct syncargs args = {0};
+ uuid_t peer_uuid = {0};
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ GF_ASSERT(req_dict);
+ GF_ASSERT(op_errstr);
+ GF_VALIDATE_OR_GOTO(this->name, op_errno, out);
+
+ rsp_dict = dict_new();
+ if (!rsp_dict) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
+ "Failed to create response dictionary");
+ goto out;
+ }
+
+ /* Pre Validation on local node */
+ ret = gd_mgmt_v3_pre_validate_fn(op, req_dict, op_errstr, rsp_dict,
+ op_errno);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
+ "Pre Validation failed for "
+ "operation %s on local node",
+ gd_op_list[op]);
+
+ if (*op_errstr == NULL) {
+ ret = gf_asprintf(op_errstr,
+ "Pre-validation failed "
+ "on localhost. Please "
+ "check log file for details");
+ if (ret == -1)
+ *op_errstr = NULL;
+
+ ret = -1;
+ }
+ goto out;
+ }
+
+ if (op != GD_OP_MAX_OPVERSION) {
+ ret = glusterd_pre_validate_aggr_rsp_dict(op, req_dict, rsp_dict);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PRE_VALIDATION_FAIL,
- "Pre Validation failed for "
- "operation %s on local node",
- gd_op_list[op]);
-
- if (*op_errstr == NULL) {
- ret = gf_asprintf (op_errstr,
- "Pre-validation failed "
- "on localhost. Please "
- "check log file for details");
- if (ret == -1)
- *op_errstr = NULL;
-
- ret = -1;
- }
- goto out;
- }
-
- if (op != GD_OP_MAX_OPVERSION) {
- ret = glusterd_pre_validate_aggr_rsp_dict (op, req_dict,
- rsp_dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PRE_VALIDATION_FAIL, "%s",
- "Failed to aggregate response from "
- " node/brick");
- goto out;
- }
-
- dict_unref (rsp_dict);
- rsp_dict = NULL;
- }
-
- /* Sending Pre Validation req to other nodes in the cluster */
- gd_syncargs_init (&args, req_dict);
- synctask_barrier_init((&args));
- peer_cnt = 0;
-
- rcu_read_lock ();
- cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
- /* Only send requests to peers who were available before the
- * transaction started
- */
- if (peerinfo->generation > txn_generation)
- continue;
-
- if (!peerinfo->connected)
- continue;
- if (op != GD_OP_SYNC_VOLUME &&
- peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
- continue;
-
- gd_mgmt_v3_pre_validate_req (op, req_dict, peerinfo, &args,
- MY_UUID, peer_uuid);
- peer_cnt++;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
+ "%s",
+                   "Failed to aggregate response from node/brick");
+ goto out;
}
- rcu_read_unlock ();
- if (0 == peer_cnt) {
- ret = 0;
- goto out;
- }
+ dict_unref(rsp_dict);
+ rsp_dict = NULL;
+ }
+
+ /* Sending Pre Validation req to other nodes in the cluster */
+ gd_syncargs_init(&args, req_dict);
+ synctask_barrier_init((&args));
+ peer_cnt = 0;
+
+ rcu_read_lock();
+ cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
+ {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > txn_generation)
+ continue;
+
+ if (!peerinfo->connected)
+ continue;
+ if (op != GD_OP_SYNC_VOLUME &&
+ peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
+ gd_mgmt_v3_pre_validate_req(op, req_dict, peerinfo, &args, MY_UUID,
+ peer_uuid);
+ peer_cnt++;
+ }
+ rcu_read_unlock();
+
+ if (0 == peer_cnt) {
+ ret = 0;
+ goto out;
+ }
- gd_synctask_barrier_wait((&args), peer_cnt);
+ gd_synctask_barrier_wait((&args), peer_cnt);
- if (args.op_ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PRE_VALIDATION_FAIL,
- "Pre Validation failed on peers");
+ if (args.op_ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
+ "Pre Validation failed on peers");
- if (args.errstr)
- *op_errstr = gf_strdup (args.errstr);
- }
+ if (args.errstr)
+ *op_errstr = gf_strdup(args.errstr);
+ }
- ret = args.op_ret;
- *op_errno = args.op_errno;
+ ret = args.op_ret;
+ *op_errno = args.op_errno;
- gf_msg_debug (this->name, 0, "Sent pre valaidation req for %s "
- "to %d peers. Returning %d", gd_op_list[op], peer_cnt, ret);
+ gf_msg_debug(this->name, 0,
+                 "Sent pre validation req for %s "
+ "to %d peers. Returning %d",
+ gd_op_list[op], peer_cnt, ret);
out:
- return ret;
+ return ret;
}
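Each phase fans its request out to eligible peers, counting them in peer_cnt, then parks on a synctask barrier until that many callbacks have fired. A rough pthread rendering of that count-then-wait shape (glusterd uses its own synctask primitives, not pthreads):

#include <pthread.h>
#include <stdio.h>

#define NPEERS 3

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int done;

static void *peer_req(void *arg)
{
    (void)arg; /* the request itself is elided */
    pthread_mutex_lock(&lock);
    done++; /* each callback "wakes" the barrier once */
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t[NPEERS];
    int peer_cnt = 0, i;

    for (i = 0; i < NPEERS; i++) { /* send to eligible peers, counting them */
        pthread_create(&t[i], NULL, peer_req, NULL);
        peer_cnt++;
    }

    pthread_mutex_lock(&lock); /* wait for exactly peer_cnt completions */
    while (done < peer_cnt)
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);

    for (i = 0; i < NPEERS; i++)
        pthread_join(t[i], NULL);
    printf("all %d peers responded\n", peer_cnt);
    return 0;
}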
int
-glusterd_mgmt_v3_build_payload (dict_t **req, char **op_errstr, dict_t *dict,
- glusterd_op_t op)
+glusterd_mgmt_v3_build_payload(dict_t **req, char **op_errstr, dict_t *dict,
+ glusterd_op_t op)
{
- int32_t ret = -1;
- dict_t *req_dict = NULL;
- xlator_t *this = NULL;
- char *volname = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
- GF_ASSERT (op_errstr);
- GF_ASSERT (dict);
-
- req_dict = dict_new ();
- if (!req_dict)
- goto out;
-
- switch (op) {
+ int32_t ret = -1;
+ dict_t *req_dict = NULL;
+ xlator_t *this = NULL;
+ char *volname = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+ GF_ASSERT(op_errstr);
+ GF_ASSERT(dict);
+
+ req_dict = dict_new();
+ if (!req_dict)
+ goto out;
+
+ switch (op) {
case GD_OP_MAX_OPVERSION:
case GD_OP_SNAP:
- dict_copy (dict, req_dict);
- break;
+ dict_copy(dict, req_dict);
+ break;
case GD_OP_START_VOLUME:
case GD_OP_STOP_VOLUME:
case GD_OP_ADD_BRICK:
case GD_OP_REPLACE_BRICK:
case GD_OP_RESET_BRICK:
- case GD_OP_ADD_TIER_BRICK:
- {
- ret = dict_get_strn (dict, "volname",
- SLEN ("volname"), &volname);
- if (ret) {
- gf_msg (this->name, GF_LOG_CRITICAL, errno,
- GD_MSG_DICT_GET_FAILED,
- "volname is not present in "
- "operation ctx");
- goto out;
- }
-
- if (strcasecmp (volname, "all")) {
- ret = glusterd_dict_set_volid (dict,
- volname,
- op_errstr);
- if (ret)
- goto out;
- }
- dict_copy (dict, req_dict);
- }
- break;
+ case GD_OP_ADD_TIER_BRICK: {
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_CRITICAL, errno,
+ GD_MSG_DICT_GET_FAILED,
+ "volname is not present in "
+ "operation ctx");
+ goto out;
+ }
+
+ if (strcasecmp(volname, "all")) {
+ ret = glusterd_dict_set_volid(dict, volname, op_errstr);
+ if (ret)
+ goto out;
+ }
+ dict_copy(dict, req_dict);
+ } break;
case GD_OP_TIER_START_STOP:
case GD_OP_REMOVE_TIER_BRICK:
case GD_OP_DETACH_TIER_STATUS:
case GD_OP_TIER_STATUS:
- dict_copy (dict, req_dict);
- break;
+ dict_copy(dict, req_dict);
+ break;
default:
- break;
- }
+ break;
+ }
- *req = req_dict;
- ret = 0;
+ *req = req_dict;
+ ret = 0;
out:
- return ret;
+ return ret;
}
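glusterd_mgmt_v3_build_payload copies the op ctx into a fresh dict; for the volume ops it first insists on a volname and skips the volume-id stamp when the name is the keyword "all". The gate, reduced to its essentials (a bare string parameter stands in for the dict):

#include <stdio.h>
#include <string.h>
#include <strings.h>

static int build_payload(const char *volname)
{
    if (!volname) {
        fprintf(stderr, "volname is not present in operation ctx\n");
        return -1;
    }
    if (strcasecmp(volname, "all") != 0) {
        /* per-volume op: the volume-id would be stamped into the dict here */
        printf("stamping volume-id for %s\n", volname);
    }
    printf("payload built for %s\n", volname);
    return 0;
}

int main(void)
{
    build_payload("all");     /* cluster-wide op: no volume-id stamp */
    return build_payload("testvol");
}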
int32_t
-gd_mgmt_v3_brick_op_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_brick_op_cbk_fn(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
{
- int32_t ret = -1;
- struct syncargs *args = NULL;
- gd1_mgmt_v3_brick_op_rsp rsp = {{0},};
- call_frame_t *frame = NULL;
- int32_t op_ret = -1;
- int32_t op_errno = -1;
- xlator_t *this = NULL;
- uuid_t *peerid = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
- GF_ASSERT (myframe);
-
- frame = myframe;
- args = frame->local;
- peerid = frame->cookie;
- frame->local = NULL;
- frame->cookie = NULL;
-
- /* If the operation failed, then iov can be NULL. So better check the
- status of the operation and then worry about iov (if the status of
- the command is success)
- */
- if (-1 == req->rpc_status) {
- op_errno = ENOTCONN;
- goto out;
- }
-
- GF_VALIDATE_OR_GOTO_WITH_ERROR (this->name, iov, out, op_errno,
- EINVAL);
-
- ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gd1_mgmt_v3_brick_op_rsp);
- if (ret < 0)
- goto out;
-
- gf_uuid_copy (args->uuid, rsp.uuid);
-
- op_ret = rsp.op_ret;
- op_errno = rsp.op_errno;
+ int32_t ret = -1;
+ struct syncargs *args = NULL;
+ gd1_mgmt_v3_brick_op_rsp rsp = {
+ {0},
+ };
+ call_frame_t *frame = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ xlator_t *this = NULL;
+ uuid_t *peerid = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+ GF_ASSERT(myframe);
+
+ frame = myframe;
+ args = frame->local;
+ peerid = frame->cookie;
+ frame->local = NULL;
+ frame->cookie = NULL;
+
+    /* If the operation failed, iov can be NULL, so check the status of
+       the request first and look at iov only when it succeeded.
+    */
+ if (-1 == req->rpc_status) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+ GF_VALIDATE_OR_GOTO_WITH_ERROR(this->name, iov, out, op_errno, EINVAL);
+
+ ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_brick_op_rsp);
+ if (ret < 0)
+ goto out;
+
+ gf_uuid_copy(args->uuid, rsp.uuid);
+
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
out:
- gd_mgmt_v3_collate_errors (args, op_ret, op_errno, rsp.op_errstr,
- GLUSTERD_MGMT_V3_BRICK_OP, *peerid,
- rsp.uuid);
-
- if (rsp.op_errstr)
- free (rsp.op_errstr);
-
- if (rsp.dict.dict_val)
- free (rsp.dict.dict_val);
- GF_FREE (peerid);
- /* req->rpc_status set to -1 means, STACK_DESTROY will be called from
- * the caller function.
- */
- if (req->rpc_status != -1)
- STACK_DESTROY (frame->root);
- synctask_barrier_wake(args);
- return 0;
+ gd_mgmt_v3_collate_errors(args, op_ret, op_errno, rsp.op_errstr,
+ GLUSTERD_MGMT_V3_BRICK_OP, *peerid, rsp.uuid);
+
+ if (rsp.op_errstr)
+ free(rsp.op_errstr);
+
+ if (rsp.dict.dict_val)
+ free(rsp.dict.dict_val);
+ GF_FREE(peerid);
+    /* req->rpc_status set to -1 means STACK_DESTROY will be called from
+ * the caller function.
+ */
+ if (req->rpc_status != -1)
+ STACK_DESTROY(frame->root);
+ synctask_barrier_wake(args);
+ return 0;
}
int32_t
-gd_mgmt_v3_brick_op_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_brick_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
{
- return glusterd_big_locked_cbk (req, iov, count, myframe,
- gd_mgmt_v3_brick_op_cbk_fn);
+ return glusterd_big_locked_cbk(req, iov, count, myframe,
+ gd_mgmt_v3_brick_op_cbk_fn);
}
int
-gd_mgmt_v3_brick_op_req (glusterd_op_t op, dict_t *op_ctx,
- glusterd_peerinfo_t *peerinfo,
- struct syncargs *args, uuid_t my_uuid,
- uuid_t recv_uuid)
+gd_mgmt_v3_brick_op_req(glusterd_op_t op, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo, struct syncargs *args,
+ uuid_t my_uuid, uuid_t recv_uuid)
{
- int32_t ret = -1;
- gd1_mgmt_v3_brick_op_req req = {{0},};
- xlator_t *this = NULL;
- uuid_t *peerid = {0,};
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (op_ctx);
- GF_ASSERT (peerinfo);
- GF_ASSERT (args);
-
- ret = dict_allocate_and_serialize (op_ctx,
- &req.dict.dict_val,
- &req.dict.dict_len);
- if (ret)
- goto out;
-
- gf_uuid_copy (req.uuid, my_uuid);
- req.op = op;
-
- GD_ALLOC_COPY_UUID (peerid, peerinfo->uuid, ret);
- if (ret)
- goto out;
-
- ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerid,
- &gd_mgmt_v3_prog,
- GLUSTERD_MGMT_V3_BRICK_OP,
- gd_mgmt_v3_brick_op_cbk,
- (xdrproc_t) xdr_gd1_mgmt_v3_brick_op_req);
+ int32_t ret = -1;
+ gd1_mgmt_v3_brick_op_req req = {
+ {0},
+ };
+ xlator_t *this = NULL;
+    uuid_t *peerid = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(op_ctx);
+ GF_ASSERT(peerinfo);
+ GF_ASSERT(args);
+
+ ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret)
+ goto out;
+
+ gf_uuid_copy(req.uuid, my_uuid);
+ req.op = op;
+
+ GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
+ if (ret)
+ goto out;
+
+ ret = gd_syncop_submit_request(peerinfo->rpc, &req, args, peerid,
+ &gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_BRICK_OP,
+ gd_mgmt_v3_brick_op_cbk,
+ (xdrproc_t)xdr_gd1_mgmt_v3_brick_op_req);
out:
- GF_FREE (req.dict.dict_val);
- gf_msg_trace (this->name, 0, "Returning %d", ret);
- return ret;
+ GF_FREE(req.dict.dict_val);
+ gf_msg_trace(this->name, 0, "Returning %d", ret);
+ return ret;
}
int
-glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *req_dict, char **op_errstr,
- uint32_t txn_generation)
+glusterd_mgmt_v3_brick_op(glusterd_op_t op, dict_t *req_dict, char **op_errstr,
+ uint32_t txn_generation)
{
- int32_t ret = -1;
- int32_t peer_cnt = 0;
- dict_t *rsp_dict = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- struct syncargs args = {0};
- uuid_t peer_uuid = {0};
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
- GF_ASSERT (conf);
-
- GF_ASSERT (req_dict);
- GF_ASSERT (op_errstr);
-
- rsp_dict = dict_new ();
- if (!rsp_dict) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_CREATE_FAIL,
- "Failed to create response dictionary");
- goto out;
- }
-
- /* Perform brick op on local node */
- ret = gd_mgmt_v3_brick_op_fn (op, req_dict, op_errstr,
- rsp_dict);
-
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_OP_FAIL,
- "Brick ops failed for "
- "operation %s on local node",
- gd_op_list[op]);
-
- if (*op_errstr == NULL) {
- ret = gf_asprintf (op_errstr,
- "Brick ops failed "
- "on localhost. Please "
- "check log file for details");
- if (ret == -1)
- *op_errstr = NULL;
-
- ret = -1;
- }
- goto out;
- }
-
- dict_unref (rsp_dict);
- rsp_dict = NULL;
-
- /* Sending brick op req to other nodes in the cluster */
- gd_syncargs_init (&args, NULL);
- synctask_barrier_init((&args));
- peer_cnt = 0;
-
- rcu_read_lock ();
- cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
- /* Only send requests to peers who were available before the
- * transaction started
- */
- if (peerinfo->generation > txn_generation)
- continue;
-
- if (!peerinfo->connected)
- continue;
- if (op != GD_OP_SYNC_VOLUME &&
- peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
- continue;
-
- gd_mgmt_v3_brick_op_req (op, req_dict, peerinfo, &args,
- MY_UUID, peer_uuid);
- peer_cnt++;
- }
- rcu_read_unlock ();
-
- if (0 == peer_cnt) {
- ret = 0;
- goto out;
- }
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
+ dict_t *rsp_dict = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ struct syncargs args = {0};
+ uuid_t peer_uuid = {0};
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ GF_ASSERT(req_dict);
+ GF_ASSERT(op_errstr);
+
+ rsp_dict = dict_new();
+ if (!rsp_dict) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
+ "Failed to create response dictionary");
+ goto out;
+ }
+
+ /* Perform brick op on local node */
+ ret = gd_mgmt_v3_brick_op_fn(op, req_dict, op_errstr, rsp_dict);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_OP_FAIL,
+ "Brick ops failed for "
+ "operation %s on local node",
+ gd_op_list[op]);
+
+ if (*op_errstr == NULL) {
+ ret = gf_asprintf(op_errstr,
+ "Brick ops failed "
+ "on localhost. Please "
+ "check log file for details");
+ if (ret == -1)
+ *op_errstr = NULL;
+
+ ret = -1;
+ }
+ goto out;
+ }
+
+ dict_unref(rsp_dict);
+ rsp_dict = NULL;
+
+ /* Sending brick op req to other nodes in the cluster */
+ gd_syncargs_init(&args, NULL);
+ synctask_barrier_init((&args));
+ peer_cnt = 0;
+
+ rcu_read_lock();
+ cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
+ {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > txn_generation)
+ continue;
+
+ if (!peerinfo->connected)
+ continue;
+ if (op != GD_OP_SYNC_VOLUME &&
+ peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
+ gd_mgmt_v3_brick_op_req(op, req_dict, peerinfo, &args, MY_UUID,
+ peer_uuid);
+ peer_cnt++;
+ }
+ rcu_read_unlock();
+
+ if (0 == peer_cnt) {
+ ret = 0;
+ goto out;
+ }
- gd_synctask_barrier_wait((&args), peer_cnt);
+ gd_synctask_barrier_wait((&args), peer_cnt);
- if (args.op_ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_OP_FAIL,
- "Brick ops failed on peers");
+ if (args.op_ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_OP_FAIL,
+ "Brick ops failed on peers");
- if (args.errstr)
- *op_errstr = gf_strdup (args.errstr);
- }
+ if (args.errstr)
+ *op_errstr = gf_strdup(args.errstr);
+ }
- ret = args.op_ret;
+ ret = args.op_ret;
- gf_msg_debug (this->name, 0, "Sent brick op req for %s "
- "to %d peers. Returning %d", gd_op_list[op], peer_cnt, ret);
+ gf_msg_debug(this->name, 0,
+ "Sent brick op req for %s "
+ "to %d peers. Returning %d",
+ gd_op_list[op], peer_cnt, ret);
out:
- return ret;
+ return ret;
}
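
The dispatch loop above is the fan-out/barrier pattern used throughout this file: send one request per eligible peer, then block until that many callbacks have fired. Below is a minimal, self-contained sketch of the same idea in plain pthreads; peer_args, peer_wake and peer_wait are hypothetical stand-ins for glusterd's struct syncargs, synctask_barrier_wake and gd_synctask_barrier_wait, not actual glusterd APIs.

    #include <pthread.h>

    /* Hypothetical stand-in for struct syncargs: a counting barrier
     * plus the first failing return code. */
    struct peer_args {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int pending; /* responses still outstanding (may dip below 0) */
        int op_ret;  /* first failure wins */
    };

    /* Response side: called once per peer reply
     * (compare synctask_barrier_wake). */
    static void
    peer_wake(struct peer_args *args, int rc)
    {
        pthread_mutex_lock(&args->lock);
        if (rc && !args->op_ret)
            args->op_ret = rc;
        if (--args->pending == 0)
            pthread_cond_signal(&args->cond);
        pthread_mutex_unlock(&args->lock);
    }

    /* Dispatch side: called after peer_cnt requests were sent
     * (compare gd_synctask_barrier_wait). */
    static int
    peer_wait(struct peer_args *args, int peer_cnt)
    {
        pthread_mutex_lock(&args->lock);
        args->pending += peer_cnt; /* replies that raced ahead balance out */
        while (args->pending > 0)
            pthread_cond_wait(&args->cond, &args->lock);
        pthread_mutex_unlock(&args->lock);
        return args->op_ret;
    }

In the real function the barrier is initialized before any request goes out, which is what makes the deferred wait safe.
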
int32_t
-gd_mgmt_v3_commit_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_commit_cbk_fn(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
{
- int32_t ret = -1;
- struct syncargs *args = NULL;
- gd1_mgmt_v3_commit_rsp rsp = {{0},};
- call_frame_t *frame = NULL;
- int32_t op_ret = -1;
- int32_t op_errno = -1;
- dict_t *rsp_dict = NULL;
- xlator_t *this = NULL;
- uuid_t *peerid = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
- GF_ASSERT (myframe);
-
- frame = myframe;
- args = frame->local;
- peerid = frame->cookie;
- frame->local = NULL;
- frame->cookie = NULL;
-
- if (-1 == req->rpc_status) {
- op_errno = ENOTCONN;
- goto out;
- }
-
- GF_VALIDATE_OR_GOTO_WITH_ERROR (this->name, iov, out, op_errno,
- EINVAL);
-
- ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gd1_mgmt_v3_commit_rsp);
- if (ret < 0)
- goto out;
-
- if (rsp.dict.dict_len) {
- /* Unserialize the dictionary */
- rsp_dict = dict_new ();
-
- ret = dict_unserialize (rsp.dict.dict_val,
- rsp.dict.dict_len,
- &rsp_dict);
- if (ret < 0) {
- free (rsp.dict.dict_val);
- goto out;
- } else {
- rsp_dict->extra_stdfree = rsp.dict.dict_val;
- }
- }
-
- gf_uuid_copy (args->uuid, rsp.uuid);
- pthread_mutex_lock (&args->lock_dict);
- {
- ret = glusterd_syncop_aggr_rsp_dict (rsp.op, args->dict,
- rsp_dict);
- }
- pthread_mutex_unlock (&args->lock_dict);
-
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_RESP_AGGR_FAIL, "%s",
- "Failed to aggregate response from "
- " node/brick");
- if (!rsp.op_ret)
- op_ret = ret;
- else {
- op_ret = rsp.op_ret;
- op_errno = rsp.op_errno;
- }
+ int32_t ret = -1;
+ struct syncargs *args = NULL;
+ gd1_mgmt_v3_commit_rsp rsp = {
+ {0},
+ };
+ call_frame_t *frame = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ dict_t *rsp_dict = NULL;
+ xlator_t *this = NULL;
+ uuid_t *peerid = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+ GF_ASSERT(myframe);
+
+ frame = myframe;
+ args = frame->local;
+ peerid = frame->cookie;
+ frame->local = NULL;
+ frame->cookie = NULL;
+
+ if (-1 == req->rpc_status) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+ GF_VALIDATE_OR_GOTO_WITH_ERROR(this->name, iov, out, op_errno, EINVAL);
+
+ ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_commit_rsp);
+ if (ret < 0)
+ goto out;
+
+ if (rsp.dict.dict_len) {
+ /* Unserialize the dictionary */
+ rsp_dict = dict_new();
+
+ ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict);
+ if (ret < 0) {
+ free(rsp.dict.dict_val);
+ goto out;
} else {
- op_ret = rsp.op_ret;
- op_errno = rsp.op_errno;
- }
+ rsp_dict->extra_stdfree = rsp.dict.dict_val;
+ }
+ }
+
+ gf_uuid_copy(args->uuid, rsp.uuid);
+ pthread_mutex_lock(&args->lock_dict);
+ {
+ ret = glusterd_syncop_aggr_rsp_dict(rsp.op, args->dict, rsp_dict);
+ }
+ pthread_mutex_unlock(&args->lock_dict);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RESP_AGGR_FAIL, "%s",
+ "Failed to aggregate response from "
+ " node/brick");
+ if (!rsp.op_ret)
+ op_ret = ret;
+ else {
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+ }
+ } else {
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+ }
out:
- if (rsp_dict)
- dict_unref (rsp_dict);
-
- gd_mgmt_v3_collate_errors (args, op_ret, op_errno, rsp.op_errstr,
- GLUSTERD_MGMT_V3_COMMIT, *peerid, rsp.uuid);
- GF_FREE (peerid);
-
- if (rsp.op_errstr)
- free (rsp.op_errstr);
-
- /* req->rpc_status set to -1 means, STACK_DESTROY will be called from
- * the caller function.
- */
- if (req->rpc_status != -1)
- STACK_DESTROY (frame->root);
- synctask_barrier_wake(args);
- return 0;
+ if (rsp_dict)
+ dict_unref(rsp_dict);
+
+ gd_mgmt_v3_collate_errors(args, op_ret, op_errno, rsp.op_errstr,
+ GLUSTERD_MGMT_V3_COMMIT, *peerid, rsp.uuid);
+ GF_FREE(peerid);
+
+ if (rsp.op_errstr)
+ free(rsp.op_errstr);
+
+    /* If req->rpc_status is set to -1, STACK_DESTROY will be called from
+ * the caller function.
+ */
+ if (req->rpc_status != -1)
+ STACK_DESTROY(frame->root);
+ synctask_barrier_wake(args);
+ return 0;
}
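
One detail worth noting in the callback above: each peer's rsp_dict is merged into the shared args->dict only while holding args->lock_dict, because commit callbacks for different peers can run concurrently. A reduced sketch of that aggregation idiom, with a toy counter standing in for the response dict and a hypothetical merge_response in place of glusterd_syncop_aggr_rsp_dict:

    #include <pthread.h>

    struct agg {
        pthread_mutex_t lock_dict; /* guards the shared aggregate */
        long total;                /* stands in for args->dict */
    };

    /* Runs in each response callback: merge under the lock so that
     * concurrent callbacks cannot interleave partial updates. */
    static void
    merge_response(struct agg *a, long peer_value)
    {
        pthread_mutex_lock(&a->lock_dict);
        a->total += peer_value; /* real code: glusterd_syncop_aggr_rsp_dict() */
        pthread_mutex_unlock(&a->lock_dict);
    }
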
int32_t
-gd_mgmt_v3_commit_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_commit_cbk(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
{
- return glusterd_big_locked_cbk (req, iov, count, myframe,
- gd_mgmt_v3_commit_cbk_fn);
+ return glusterd_big_locked_cbk(req, iov, count, myframe,
+ gd_mgmt_v3_commit_cbk_fn);
}
int
-gd_mgmt_v3_commit_req (glusterd_op_t op, dict_t *op_ctx,
- glusterd_peerinfo_t *peerinfo,
- struct syncargs *args, uuid_t my_uuid,
- uuid_t recv_uuid)
+gd_mgmt_v3_commit_req(glusterd_op_t op, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo, struct syncargs *args,
+ uuid_t my_uuid, uuid_t recv_uuid)
{
- int32_t ret = -1;
- gd1_mgmt_v3_commit_req req = {{0},};
- xlator_t *this = NULL;
- uuid_t *peerid = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (op_ctx);
- GF_ASSERT (peerinfo);
- GF_ASSERT (args);
-
- ret = dict_allocate_and_serialize (op_ctx,
- &req.dict.dict_val,
- &req.dict.dict_len);
- if (ret)
- goto out;
-
- gf_uuid_copy (req.uuid, my_uuid);
- req.op = op;
-
- GD_ALLOC_COPY_UUID (peerid, peerinfo->uuid, ret);
- if (ret)
- goto out;
-
- ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerid,
- &gd_mgmt_v3_prog,
- GLUSTERD_MGMT_V3_COMMIT,
- gd_mgmt_v3_commit_cbk,
- (xdrproc_t) xdr_gd1_mgmt_v3_commit_req);
+ int32_t ret = -1;
+ gd1_mgmt_v3_commit_req req = {
+ {0},
+ };
+ xlator_t *this = NULL;
+ uuid_t *peerid = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(op_ctx);
+ GF_ASSERT(peerinfo);
+ GF_ASSERT(args);
+
+ ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret)
+ goto out;
+
+ gf_uuid_copy(req.uuid, my_uuid);
+ req.op = op;
+
+ GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
+ if (ret)
+ goto out;
+
+ ret = gd_syncop_submit_request(peerinfo->rpc, &req, args, peerid,
+ &gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_COMMIT,
+ gd_mgmt_v3_commit_cbk,
+ (xdrproc_t)xdr_gd1_mgmt_v3_commit_req);
out:
- GF_FREE (req.dict.dict_val);
- gf_msg_trace (this->name, 0, "Returning %d", ret);
- return ret;
+ GF_FREE(req.dict.dict_val);
+ gf_msg_trace(this->name, 0, "Returning %d", ret);
+ return ret;
}
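
gd_mgmt_v3_commit_req also shows the payload-ownership idiom shared by every *_req helper here: serialize the dict into the request, submit, and free the serialized buffer unconditionally at out:. That is only safe because the submit path encodes the payload before returning. A stripped-down sketch under that assumption; serialize_payload and the commented-out send_buf are hypothetical, not glusterd functions.

    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical serializer: hands back a malloc'd buffer. */
    static int
    serialize_payload(const char *src, char **buf, size_t *len)
    {
        *len = strlen(src) + 1;
        *buf = malloc(*len);
        if (!*buf)
            return -1;
        memcpy(*buf, src, *len);
        return 0;
    }

    static int
    send_request(const char *payload)
    {
        char *buf = NULL;
        size_t len = 0;
        int ret = serialize_payload(payload, &buf, &len);
        if (ret)
            goto out;
        /* ret = send_buf(buf, len);  -- assumed to encode the bytes
         * synchronously, so freeing below is safe on every path. */
    out:
        free(buf); /* free(NULL) is a no-op, like GF_FREE at out: */
        return ret;
    }
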
int
-glusterd_mgmt_v3_commit (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
- char **op_errstr, uint32_t *op_errno,
- uint32_t txn_generation)
+glusterd_mgmt_v3_commit(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
+ char **op_errstr, uint32_t *op_errno,
+ uint32_t txn_generation)
{
- int32_t ret = -1;
- int32_t peer_cnt = 0;
- dict_t *rsp_dict = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- struct syncargs args = {0};
- uuid_t peer_uuid = {0};
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- int32_t count = 0;
-
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
- GF_ASSERT (conf);
-
- GF_ASSERT (op_ctx);
- GF_ASSERT (req_dict);
- GF_ASSERT (op_errstr);
- GF_VALIDATE_OR_GOTO (this->name, op_errno, out);
-
- rsp_dict = dict_new ();
- if (!rsp_dict) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_CREATE_FAIL,
- "Failed to create response dictionary");
- goto out;
- }
-
- /* Commit on local node */
- ret = gd_mgmt_v3_commit_fn (op, req_dict, op_errstr,
- op_errno, rsp_dict);
-
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_COMMIT_OP_FAIL,
- "Commit failed for "
- "operation %s on local node",
- gd_op_list[op]);
-
- if (*op_errstr == NULL) {
- ret = gf_asprintf (op_errstr,
- "Commit failed "
- "on localhost. Please "
- "check log file for details.");
- if (ret == -1)
- *op_errstr = NULL;
-
- ret = -1;
- }
- goto out;
- }
-
- ret = glusterd_syncop_aggr_rsp_dict (op, op_ctx,
- rsp_dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_RESP_AGGR_FAIL, "%s",
- "Failed to aggregate response from "
- " node/brick");
- goto out;
- }
-
-
- dict_unref (rsp_dict);
- rsp_dict = NULL;
-
- /* Sending commit req to other nodes in the cluster */
- gd_syncargs_init (&args, op_ctx);
- synctask_barrier_init((&args));
- peer_cnt = 0;
-
- rcu_read_lock ();
- cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
- /* Only send requests to peers who were available before the
- * transaction started
- */
- if (peerinfo->generation > txn_generation)
- continue;
-
- if (!peerinfo->connected) {
- if (op == GD_OP_TIER_STATUS || op ==
- GD_OP_DETACH_TIER_STATUS) {
- ret = dict_get_int32n (args.dict, "count",
- SLEN ("count"), &count);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "failed to get index");
- count++;
- ret = dict_set_int32n (args.dict, "count",
- SLEN ("count"), count);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "failed to set index");
- }
- continue;
- }
- if (op != GD_OP_SYNC_VOLUME &&
- peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
- continue;
-
- gd_mgmt_v3_commit_req (op, req_dict, peerinfo, &args,
- MY_UUID, peer_uuid);
- peer_cnt++;
- }
- rcu_read_unlock ();
-
- if (0 == peer_cnt) {
- ret = 0;
- goto out;
- }
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
+ dict_t *rsp_dict = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ struct syncargs args = {0};
+ uuid_t peer_uuid = {0};
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ int32_t count = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ GF_ASSERT(op_ctx);
+ GF_ASSERT(req_dict);
+ GF_ASSERT(op_errstr);
+ GF_VALIDATE_OR_GOTO(this->name, op_errno, out);
+
+ rsp_dict = dict_new();
+ if (!rsp_dict) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
+ "Failed to create response dictionary");
+ goto out;
+ }
+
+ /* Commit on local node */
+ ret = gd_mgmt_v3_commit_fn(op, req_dict, op_errstr, op_errno, rsp_dict);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
+ "Commit failed for "
+ "operation %s on local node",
+ gd_op_list[op]);
+
+ if (*op_errstr == NULL) {
+ ret = gf_asprintf(op_errstr,
+ "Commit failed "
+ "on localhost. Please "
+ "check log file for details.");
+ if (ret == -1)
+ *op_errstr = NULL;
+
+ ret = -1;
+ }
+ goto out;
+ }
+
+ ret = glusterd_syncop_aggr_rsp_dict(op, op_ctx, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RESP_AGGR_FAIL, "%s",
+ "Failed to aggregate response from "
+ " node/brick");
+ goto out;
+ }
+
+ dict_unref(rsp_dict);
+ rsp_dict = NULL;
+
+ /* Sending commit req to other nodes in the cluster */
+ gd_syncargs_init(&args, op_ctx);
+ synctask_barrier_init((&args));
+ peer_cnt = 0;
+
+ rcu_read_lock();
+ cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
+ {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > txn_generation)
+ continue;
+
+ if (!peerinfo->connected) {
+ if (op == GD_OP_TIER_STATUS || op == GD_OP_DETACH_TIER_STATUS) {
+ ret = dict_get_int32n(args.dict, "count", SLEN("count"),
+ &count);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "failed to get index");
+ count++;
+ ret = dict_set_int32n(args.dict, "count", SLEN("count"), count);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "failed to set index");
+ }
+ continue;
+ }
+ if (op != GD_OP_SYNC_VOLUME &&
+ peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
+ gd_mgmt_v3_commit_req(op, req_dict, peerinfo, &args, MY_UUID,
+ peer_uuid);
+ peer_cnt++;
+ }
+ rcu_read_unlock();
+
+ if (0 == peer_cnt) {
+ ret = 0;
+ goto out;
+ }
- gd_synctask_barrier_wait((&args), peer_cnt);
+ gd_synctask_barrier_wait((&args), peer_cnt);
- if (args.op_ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_COMMIT_OP_FAIL,
- "Commit failed on peers");
+ if (args.op_ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
+ "Commit failed on peers");
- if (args.errstr)
- *op_errstr = gf_strdup (args.errstr);
- }
+ if (args.errstr)
+ *op_errstr = gf_strdup(args.errstr);
+ }
- ret = args.op_ret;
- *op_errno = args.op_errno;
+ ret = args.op_ret;
+ *op_errno = args.op_errno;
- gf_msg_debug (this->name, 0, "Sent commit req for %s to %d "
- "peers. Returning %d", gd_op_list[op], peer_cnt, ret);
+ gf_msg_debug(this->name, 0,
+ "Sent commit req for %s to %d "
+ "peers. Returning %d",
+ gd_op_list[op], peer_cnt, ret);
out:
- glusterd_op_modify_op_ctx (op, op_ctx);
- return ret;
+ glusterd_op_modify_op_ctx(op, op_ctx);
+ return ret;
}
int32_t
-gd_mgmt_v3_post_validate_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_post_validate_cbk_fn(struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
- int32_t ret = -1;
- struct syncargs *args = NULL;
- gd1_mgmt_v3_post_val_rsp rsp = {{0},};
- call_frame_t *frame = NULL;
- int32_t op_ret = -1;
- int32_t op_errno = -1;
- xlator_t *this = NULL;
- uuid_t *peerid = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
- GF_ASSERT (myframe);
-
- frame = myframe;
- args = frame->local;
- peerid = frame->cookie;
- frame->local = NULL;
- frame->cookie = NULL;
-
- if (-1 == req->rpc_status) {
- op_errno = ENOTCONN;
- goto out;
- }
-
- GF_VALIDATE_OR_GOTO_WITH_ERROR (this->name, iov, out, op_errno,
- EINVAL);
-
- ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gd1_mgmt_v3_post_val_rsp);
- if (ret < 0)
- goto out;
-
- gf_uuid_copy (args->uuid, rsp.uuid);
-
- op_ret = rsp.op_ret;
- op_errno = rsp.op_errno;
+ int32_t ret = -1;
+ struct syncargs *args = NULL;
+ gd1_mgmt_v3_post_val_rsp rsp = {
+ {0},
+ };
+ call_frame_t *frame = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ xlator_t *this = NULL;
+ uuid_t *peerid = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+ GF_ASSERT(myframe);
+
+ frame = myframe;
+ args = frame->local;
+ peerid = frame->cookie;
+ frame->local = NULL;
+ frame->cookie = NULL;
+
+ if (-1 == req->rpc_status) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+ GF_VALIDATE_OR_GOTO_WITH_ERROR(this->name, iov, out, op_errno, EINVAL);
+
+ ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_post_val_rsp);
+ if (ret < 0)
+ goto out;
+
+ gf_uuid_copy(args->uuid, rsp.uuid);
+
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
out:
- gd_mgmt_v3_collate_errors (args, op_ret, op_errno, rsp.op_errstr,
- GLUSTERD_MGMT_V3_POST_VALIDATE, *peerid,
- rsp.uuid);
- if (rsp.op_errstr)
- free (rsp.op_errstr);
-
- if (rsp.dict.dict_val)
- free (rsp.dict.dict_val);
- GF_FREE (peerid);
- /* req->rpc_status set to -1 means, STACK_DESTROY will be called from
- * the caller function.
- */
- if (req->rpc_status != -1)
- STACK_DESTROY (frame->root);
- synctask_barrier_wake(args);
- return 0;
+ gd_mgmt_v3_collate_errors(args, op_ret, op_errno, rsp.op_errstr,
+ GLUSTERD_MGMT_V3_POST_VALIDATE, *peerid,
+ rsp.uuid);
+ if (rsp.op_errstr)
+ free(rsp.op_errstr);
+
+ if (rsp.dict.dict_val)
+ free(rsp.dict.dict_val);
+ GF_FREE(peerid);
+    /* If req->rpc_status is set to -1, STACK_DESTROY will be called from
+ * the caller function.
+ */
+ if (req->rpc_status != -1)
+ STACK_DESTROY(frame->root);
+ synctask_barrier_wake(args);
+ return 0;
}
int32_t
-gd_mgmt_v3_post_validate_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_post_validate_cbk(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
{
- return glusterd_big_locked_cbk (req, iov, count, myframe,
- gd_mgmt_v3_post_validate_cbk_fn);
+ return glusterd_big_locked_cbk(req, iov, count, myframe,
+ gd_mgmt_v3_post_validate_cbk_fn);
}
int
-gd_mgmt_v3_post_validate_req (glusterd_op_t op, int32_t op_ret, dict_t *op_ctx,
- glusterd_peerinfo_t *peerinfo,
- struct syncargs *args, uuid_t my_uuid,
- uuid_t recv_uuid)
+gd_mgmt_v3_post_validate_req(glusterd_op_t op, int32_t op_ret, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid)
{
- int32_t ret = -1;
- gd1_mgmt_v3_post_val_req req = {{0},};
- xlator_t *this = NULL;
- uuid_t *peerid = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (op_ctx);
- GF_ASSERT (peerinfo);
- GF_ASSERT (args);
-
- ret = dict_allocate_and_serialize (op_ctx,
- &req.dict.dict_val,
- &req.dict.dict_len);
- if (ret)
- goto out;
-
- gf_uuid_copy (req.uuid, my_uuid);
- req.op = op;
- req.op_ret = op_ret;
-
- GD_ALLOC_COPY_UUID (peerid, peerinfo->uuid, ret);
- if (ret)
- goto out;
-
- ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerid,
- &gd_mgmt_v3_prog,
- GLUSTERD_MGMT_V3_POST_VALIDATE,
- gd_mgmt_v3_post_validate_cbk,
- (xdrproc_t) xdr_gd1_mgmt_v3_post_val_req);
+ int32_t ret = -1;
+ gd1_mgmt_v3_post_val_req req = {
+ {0},
+ };
+ xlator_t *this = NULL;
+ uuid_t *peerid = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(op_ctx);
+ GF_ASSERT(peerinfo);
+ GF_ASSERT(args);
+
+ ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret)
+ goto out;
+
+ gf_uuid_copy(req.uuid, my_uuid);
+ req.op = op;
+ req.op_ret = op_ret;
+
+ GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
+ if (ret)
+ goto out;
+
+ ret = gd_syncop_submit_request(
+ peerinfo->rpc, &req, args, peerid, &gd_mgmt_v3_prog,
+ GLUSTERD_MGMT_V3_POST_VALIDATE, gd_mgmt_v3_post_validate_cbk,
+ (xdrproc_t)xdr_gd1_mgmt_v3_post_val_req);
out:
- GF_FREE (req.dict.dict_val);
- gf_msg_trace (this->name, 0, "Returning %d", ret);
- return ret;
+ GF_FREE(req.dict.dict_val);
+ gf_msg_trace(this->name, 0, "Returning %d", ret);
+ return ret;
}
int
-glusterd_mgmt_v3_post_validate (glusterd_op_t op, int32_t op_ret, dict_t *dict,
- dict_t *req_dict, char **op_errstr,
- uint32_t txn_generation)
+glusterd_mgmt_v3_post_validate(glusterd_op_t op, int32_t op_ret, dict_t *dict,
+ dict_t *req_dict, char **op_errstr,
+ uint32_t txn_generation)
{
- int32_t ret = -1;
- int32_t peer_cnt = 0;
- dict_t *rsp_dict = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- struct syncargs args = {0};
- uuid_t peer_uuid = {0};
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
- GF_ASSERT (conf);
-
- GF_ASSERT (dict);
- GF_VALIDATE_OR_GOTO (this->name, req_dict, out);
- GF_ASSERT (op_errstr);
-
- rsp_dict = dict_new ();
- if (!rsp_dict) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_CREATE_FAIL,
- "Failed to create response dictionary");
- goto out;
- }
-
- /* Copy the contents of dict like missed snaps info to req_dict */
- if (op != GD_OP_REMOVE_TIER_BRICK)
- /* dict and req_dict has the same values during remove tier
- * brick (detach start) So this rewrite make the remove brick
- * id to become empty.
- * Avoiding to copy it retains the value. */
- dict_copy (dict, req_dict);
-
- /* Post Validation on local node */
- ret = gd_mgmt_v3_post_validate_fn (op, op_ret, req_dict, op_errstr,
- rsp_dict);
-
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_POST_VALIDATION_FAIL,
- "Post Validation failed for "
- "operation %s on local node",
- gd_op_list[op]);
-
- if (*op_errstr == NULL) {
- ret = gf_asprintf (op_errstr,
- "Post-validation failed "
- "on localhost. Please check "
- "log file for details");
- if (ret == -1)
- *op_errstr = NULL;
-
- ret = -1;
- }
- goto out;
- }
-
- dict_unref (rsp_dict);
- rsp_dict = NULL;
-
- /* Sending Post Validation req to other nodes in the cluster */
- gd_syncargs_init (&args, req_dict);
- synctask_barrier_init((&args));
- peer_cnt = 0;
-
- rcu_read_lock ();
- cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
- /* Only send requests to peers who were available before the
- * transaction started
- */
- if (peerinfo->generation > txn_generation)
- continue;
-
- if (!peerinfo->connected)
- continue;
- if (op != GD_OP_SYNC_VOLUME &&
- peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
- continue;
-
- gd_mgmt_v3_post_validate_req (op, op_ret, req_dict, peerinfo,
- &args, MY_UUID, peer_uuid);
- peer_cnt++;
- }
- rcu_read_unlock ();
-
- if (0 == peer_cnt) {
- ret = 0;
- goto out;
- }
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
+ dict_t *rsp_dict = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ struct syncargs args = {0};
+ uuid_t peer_uuid = {0};
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ GF_ASSERT(dict);
+ GF_VALIDATE_OR_GOTO(this->name, req_dict, out);
+ GF_ASSERT(op_errstr);
+
+ rsp_dict = dict_new();
+ if (!rsp_dict) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
+ "Failed to create response dictionary");
+ goto out;
+ }
+
+ /* Copy the contents of dict like missed snaps info to req_dict */
+ if (op != GD_OP_REMOVE_TIER_BRICK)
+    /* dict and req_dict have the same values during remove tier
+     * brick (detach start), so this rewrite makes the remove brick
+     * id become empty.
+     * Avoiding the copy retains the value. */
+ dict_copy(dict, req_dict);
+
+ /* Post Validation on local node */
+ ret = gd_mgmt_v3_post_validate_fn(op, op_ret, req_dict, op_errstr,
+ rsp_dict);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_VALIDATION_FAIL,
+ "Post Validation failed for "
+ "operation %s on local node",
+ gd_op_list[op]);
+
+ if (*op_errstr == NULL) {
+ ret = gf_asprintf(op_errstr,
+ "Post-validation failed "
+ "on localhost. Please check "
+ "log file for details");
+ if (ret == -1)
+ *op_errstr = NULL;
+
+ ret = -1;
+ }
+ goto out;
+ }
+
+ dict_unref(rsp_dict);
+ rsp_dict = NULL;
+
+ /* Sending Post Validation req to other nodes in the cluster */
+ gd_syncargs_init(&args, req_dict);
+ synctask_barrier_init((&args));
+ peer_cnt = 0;
+
+ rcu_read_lock();
+ cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
+ {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > txn_generation)
+ continue;
+
+ if (!peerinfo->connected)
+ continue;
+ if (op != GD_OP_SYNC_VOLUME &&
+ peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
+
+ gd_mgmt_v3_post_validate_req(op, op_ret, req_dict, peerinfo, &args,
+ MY_UUID, peer_uuid);
+ peer_cnt++;
+ }
+ rcu_read_unlock();
+
+ if (0 == peer_cnt) {
+ ret = 0;
+ goto out;
+ }
- gd_synctask_barrier_wait((&args), peer_cnt);
+ gd_synctask_barrier_wait((&args), peer_cnt);
- if (args.op_ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_POST_VALIDATION_FAIL,
- "Post Validation failed on peers");
+ if (args.op_ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_VALIDATION_FAIL,
+ "Post Validation failed on peers");
- if (args.errstr)
- *op_errstr = gf_strdup (args.errstr);
- }
+ if (args.errstr)
+ *op_errstr = gf_strdup(args.errstr);
+ }
- ret = args.op_ret;
+ ret = args.op_ret;
- gf_msg_debug (this->name, 0, "Sent post valaidation req for %s "
- "to %d peers. Returning %d", gd_op_list[op], peer_cnt, ret);
+ gf_msg_debug(this->name, 0,
+ "Sent post valaidation req for %s "
+ "to %d peers. Returning %d",
+ gd_op_list[op], peer_cnt, ret);
out:
- return ret;
+ return ret;
}
int32_t
-gd_mgmt_v3_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_unlock_cbk_fn(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
{
- int32_t ret = -1;
- struct syncargs *args = NULL;
- gd1_mgmt_v3_unlock_rsp rsp = {{0},};
- call_frame_t *frame = NULL;
- int32_t op_ret = -1;
- int32_t op_errno = -1;
- xlator_t *this = NULL;
- uuid_t *peerid = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
- GF_ASSERT (myframe);
-
- frame = myframe;
- args = frame->local;
- peerid = frame->cookie;
- frame->local = NULL;
- frame->cookie = NULL;
-
- if (-1 == req->rpc_status) {
- op_errno = ENOTCONN;
- goto out;
- }
-
- GF_VALIDATE_OR_GOTO_WITH_ERROR (this->name, iov, out, op_errno,
- EINVAL);
-
- ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
- if (ret < 0)
- goto out;
-
- gf_uuid_copy (args->uuid, rsp.uuid);
-
- op_ret = rsp.op_ret;
- op_errno = rsp.op_errno;
+ int32_t ret = -1;
+ struct syncargs *args = NULL;
+ gd1_mgmt_v3_unlock_rsp rsp = {
+ {0},
+ };
+ call_frame_t *frame = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ xlator_t *this = NULL;
+ uuid_t *peerid = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+ GF_ASSERT(myframe);
+
+ frame = myframe;
+ args = frame->local;
+ peerid = frame->cookie;
+ frame->local = NULL;
+ frame->cookie = NULL;
+
+ if (-1 == req->rpc_status) {
+ op_errno = ENOTCONN;
+ goto out;
+ }
+
+ GF_VALIDATE_OR_GOTO_WITH_ERROR(this->name, iov, out, op_errno, EINVAL);
+
+ ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
+ if (ret < 0)
+ goto out;
+
+ gf_uuid_copy(args->uuid, rsp.uuid);
+
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
out:
- gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
- GLUSTERD_MGMT_V3_UNLOCK, *peerid, rsp.uuid);
- if (rsp.dict.dict_val)
- free (rsp.dict.dict_val);
- GF_FREE (peerid);
- /* req->rpc_status set to -1 means, STACK_DESTROY will be called from
- * the caller function.
- */
- if (req->rpc_status != -1)
- STACK_DESTROY (frame->root);
- synctask_barrier_wake(args);
- return 0;
+ gd_mgmt_v3_collate_errors(args, op_ret, op_errno, NULL,
+ GLUSTERD_MGMT_V3_UNLOCK, *peerid, rsp.uuid);
+ if (rsp.dict.dict_val)
+ free(rsp.dict.dict_val);
+ GF_FREE(peerid);
+    /* If req->rpc_status is set to -1, STACK_DESTROY will be called from
+ * the caller function.
+ */
+ if (req->rpc_status != -1)
+ STACK_DESTROY(frame->root);
+ synctask_barrier_wake(args);
+ return 0;
}
int32_t
-gd_mgmt_v3_unlock_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_unlock_cbk(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
{
- return glusterd_big_locked_cbk (req, iov, count, myframe,
- gd_mgmt_v3_unlock_cbk_fn);
+ return glusterd_big_locked_cbk(req, iov, count, myframe,
+ gd_mgmt_v3_unlock_cbk_fn);
}
int
-gd_mgmt_v3_unlock (glusterd_op_t op, dict_t *op_ctx,
- glusterd_peerinfo_t *peerinfo,
- struct syncargs *args, uuid_t my_uuid,
- uuid_t recv_uuid)
+gd_mgmt_v3_unlock(glusterd_op_t op, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo, struct syncargs *args,
+ uuid_t my_uuid, uuid_t recv_uuid)
{
- int32_t ret = -1;
- gd1_mgmt_v3_unlock_req req = {{0},};
- xlator_t *this = NULL;
- uuid_t *peerid = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (op_ctx);
- GF_ASSERT (peerinfo);
- GF_ASSERT (args);
-
- ret = dict_allocate_and_serialize (op_ctx,
- &req.dict.dict_val,
- &req.dict.dict_len);
- if (ret)
- goto out;
-
- gf_uuid_copy (req.uuid, my_uuid);
- req.op = op;
-
- GD_ALLOC_COPY_UUID (peerid, peerinfo->uuid, ret);
- if (ret)
- goto out;
-
- ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerid,
- &gd_mgmt_v3_prog,
- GLUSTERD_MGMT_V3_UNLOCK,
- gd_mgmt_v3_unlock_cbk,
- (xdrproc_t) xdr_gd1_mgmt_v3_unlock_req);
+ int32_t ret = -1;
+ gd1_mgmt_v3_unlock_req req = {
+ {0},
+ };
+ xlator_t *this = NULL;
+ uuid_t *peerid = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(op_ctx);
+ GF_ASSERT(peerinfo);
+ GF_ASSERT(args);
+
+ ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val,
+ &req.dict.dict_len);
+ if (ret)
+ goto out;
+
+ gf_uuid_copy(req.uuid, my_uuid);
+ req.op = op;
+
+ GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret);
+ if (ret)
+ goto out;
+
+ ret = gd_syncop_submit_request(peerinfo->rpc, &req, args, peerid,
+ &gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_UNLOCK,
+ gd_mgmt_v3_unlock_cbk,
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_req);
out:
- GF_FREE (req.dict.dict_val);
- gf_msg_trace (this->name, 0, "Returning %d", ret);
- return ret;
+ GF_FREE(req.dict.dict_val);
+ gf_msg_trace(this->name, 0, "Returning %d", ret);
+ return ret;
}
int
-glusterd_mgmt_v3_release_peer_locks (glusterd_op_t op, dict_t *dict,
- int32_t op_ret, char **op_errstr,
- gf_boolean_t is_acquired,
- uint32_t txn_generation)
+glusterd_mgmt_v3_release_peer_locks(glusterd_op_t op, dict_t *dict,
+ int32_t op_ret, char **op_errstr,
+ gf_boolean_t is_acquired,
+ uint32_t txn_generation)
{
- int32_t ret = -1;
- int32_t peer_cnt = 0;
- uuid_t peer_uuid = {0};
- xlator_t *this = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- struct syncargs args = {0};
- glusterd_conf_t *conf = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
- GF_ASSERT (conf);
-
- GF_ASSERT (dict);
- GF_ASSERT (op_errstr);
-
- /* If the lock has not been held during this
- * transaction, do not send unlock requests */
- if (!is_acquired)
- goto out;
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
+ uuid_t peer_uuid = {0};
+ xlator_t *this = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ struct syncargs args = {0};
+ glusterd_conf_t *conf = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ GF_ASSERT(dict);
+ GF_ASSERT(op_errstr);
+
+ /* If the lock has not been held during this
+ * transaction, do not send unlock requests */
+ if (!is_acquired)
+ goto out;
+
+ /* Sending mgmt_v3 unlock req to other nodes in the cluster */
+ gd_syncargs_init(&args, NULL);
+ ret = synctask_barrier_init((&args));
+ if (ret)
+ goto out;
+ peer_cnt = 0;
+ rcu_read_lock();
+ cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
+ {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > txn_generation)
+ continue;
- /* Sending mgmt_v3 unlock req to other nodes in the cluster */
- gd_syncargs_init (&args, NULL);
- ret = synctask_barrier_init((&args));
- if (ret)
- goto out;
- peer_cnt = 0;
- rcu_read_lock ();
- cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
- /* Only send requests to peers who were available before the
- * transaction started
- */
- if (peerinfo->generation > txn_generation)
- continue;
-
- if (!peerinfo->connected)
- continue;
- if (op != GD_OP_SYNC_VOLUME &&
- peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
- continue;
-
- gd_mgmt_v3_unlock (op, dict, peerinfo, &args,
- MY_UUID, peer_uuid);
- peer_cnt++;
- }
- rcu_read_unlock ();
+ if (!peerinfo->connected)
+ continue;
+ if (op != GD_OP_SYNC_VOLUME &&
+ peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)
+ continue;
- if (0 == peer_cnt) {
- ret = 0;
- goto out;
- }
+ gd_mgmt_v3_unlock(op, dict, peerinfo, &args, MY_UUID, peer_uuid);
+ peer_cnt++;
+ }
+ rcu_read_unlock();
+
+ if (0 == peer_cnt) {
+ ret = 0;
+ goto out;
+ }
- gd_synctask_barrier_wait((&args), peer_cnt);
+ gd_synctask_barrier_wait((&args), peer_cnt);
- if (args.op_ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_UNLOCK_FAIL,
- "Unlock failed on peers");
+ if (args.op_ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
+ "Unlock failed on peers");
- if (!op_ret && args.errstr)
- *op_errstr = gf_strdup (args.errstr);
- }
+ if (!op_ret && args.errstr)
+ *op_errstr = gf_strdup(args.errstr);
+ }
- ret = args.op_ret;
+ ret = args.op_ret;
- gf_msg_debug (this->name, 0, "Sent unlock op req for %s "
- "to %d peers. Returning %d", gd_op_list[op], peer_cnt, ret);
+ gf_msg_debug(this->name, 0,
+ "Sent unlock op req for %s "
+ "to %d peers. Returning %d",
+ gd_op_list[op], peer_cnt, ret);
out:
- return ret;
+ return ret;
}
int32_t
-glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
- dict_t *dict)
+glusterd_mgmt_v3_initiate_all_phases(rpcsvc_request_t *req, glusterd_op_t op,
+ dict_t *dict)
{
- int32_t ret = -1;
- int32_t op_ret = -1;
- dict_t *req_dict = NULL;
- dict_t *tmp_dict = NULL;
- glusterd_conf_t *conf = NULL;
- char *op_errstr = NULL;
- xlator_t *this = NULL;
- gf_boolean_t is_acquired = _gf_false;
- uuid_t *originator_uuid = NULL;
- uint32_t txn_generation = 0;
- uint32_t op_errno = 0;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
- GF_ASSERT (dict);
- conf = this->private;
- GF_ASSERT (conf);
-
- /* Save the peer list generation */
- txn_generation = conf->generation;
- cmm_smp_rmb ();
- /* This read memory barrier makes sure that this assignment happens here
- * only and is not reordered and optimized by either the compiler or the
- * processor.
- */
-
- /* Save the MY_UUID as the originator_uuid. This originator_uuid
- * will be used by is_origin_glusterd() to determine if a node
- * is the originator node for a command. */
- originator_uuid = GF_MALLOC (sizeof(uuid_t),
- gf_common_mt_uuid_t);
- if (!originator_uuid) {
- ret = -1;
- goto out;
- }
-
- gf_uuid_copy (*originator_uuid, MY_UUID);
- ret = dict_set_bin (dict, "originator_uuid",
- originator_uuid, sizeof (uuid_t));
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set originator_uuid.");
- GF_FREE (originator_uuid);
- goto out;
- }
-
- /* Marking the operation as complete synctasked */
- ret = dict_set_int32 (dict, "is_synctasked", _gf_true);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set synctasked flag.");
- goto out;
- }
-
- /* Use a copy at local unlock as cli response will be sent before
- * the unlock and the volname in the dict might be removed */
- tmp_dict = dict_new();
- if (!tmp_dict) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_CREATE_FAIL, "Unable to create dict");
- goto out;
- }
- dict_copy (dict, tmp_dict);
-
- /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
- ret = glusterd_mgmt_v3_initiate_lockdown (op, dict, &op_errstr,
- &op_errno, &is_acquired,
- txn_generation);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_LOCKDOWN_FAIL,
- "mgmt_v3 lockdown failed.");
- goto out;
- }
-
- /* BUILD PAYLOAD */
- ret = glusterd_mgmt_v3_build_payload (&req_dict, &op_errstr, dict, op);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_PAYLOAD_BUILD_FAIL, LOGSTR_BUILD_PAYLOAD,
- gd_op_list[op]);
- if (op_errstr == NULL)
- gf_asprintf (&op_errstr, OPERRSTR_BUILD_PAYLOAD);
- goto out;
- }
-
- /* PRE-COMMIT VALIDATE PHASE */
- ret = glusterd_mgmt_v3_pre_validate (op, req_dict, &op_errstr,
- &op_errno, txn_generation);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PRE_VALIDATION_FAIL, "Pre Validation Failed");
- goto out;
- }
-
- /* COMMIT OP PHASE */
- ret = glusterd_mgmt_v3_commit (op, dict, req_dict, &op_errstr,
- &op_errno, txn_generation);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_COMMIT_OP_FAIL, "Commit Op Failed");
- goto out;
- }
-
- /* POST-COMMIT VALIDATE PHASE */
- /* As of now, post_validate is not trying to cleanup any failed
- commands. So as of now, I am sending 0 (op_ret as 0).
- */
- ret = glusterd_mgmt_v3_post_validate (op, 0, dict, req_dict, &op_errstr,
- txn_generation);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_POST_VALIDATION_FAIL, "Post Validation Failed");
- goto out;
- }
+ int32_t ret = -1;
+ int32_t op_ret = -1;
+ dict_t *req_dict = NULL;
+ dict_t *tmp_dict = NULL;
+ glusterd_conf_t *conf = NULL;
+ char *op_errstr = NULL;
+ xlator_t *this = NULL;
+ gf_boolean_t is_acquired = _gf_false;
+ uuid_t *originator_uuid = NULL;
+ uint32_t txn_generation = 0;
+ uint32_t op_errno = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+ GF_ASSERT(dict);
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ /* Save the peer list generation */
+ txn_generation = conf->generation;
+ cmm_smp_rmb();
+    /* This read memory barrier makes sure that the above assignment happens
+     * here and is not reordered or optimized away by either the compiler or
+     * the processor.
+ */
+
+ /* Save the MY_UUID as the originator_uuid. This originator_uuid
+ * will be used by is_origin_glusterd() to determine if a node
+ * is the originator node for a command. */
+ originator_uuid = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
+ if (!originator_uuid) {
+ ret = -1;
+ goto out;
+ }
+
+ gf_uuid_copy(*originator_uuid, MY_UUID);
+ ret = dict_set_bin(dict, "originator_uuid", originator_uuid,
+ sizeof(uuid_t));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set originator_uuid.");
+ GF_FREE(originator_uuid);
+ goto out;
+ }
+
+ /* Marking the operation as complete synctasked */
+ ret = dict_set_int32(dict, "is_synctasked", _gf_true);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set synctasked flag.");
+ goto out;
+ }
+
+ /* Use a copy at local unlock as cli response will be sent before
+ * the unlock and the volname in the dict might be removed */
+ tmp_dict = dict_new();
+ if (!tmp_dict) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
+ "Unable to create dict");
+ goto out;
+ }
+ dict_copy(dict, tmp_dict);
+
+ /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
+ ret = glusterd_mgmt_v3_initiate_lockdown(op, dict, &op_errstr, &op_errno,
+ &is_acquired, txn_generation);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCKDOWN_FAIL,
+ "mgmt_v3 lockdown failed.");
+ goto out;
+ }
+
+ /* BUILD PAYLOAD */
+ ret = glusterd_mgmt_v3_build_payload(&req_dict, &op_errstr, dict, op);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_PAYLOAD_BUILD_FAIL,
+ LOGSTR_BUILD_PAYLOAD, gd_op_list[op]);
+ if (op_errstr == NULL)
+ gf_asprintf(&op_errstr, OPERRSTR_BUILD_PAYLOAD);
+ goto out;
+ }
+
+ /* PRE-COMMIT VALIDATE PHASE */
+ ret = glusterd_mgmt_v3_pre_validate(op, req_dict, &op_errstr, &op_errno,
+ txn_generation);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
+ "Pre Validation Failed");
+ goto out;
+ }
+
+ /* COMMIT OP PHASE */
+ ret = glusterd_mgmt_v3_commit(op, dict, req_dict, &op_errstr, &op_errno,
+ txn_generation);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
+ "Commit Op Failed");
+ goto out;
+ }
+
+ /* POST-COMMIT VALIDATE PHASE */
+    /* As of now, post_validate does not try to clean up any failed
+       commands, so 0 is passed as op_ret.
+ */
+ ret = glusterd_mgmt_v3_post_validate(op, 0, dict, req_dict, &op_errstr,
+ txn_generation);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_VALIDATION_FAIL,
+ "Post Validation Failed");
+ goto out;
+ }
- ret = 0;
+ ret = 0;
out:
- op_ret = ret;
- /* UNLOCK PHASE FOR PEERS*/
- (void) glusterd_mgmt_v3_release_peer_locks (op, dict, op_ret,
- &op_errstr, is_acquired,
- txn_generation);
-
- /* LOCAL VOLUME(S) UNLOCK */
- if (is_acquired) {
- /* Trying to release multiple mgmt_v3 locks */
- ret = glusterd_multiple_mgmt_v3_unlock (tmp_dict, MY_UUID);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_UNLOCK_FAIL,
- "Failed to release mgmt_v3 locks on localhost");
- op_ret = ret;
- }
+ op_ret = ret;
+ /* UNLOCK PHASE FOR PEERS*/
+ (void)glusterd_mgmt_v3_release_peer_locks(op, dict, op_ret, &op_errstr,
+ is_acquired, txn_generation);
+
+ /* LOCAL VOLUME(S) UNLOCK */
+ if (is_acquired) {
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock(tmp_dict, MY_UUID);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
+ "Failed to release mgmt_v3 locks on localhost");
+ op_ret = ret;
}
+ }
- if (op_ret && (op_errno == 0))
- op_errno = EG_INTRNL;
+ if (op_ret && (op_errno == 0))
+ op_errno = EG_INTRNL;
- if (op != GD_OP_MAX_OPVERSION) {
- /* SEND CLI RESPONSE */
- glusterd_op_send_cli_response (op, op_ret, op_errno, req,
- dict, op_errstr);
- }
+ if (op != GD_OP_MAX_OPVERSION) {
+ /* SEND CLI RESPONSE */
+ glusterd_op_send_cli_response(op, op_ret, op_errno, req, dict,
+ op_errstr);
+ }
- if (req_dict)
- dict_unref (req_dict);
+ if (req_dict)
+ dict_unref(req_dict);
- if (tmp_dict)
- dict_unref (tmp_dict);
+ if (tmp_dict)
+ dict_unref(tmp_dict);
- if (op_errstr) {
- GF_FREE (op_errstr);
- op_errstr = NULL;
- }
+ if (op_errstr) {
+ GF_FREE(op_errstr);
+ op_errstr = NULL;
+ }
- return 0;
+ return 0;
}
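
Taken as a whole, glusterd_mgmt_v3_initiate_all_phases is the standard mgmt_v3 transaction driver: snapshot the peer generation, lock down, build the payload, pre-validate, commit, post-validate, then unlock from out: regardless of outcome, gated by is_acquired. A condensed, self-contained sketch of that control flow; the phase helpers here are illustrative placeholders, not glusterd functions.

    #include <stdbool.h>

    static int lockdown(bool *acquired) { *acquired = true; return 0; }
    static int pre_validate(void) { return 0; }
    static int commit_op(void) { return 0; }
    static int post_validate(void) { return 0; }
    static void unlock_all(void) { /* release mgmt_v3 locks */ }

    /* Any failing phase jumps to out:, where unlock runs on both the
     * success and failure paths, but only if locks were ever taken. */
    static int
    run_transaction(void)
    {
        int ret = -1;
        bool acquired = false;

        ret = lockdown(&acquired); /* LOCKDOWN PHASE */
        if (ret)
            goto out;
        ret = pre_validate();      /* PRE-COMMIT VALIDATE PHASE */
        if (ret)
            goto out;
        ret = commit_op();         /* COMMIT OP PHASE */
        if (ret)
            goto out;
        ret = post_validate();     /* POST-COMMIT VALIDATE PHASE */
    out:
        if (acquired)              /* compare is_acquired above */
            unlock_all();
        return ret;
    }
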
int32_t
-glusterd_set_barrier_value (dict_t *dict, char *option)
+glusterd_set_barrier_value(dict_t *dict, char *option)
{
- int32_t ret = -1;
- xlator_t *this = NULL;
- glusterd_volinfo_t *vol = NULL;
- char *volname = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (dict);
- GF_ASSERT (option);
-
- /* TODO : Change this when we support multiple volume.
- * As of now only snapshot of single volume is supported,
- * Hence volname1 is directly fetched
- */
- ret = dict_get_strn (dict, "volname1", SLEN ("volname1"), &volname);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Volname not present in "
- "dict");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &vol);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, "Volume %s not found ",
- volname);
- goto out;
- }
-
- ret = dict_set_dynstr_with_alloc (dict, "barrier", option);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "Failed to set barrier op "
- "in request dictionary");
- goto out;
- }
-
- ret = dict_set_dynstr_with_alloc (vol->dict, "features.barrier",
- option);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "Failed to set barrier op "
- "in volume option dict");
- goto out;
- }
-
- gd_update_volume_op_versions (vol);
-
- ret = glusterd_create_volfiles (vol);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLFILE_CREATE_FAIL,
- "Failed to create volfiles");
- goto out;
- }
-
- ret = glusterd_store_volinfo (vol, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ glusterd_volinfo_t *vol = NULL;
+ char *volname = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(dict);
+ GF_ASSERT(option);
+
+    /* TODO: Change this when we support multiple volumes.
+     * As of now only a snapshot of a single volume is supported,
+     * hence volname1 is fetched directly.
+ */
+ ret = dict_get_strn(dict, "volname1", SLEN("volname1"), &volname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Volname not present in "
+ "dict");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &vol);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ "Volume %s not found ", volname);
+ goto out;
+ }
+
+ ret = dict_set_dynstr_with_alloc(dict, "barrier", option);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set barrier op "
+ "in request dictionary");
+ goto out;
+ }
+
+ ret = dict_set_dynstr_with_alloc(vol->dict, "features.barrier", option);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set barrier op "
+ "in volume option dict");
+ goto out;
+ }
+
+ gd_update_volume_op_versions(vol);
+
+ ret = glusterd_create_volfiles(vol);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
+ "Failed to create volfiles");
+ goto out;
+ }
+
+ ret = glusterd_store_volinfo(vol, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
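
glusterd_set_barrier_value follows the usual glusterd order for an option change: update the volume's option dict, regenerate volfiles so bricks see the new value, then persist volinfo with a version increment so peers can detect a stale copy. A toy sketch of that sequence; the toy_volinfo struct and both helpers are hypothetical, not glusterd types.

    #include <stdio.h>

    /* Hypothetical, minimal volinfo analogue. */
    struct toy_volinfo {
        int version;     /* bumped on every persisted change */
        char barrier[8]; /* "enable" or "disable" */
    };

    static int regenerate_volfiles(struct toy_volinfo *v) { (void)v; return 0; }
    static int persist_volinfo(struct toy_volinfo *v) { v->version++; return 0; }

    static int
    set_barrier(struct toy_volinfo *v, const char *option)
    {
        int ret;

        /* 1. record the option... */
        snprintf(v->barrier, sizeof(v->barrier), "%s", option);
        /* 2. ...regenerate volfiles so bricks pick it up... */
        ret = regenerate_volfiles(v);
        if (ret)
            return ret;
        /* 3. ...and persist with a version increment (compare
         * GLUSTERD_VOLINFO_VER_AC_INCREMENT). */
        return persist_volinfo(v);
    }
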
int32_t
-glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
- dict_t *dict)
+glusterd_mgmt_v3_initiate_snap_phases(rpcsvc_request_t *req, glusterd_op_t op,
+ dict_t *dict)
{
- int32_t ret = -1;
- int32_t op_ret = -1;
- dict_t *req_dict = NULL;
- dict_t *tmp_dict = NULL;
- glusterd_conf_t *conf = NULL;
- char *op_errstr = NULL;
- xlator_t *this = NULL;
- gf_boolean_t is_acquired = _gf_false;
- uuid_t *originator_uuid = NULL;
- gf_boolean_t success = _gf_false;
- char *cli_errstr = NULL;
- uint32_t txn_generation = 0;
- uint32_t op_errno = 0;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (req);
- GF_ASSERT (dict);
- conf = this->private;
- GF_ASSERT (conf);
-
- /* Save the peer list generation */
- txn_generation = conf->generation;
- cmm_smp_rmb ();
- /* This read memory barrier makes sure that this assignment happens here
- * only and is not reordered and optimized by either the compiler or the
- * processor.
- */
-
- /* Save the MY_UUID as the originator_uuid. This originator_uuid
- * will be used by is_origin_glusterd() to determine if a node
- * is the originator node for a command. */
- originator_uuid = GF_MALLOC (sizeof(uuid_t),
- gf_common_mt_uuid_t);
- if (!originator_uuid) {
- ret = -1;
- goto out;
- }
-
- gf_uuid_copy (*originator_uuid, MY_UUID);
- ret = dict_set_bin (dict, "originator_uuid",
- originator_uuid, sizeof (uuid_t));
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set originator_uuid.");
- GF_FREE (originator_uuid);
- goto out;
- }
-
- /* Marking the operation as complete synctasked */
- ret = dict_set_int32n (dict, "is_synctasked",
- SLEN ("is_synctasked"), _gf_true);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set synctasked flag.");
- goto out;
- }
-
- /* Use a copy at local unlock as cli response will be sent before
- * the unlock and the volname in the dict might be removed */
- tmp_dict = dict_new();
- if (!tmp_dict) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_CREATE_FAIL, "Unable to create dict");
- goto out;
- }
- dict_copy (dict, tmp_dict);
-
- /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
- ret = glusterd_mgmt_v3_initiate_lockdown (op, dict, &op_errstr,
- &op_errno, &is_acquired,
- txn_generation);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_LOCKDOWN_FAIL,
- "mgmt_v3 lockdown failed.");
- goto out;
- }
-
- /* BUILD PAYLOAD */
- ret = glusterd_mgmt_v3_build_payload (&req_dict, &op_errstr, dict, op);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_PAYLOAD_BUILD_FAIL, LOGSTR_BUILD_PAYLOAD,
- gd_op_list[op]);
- if (op_errstr == NULL)
- gf_asprintf (&op_errstr, OPERRSTR_BUILD_PAYLOAD);
- goto out;
- }
-
- /* PRE-COMMIT VALIDATE PHASE */
- ret = glusterd_mgmt_v3_pre_validate (op, req_dict, &op_errstr,
- &op_errno, txn_generation);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PRE_VALIDATION_FAIL, "Pre Validation Failed");
- goto out;
- }
-
- /* quorum check of the volume is done here */
- ret = glusterd_snap_quorum_check (req_dict, _gf_false, &op_errstr,
- &op_errno);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_QUORUM_CHECK_FAIL, "Volume quorum check failed");
- goto out;
- }
-
- /* Set the operation type as pre, so that differentiation can be
- * made whether the brickop is sent during pre-commit or post-commit
- */
- ret = dict_set_dynstr_with_alloc (req_dict, "operation-type", "pre");
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "Failed to set "
- "operation-type in dictionary");
- goto out;
- }
-
- ret = glusterd_mgmt_v3_brick_op (op, req_dict, &op_errstr,
- txn_generation);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_OP_FAIL, "Brick Ops Failed");
- goto unbarrier;
- }
-
- /* COMMIT OP PHASE */
- /* TODO: As of now, the plan is to do quorum check before sending the
- commit fop and if the quorum succeeds, then commit is sent to all
- the other glusterds.
- snap create functionality now creates the in memory and on disk
- objects for the snapshot (marking them as incomplete), takes the lvm
- snapshot and then updates the status of the in memory and on disk
- snap objects as complete. Suppose one of the glusterds goes down
- after taking the lvm snapshot, but before updating the snap object,
- then treat it as a snapshot create failure and trigger cleanup.
- i.e the number of commit responses received by the originator
- glusterd shold be the same as the number of peers it has sent the
- request to (i.e npeers variable). If not, then originator glusterd
- will initiate cleanup in post-validate fop.
- Question: What if one of the other glusterds goes down as explained
- above and along with it the originator glusterd also goes down?
- Who will initiate the cleanup?
+ int32_t ret = -1;
+ int32_t op_ret = -1;
+ dict_t *req_dict = NULL;
+ dict_t *tmp_dict = NULL;
+ glusterd_conf_t *conf = NULL;
+ char *op_errstr = NULL;
+ xlator_t *this = NULL;
+ gf_boolean_t is_acquired = _gf_false;
+ uuid_t *originator_uuid = NULL;
+ gf_boolean_t success = _gf_false;
+ char *cli_errstr = NULL;
+ uint32_t txn_generation = 0;
+ uint32_t op_errno = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(req);
+ GF_ASSERT(dict);
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ /* Save the peer list generation */
+ txn_generation = conf->generation;
+ cmm_smp_rmb();
+  /* This read memory barrier makes sure that the above assignment happens
+   * here and is not reordered or optimized away by either the compiler or
+   * the processor.
+ */
+
+ /* Save the MY_UUID as the originator_uuid. This originator_uuid
+ * will be used by is_origin_glusterd() to determine if a node
+ * is the originator node for a command. */
+ originator_uuid = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
+ if (!originator_uuid) {
+ ret = -1;
+ goto out;
+ }
+
+ gf_uuid_copy(*originator_uuid, MY_UUID);
+ ret = dict_set_bin(dict, "originator_uuid", originator_uuid,
+ sizeof(uuid_t));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set originator_uuid.");
+ GF_FREE(originator_uuid);
+ goto out;
+ }
+
+ /* Marking the operation as complete synctasked */
+ ret = dict_set_int32n(dict, "is_synctasked", SLEN("is_synctasked"),
+ _gf_true);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set synctasked flag.");
+ goto out;
+ }
+
+ /* Use a copy at local unlock as cli response will be sent before
+ * the unlock and the volname in the dict might be removed */
+ tmp_dict = dict_new();
+ if (!tmp_dict) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL,
+ "Unable to create dict");
+ goto out;
+ }
+ dict_copy(dict, tmp_dict);
+
+ /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
+ ret = glusterd_mgmt_v3_initiate_lockdown(op, dict, &op_errstr, &op_errno,
+ &is_acquired, txn_generation);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCKDOWN_FAIL,
+ "mgmt_v3 lockdown failed.");
+ goto out;
+ }
+
+ /* BUILD PAYLOAD */
+ ret = glusterd_mgmt_v3_build_payload(&req_dict, &op_errstr, dict, op);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_PAYLOAD_BUILD_FAIL,
+ LOGSTR_BUILD_PAYLOAD, gd_op_list[op]);
+ if (op_errstr == NULL)
+ gf_asprintf(&op_errstr, OPERRSTR_BUILD_PAYLOAD);
+ goto out;
+ }
+
+ /* PRE-COMMIT VALIDATE PHASE */
+ ret = glusterd_mgmt_v3_pre_validate(op, req_dict, &op_errstr, &op_errno,
+ txn_generation);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
+ "Pre Validation Failed");
+ goto out;
+ }
+
+ /* quorum check of the volume is done here */
+ ret = glusterd_snap_quorum_check(req_dict, _gf_false, &op_errstr,
+ &op_errno);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_QUORUM_CHECK_FAIL,
+ "Volume quorum check failed");
+ goto out;
+ }
+
+  /* Set the operation type as pre, so that it can be distinguished
+   * whether the brickop is sent during pre-commit or post-commit
+ */
+ ret = dict_set_dynstr_with_alloc(req_dict, "operation-type", "pre");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set "
+ "operation-type in dictionary");
+ goto out;
+ }
+
+ ret = glusterd_mgmt_v3_brick_op(op, req_dict, &op_errstr, txn_generation);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_OP_FAIL,
+ "Brick Ops Failed");
+ goto unbarrier;
+ }
+
+ /* COMMIT OP PHASE */
+ /* TODO: As of now, the plan is to do quorum check before sending the
+ commit fop and if the quorum succeeds, then commit is sent to all
+ the other glusterds.
+ snap create functionality now creates the in memory and on disk
+ objects for the snapshot (marking them as incomplete), takes the lvm
+ snapshot and then updates the status of the in memory and on disk
+     snap objects as complete. If one of the glusterds goes down
+     after taking the lvm snapshot, but before updating the snap object,
+     it is treated as a snapshot create failure and cleanup is triggered,
+     i.e. the number of commit responses received by the originator
+     glusterd should be the same as the number of peers it has sent the
+     request to (i.e. the npeers variable). If not, the originator
+     glusterd will initiate cleanup in the post-validate fop.
+ Question: What if one of the other glusterds goes down as explained
+ above and along with it the originator glusterd also goes down?
+ Who will initiate the cleanup?
+ */
+ ret = dict_set_int32n(req_dict, "cleanup", SLEN("cleanup"), 1);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "failed to set dict");
+ goto unbarrier;
+ }
+
+ ret = glusterd_mgmt_v3_commit(op, dict, req_dict, &op_errstr, &op_errno,
+ txn_generation);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
+ "Commit Op Failed");
+    /* If the main op fails, save the error string, because
+       op_errstr will also be used for the unbarrier and unlock
+       ops and we might otherwise lose the actual error that
+       caused the failure.
*/
- ret = dict_set_int32n (req_dict, "cleanup", SLEN ("cleanup"), 1);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "failed to set dict");
- goto unbarrier;
- }
+ cli_errstr = op_errstr;
+ op_errstr = NULL;
+ goto unbarrier;
+ }
- ret = glusterd_mgmt_v3_commit (op, dict, req_dict, &op_errstr,
- &op_errno, txn_generation);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_COMMIT_OP_FAIL, "Commit Op Failed");
- /* If the main op fails, we should save the error string.
- Because, op_errstr will be used for unbarrier and
- unlock ops also. We might lose the actual error that
- caused the failure.
- */
- cli_errstr = op_errstr;
- op_errstr = NULL;
- goto unbarrier;
- }
-
- success = _gf_true;
+ success = _gf_true;
unbarrier:
- /* Set the operation type as post, so that differentiation can be
- * made whether the brickop is sent during pre-commit or post-commit
- */
- ret = dict_set_dynstr_with_alloc (req_dict, "operation-type", "post");
+  /* Set the operation type as post, so that it can be distinguished
+   * whether the brickop is sent during pre-commit or post-commit
+ */
+ ret = dict_set_dynstr_with_alloc(req_dict, "operation-type", "post");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set "
+ "operation-type in dictionary");
+ goto out;
+ }
+
+ ret = glusterd_mgmt_v3_brick_op(op, req_dict, &op_errstr, txn_generation);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_OP_FAIL,
+ "Brick Ops Failed");
+ goto out;
+ }
+
+ /*Do a quorum check if the commit phase is successful*/
+ if (success) {
+ // quorum check of the snapshot volume
+ ret = glusterd_snap_quorum_check(dict, _gf_true, &op_errstr, &op_errno);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "Failed to set "
- "operation-type in dictionary");
- goto out;
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_QUORUM_CHECK_FAIL,
+ "Snapshot Volume quorum check failed");
+ goto out;
}
+ }
- ret = glusterd_mgmt_v3_brick_op (op, req_dict, &op_errstr,
- txn_generation);
-
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_OP_FAIL, "Brick Ops Failed");
- goto out;
- }
-
- /*Do a quorum check if the commit phase is successful*/
- if (success) {
- //quorum check of the snapshot volume
- ret = glusterd_snap_quorum_check (dict, _gf_true, &op_errstr,
- &op_errno);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_QUORUM_CHECK_FAIL,
- "Snapshot Volume quorum check failed");
- goto out;
- }
- }
-
- ret = 0;
+ ret = 0;
out:
- op_ret = ret;
+ op_ret = ret;
- if (success == _gf_false)
- op_ret = -1;
+ if (success == _gf_false)
+ op_ret = -1;
- /* POST-COMMIT VALIDATE PHASE */
- ret = glusterd_mgmt_v3_post_validate (op, op_ret, dict, req_dict,
- &op_errstr, txn_generation);
+ /* POST-COMMIT VALIDATE PHASE */
+ ret = glusterd_mgmt_v3_post_validate(op, op_ret, dict, req_dict, &op_errstr,
+ txn_generation);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
+ "Post Validation Failed");
+ op_ret = -1;
+ }
+
+ /* UNLOCK PHASE FOR PEERS*/
+ (void)glusterd_mgmt_v3_release_peer_locks(op, dict, op_ret, &op_errstr,
+ is_acquired, txn_generation);
+
+  /* If the commit op (snapshot taking) failed, the error is stored
+     in cli_errstr and unbarrier is called. If unbarrier also fails,
+     the error that happened in unbarrier is logged and freed, and
+     the error from the commit op, stored in cli_errstr, is sent
+     to the cli.
+ */
+ if (cli_errstr) {
+ GF_FREE(op_errstr);
+ op_errstr = NULL;
+ op_errstr = cli_errstr;
+ }
+
+ /* LOCAL VOLUME(S) UNLOCK */
+ if (is_acquired) {
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock(tmp_dict, MY_UUID);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PRE_VALIDATION_FAIL, "Post Validation Failed");
- op_ret = -1;
- }
-
- /* UNLOCK PHASE FOR PEERS*/
- (void) glusterd_mgmt_v3_release_peer_locks (op, dict, op_ret,
- &op_errstr, is_acquired,
- txn_generation);
-
- /* If the commit op (snapshot taking) failed, then the error is stored
- in cli_errstr and unbarrier is called. Suppose, if unbarrier also
- fails, then the error happened in unbarrier is logged and freed.
- The error happened in commit op, which is stored in cli_errstr
- is sent to cli.
- */
- if (cli_errstr) {
- GF_FREE (op_errstr);
- op_errstr = NULL;
- op_errstr = cli_errstr;
- }
-
- /* LOCAL VOLUME(S) UNLOCK */
- if (is_acquired) {
- /* Trying to release multiple mgmt_v3 locks */
- ret = glusterd_multiple_mgmt_v3_unlock (tmp_dict, MY_UUID);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_UNLOCK_FAIL,
- "Failed to release mgmt_v3 locks on localhost");
- op_ret = ret;
- }
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
+ "Failed to release mgmt_v3 locks on localhost");
+ op_ret = ret;
}
+ }
- if (op_ret && (op_errno == 0))
- op_errno = EG_INTRNL;
+ if (op_ret && (op_errno == 0))
+ op_errno = EG_INTRNL;
- /* SEND CLI RESPONSE */
- glusterd_op_send_cli_response (op, op_ret, op_errno, req,
- dict, op_errstr);
+ /* SEND CLI RESPONSE */
+ glusterd_op_send_cli_response(op, op_ret, op_errno, req, dict, op_errstr);
- if (req_dict)
- dict_unref (req_dict);
+ if (req_dict)
+ dict_unref(req_dict);
- if (tmp_dict)
- dict_unref (tmp_dict);
+ if (tmp_dict)
+ dict_unref(tmp_dict);
- if (op_errstr) {
- GF_FREE (op_errstr);
- op_errstr = NULL;
- }
+ if (op_errstr) {
+ GF_FREE(op_errstr);
+ op_errstr = NULL;
+ }
- return 0;
+ return 0;
}
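
Note on the error-handling pattern in the hunk above: op_errstr is reused by the unbarrier, post-validate and unlock phases, so the commit error is parked in cli_errstr and swapped back in just before the CLI response. A minimal standalone sketch of the same idea, with hypothetical stand-in functions (not glusterd APIs):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* hypothetical stand-ins for the commit and cleanup phases */
    static int commit_op(char **errstr) { *errstr = strdup("commit failed"); return -1; }
    static int unbarrier_op(char **errstr) { *errstr = strdup("unbarrier failed"); return -1; }

    int main(void)
    {
        char *op_errstr = NULL;  /* reused by every phase */
        char *cli_errstr = NULL; /* parks the first (real) failure */

        if (commit_op(&op_errstr) != 0) {
            cli_errstr = op_errstr; /* save the commit error ...            */
            op_errstr = NULL;       /* ... so later phases can't clobber it */
        }
        if (unbarrier_op(&op_errstr) != 0) {
            fprintf(stderr, "cleanup error (logged, dropped): %s\n", op_errstr);
            free(op_errstr);
            op_errstr = NULL;
        }
        if (cli_errstr) {           /* the commit error wins for the reply */
            free(op_errstr);
            op_errstr = cli_errstr;
        }
        printf("error sent to cli: %s\n", op_errstr ? op_errstr : "(none)");
        free(op_errstr);
        return 0;
    }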
diff --git a/xlators/mgmt/glusterd/src/glusterd-mountbroker.c b/xlators/mgmt/glusterd/src/glusterd-mountbroker.c
index 4678d39d9ce..356a4bcca67 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mountbroker.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mountbroker.c
@@ -31,718 +31,712 @@
#include "glusterd-messages.h"
static int
-seq_dict_foreach (dict_t *dict,
- int (*fn)(char *str, void *data),
- void *data)
+seq_dict_foreach(dict_t *dict, int (*fn)(char *str, void *data), void *data)
{
- char index[] = "4294967296"; // 1<<32
- int i = 0;
- char *val = NULL;
- int ret = 0;
-
- for (;;i++) {
- snprintf(index, sizeof(index), "%d", i);
- ret = dict_get_str (dict, index, &val);
- if (ret != 0)
- return ret == -ENOENT ? 0 : ret;
- ret = fn (val, data);
- if (ret != 0)
- return ret;
- }
+ char index[] = "4294967296"; // 1<<32
+ int i = 0;
+ char *val = NULL;
+ int ret = 0;
+
+ for (;; i++) {
+ snprintf(index, sizeof(index), "%d", i);
+ ret = dict_get_str(dict, index, &val);
+ if (ret != 0)
+ return ret == -ENOENT ? 0 : ret;
+ ret = fn(val, data);
+ if (ret != 0)
+ return ret;
+ }
}
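
seq_dict_foreach walks the dict as a sequence keyed by the decimal strings "0", "1", "2", ...; -ENOENT is the clean end-of-sequence, any other lookup or callback error aborts the walk. A rough standalone equivalent with the dict lookup mocked by a NULL-terminated array (illustrative only):

    #include <errno.h>
    #include <stdio.h>

    static const char *vals[] = {"volfile-id=vol1", "log-level=*", NULL};

    /* mock of dict_get_str over keys "0".."n-1" */
    static int mock_get(int i, const char **val)
    {
        if (!vals[i])
            return -ENOENT; /* clean end of the sequence */
        *val = vals[i];
        return 0;
    }

    static int print_fn(const char *str, void *data)
    {
        (void)data;
        printf("component: %s\n", str);
        return 0; /* nonzero would abort the walk */
    }

    int main(void)
    {
        const char *val = NULL;
        int ret, i;

        for (i = 0;; i++) {
            ret = mock_get(i, &val);
            if (ret != 0)
                return ret == -ENOENT ? 0 : ret;
            ret = print_fn(val, NULL);
            if (ret != 0)
                return ret;
        }
    }

The index buffer in the real function is sized by the string "4294967296" (1<<32), i.e. wide enough for any 32-bit counter rendered in decimal.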
int
-parse_mount_pattern_desc (gf_mount_spec_t *mspec, char *pdesc)
+parse_mount_pattern_desc(gf_mount_spec_t *mspec, char *pdesc)
#define SYNTAX_ERR -2
{
- char *curs = NULL;
- char *c2 = NULL;
- char sc = '\0';
- char **cc = NULL;
- gf_mount_pattern_t *pat = NULL;
- int pnum = 0;
- int ret = 0;
- int lastsup = -1;
- int incl = -1;
- char **pcc = NULL;
- int pnc = 0;
-
- skipwhite (&pdesc);
-
- /* a bow to theory */
- if (!*pdesc)
- return 0;
-
- /* count number of components, separated by '&' */
- mspec->len = 0;
- for (curs = pdesc; *curs; curs++) {
- if (*curs == ')')
- mspec->len++;
+ char *curs = NULL;
+ char *c2 = NULL;
+ char sc = '\0';
+ char **cc = NULL;
+ gf_mount_pattern_t *pat = NULL;
+ int pnum = 0;
+ int ret = 0;
+ int lastsup = -1;
+ int incl = -1;
+ char **pcc = NULL;
+ int pnc = 0;
+
+ skipwhite(&pdesc);
+
+ /* a bow to theory */
+ if (!*pdesc)
+ return 0;
+
+ /* count number of components, separated by '&' */
+ mspec->len = 0;
+ for (curs = pdesc; *curs; curs++) {
+ if (*curs == ')')
+ mspec->len++;
+ }
+
+ mspec->patterns = GF_CALLOC(mspec->len, sizeof(*mspec->patterns),
+ gf_gld_mt_mount_pattern);
+ if (!mspec->patterns) {
+ ret = -1;
+ goto out;
+ }
+
+ pat = mspec->patterns;
+ curs = pdesc;
+ skipwhite(&curs);
+ for (;;) {
+ incl = -1;
+
+ /* check for pattern signedness modifier */
+ if (*curs == '-') {
+ pat->negative = _gf_true;
+ curs++;
}
- mspec->patterns = GF_CALLOC (mspec->len, sizeof (*mspec->patterns),
- gf_gld_mt_mount_pattern);
- if (!mspec->patterns) {
- ret = -1;
- goto out;
+ /* now should come condition specifier,
+ * then opening paren
+ */
+ c2 = nwstrtail(curs, "SUB(");
+ if (c2) {
+ pat->condition = SET_SUB;
+ goto got_cond;
+ }
+ c2 = nwstrtail(curs, "SUP(");
+ if (c2) {
+ pat->condition = SET_SUPER;
+ lastsup = pat - mspec->patterns;
+ goto got_cond;
+ }
+ c2 = nwstrtail(curs, "EQL(");
+ if (c2) {
+ pat->condition = SET_EQUAL;
+ goto got_cond;
+ }
+ c2 = nwstrtail(curs, "MEET(");
+ if (c2) {
+ pat->condition = SET_INTERSECT;
+ goto got_cond;
+ }
+ c2 = nwstrtail(curs, "SUB+(");
+ if (c2) {
+ pat->condition = SET_SUB;
+ incl = lastsup;
+ goto got_cond;
}
- pat = mspec->patterns;
- curs = pdesc;
- skipwhite (&curs);
- for (;;) {
- incl = -1;
-
- /* check for pattern signedness modifier */
- if (*curs == '-') {
- pat->negative = _gf_true;
- curs++;
- }
-
- /* now should come condition specifier,
- * then opening paren
- */
- c2 = nwstrtail (curs, "SUB(");
- if (c2) {
- pat->condition = SET_SUB;
- goto got_cond;
- }
- c2 = nwstrtail (curs, "SUP(");
- if (c2) {
- pat->condition = SET_SUPER;
- lastsup = pat - mspec->patterns;
- goto got_cond;
- }
- c2 = nwstrtail (curs, "EQL(");
- if (c2) {
- pat->condition = SET_EQUAL;
- goto got_cond;
- }
- c2 = nwstrtail (curs, "MEET(");
- if (c2) {
- pat->condition = SET_INTERSECT;
- goto got_cond;
- }
- c2 = nwstrtail (curs, "SUB+(");
- if (c2) {
- pat->condition = SET_SUB;
- incl = lastsup;
- goto got_cond;
- }
+ ret = SYNTAX_ERR;
+ goto out;
+ got_cond:
+ curs = c2;
+ skipwhite(&curs);
+ /* count the number of components for pattern */
+ pnum = *curs == ')' ? 0 : 1;
+ for (c2 = curs; *c2 != ')';) {
+ if (strchr("&|", *c2)) {
ret = SYNTAX_ERR;
goto out;
+ }
+ while (!strchr("|&)", *c2) && !isspace(*c2))
+ c2++;
+ skipwhite(&c2);
+ switch (*c2) {
+ case ')':
+ break;
+ case '\0':
+ case '&':
+ ret = SYNTAX_ERR;
+ goto out;
+ case '|':
+ *c2 = ' ';
+ skipwhite(&c2);
+ /* fall through */
+ default:
+ pnum++;
+ }
+ }
+ if (incl >= 0) {
+ pnc = 0;
+ for (pcc = mspec->patterns[incl].components; *pcc; pcc++)
+ pnc++;
+ pnum += pnc;
+ }
+ pat->components = GF_CALLOC(pnum + 1, sizeof(*pat->components),
+ gf_gld_mt_mount_comp_container);
+ if (!pat->components) {
+ ret = -1;
+ goto out;
+ }
- got_cond:
- curs = c2;
- skipwhite (&curs);
- /* count the number of components for pattern */
- pnum = *curs == ')' ? 0 : 1;
- for (c2 = curs ;*c2 != ')';) {
- if (strchr ("&|", *c2)) {
- ret = SYNTAX_ERR;
- goto out;
- }
- while (!strchr ("|&)", *c2) && !isspace (*c2))
- c2++;
- skipwhite (&c2);
- switch (*c2) {
- case ')':
- break;
- case '\0':
- case '&':
- ret = SYNTAX_ERR;
- goto out;
- case '|':
- *c2 = ' ';
- skipwhite (&c2);
- /* fall through */
- default:
- pnum++;
- }
- }
- if (incl >= 0) {
- pnc = 0;
- for (pcc = mspec->patterns[incl].components; *pcc; pcc++)
- pnc++;
- pnum += pnc;
- }
- pat->components = GF_CALLOC (pnum + 1, sizeof (*pat->components),
- gf_gld_mt_mount_comp_container);
- if (!pat->components) {
- ret = -1;
- goto out;
- }
-
- cc = pat->components;
- /* copy over included component set */
- if (incl >= 0) {
- memcpy (pat->components,
- mspec->patterns[incl].components,
- pnc * sizeof (*pat->components));
- cc += pnc;
- }
- /* parse and add components */
- c2 = ""; /* reset c2 */
- while (*c2 != ')') {
- c2 = curs;
- while (!isspace (*c2) && *c2 != ')')
- c2++;
- sc = *c2;
- *c2 = '\0';;
- *cc = gf_strdup (curs);
- if (!*cc) {
- ret = -1;
- goto out;
- }
- *c2 = sc;
- skipwhite (&c2);
- curs = c2;
- cc++;
- }
-
- curs++;
- skipwhite (&curs);
- if (*curs == '&') {
- curs++;
- skipwhite (&curs);
- }
-
- if (!*curs)
- break;
- pat++;
+ cc = pat->components;
+ /* copy over included component set */
+ if (incl >= 0) {
+ memcpy(pat->components, mspec->patterns[incl].components,
+ pnc * sizeof(*pat->components));
+ cc += pnc;
+ }
+ /* parse and add components */
+ c2 = ""; /* reset c2 */
+ while (*c2 != ')') {
+ c2 = curs;
+ while (!isspace(*c2) && *c2 != ')')
+ c2++;
+ sc = *c2;
+            *c2 = '\0';
+ *cc = gf_strdup(curs);
+ if (!*cc) {
+ ret = -1;
+ goto out;
+ }
+ *c2 = sc;
+ skipwhite(&c2);
+ curs = c2;
+ cc++;
}
- out:
- if (ret == SYNTAX_ERR) {
- gf_msg ("glusterd", GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "cannot parse mount patterns %s",
- pdesc);
+ curs++;
+ skipwhite(&curs);
+ if (*curs == '&') {
+ curs++;
+ skipwhite(&curs);
}
- /* We've allocted a lotta stuff here but don't bother with freeing
- * on error, in that case we'll terminate anyway
- */
- return ret ? -1 : 0;
+ if (!*curs)
+ break;
+ pat++;
+ }
+
+out:
+ if (ret == SYNTAX_ERR) {
+ gf_msg("glusterd", GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
+ "cannot parse mount patterns %s", pdesc);
+ }
+
+    /* We've allocated a lot of stuff here but don't bother with freeing
+     * on error; in that case we'll terminate anyway
+     */
+ return ret ? -1 : 0;
}
#undef SYNTAX_ERR
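
For orientation, the descriptor syntax accepted here (as read from this parser and evaluate_mount_request below) is a sequence of optionally '-'-negated set conditions, each a parenthesized, whitespace- or '|'-separated component list, with '&' as an optional separator between conditions. A hypothetical hand-written descriptor:

    SUP(acl client-pid=-10) & -SUB(log-level=*) & MEET(volfile-id=myvol)

Roughly: SUP(...) demands the mount arguments cover every listed component, SUB(...) demands every argument match some listed component, MEET(...) demands a non-empty intersection, EQL(...) both directions, and SUB+(...) is SUB with the most recent SUP's components folded in; the leading '-' negates the condition.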
-
const char *georep_mnt_desc_template =
- "SUP("
- "aux-gfid-mount "
- "acl "
- "volfile-server=localhost "
- "client-pid=%d "
- "user-map-root=%s "
- ")"
- "SUB+("
- "log-file="DEFAULT_LOG_FILE_DIRECTORY"/"GEOREP"*/* "
- "log-level=* "
- "volfile-id=* "
- ")"
- "MEET("
- "%s"
- ")";
+ "SUP("
+ "aux-gfid-mount "
+ "acl "
+ "volfile-server=localhost "
+ "client-pid=%d "
+ "user-map-root=%s "
+ ")"
+ "SUB+("
+ "log-file=" DEFAULT_LOG_FILE_DIRECTORY "/" GEOREP
+ "*/* "
+ "log-level=* "
+ "volfile-id=* "
+ ")"
+ "MEET("
+ "%s"
+ ")";
const char *hadoop_mnt_desc_template =
- "SUP("
- "volfile-server=%s "
- "client-pid=%d "
- "volfile-id=%s "
- "user-map-root=%s "
- ")"
- "SUB+("
- "log-file="DEFAULT_LOG_FILE_DIRECTORY"/"GHADOOP"*/* "
- "log-level=* "
- ")";
+ "SUP("
+ "volfile-server=%s "
+ "client-pid=%d "
+ "volfile-id=%s "
+ "user-map-root=%s "
+ ")"
+ "SUB+("
+ "log-file=" DEFAULT_LOG_FILE_DIRECTORY "/" GHADOOP
+ "*/* "
+ "log-level=* "
+ ")";
int
-make_georep_mountspec (gf_mount_spec_t *mspec, const char *volnames,
- char *user)
+make_georep_mountspec(gf_mount_spec_t *mspec, const char *volnames, char *user)
{
- char *georep_mnt_desc = NULL;
- char *meetspec = NULL;
- char *vols = NULL;
- char *vol = NULL;
- char *p = NULL;
- char *savetok = NULL;
- char *fa[3] = {0,};
- size_t siz = 0;
- int vc = 0;
- int i = 0;
- int ret = 0;
-
- vols = gf_strdup ((char *)volnames);
- if (!vols)
- goto out;
-
- for (vc = 1, p = vols; *p; p++) {
- if (*p == ',')
- vc++;
+ char *georep_mnt_desc = NULL;
+ char *meetspec = NULL;
+ char *vols = NULL;
+ char *vol = NULL;
+ char *p = NULL;
+ char *savetok = NULL;
+ char *fa[3] = {
+ 0,
+ };
+ size_t siz = 0;
+ int vc = 0;
+ int i = 0;
+ int ret = 0;
+
+ vols = gf_strdup((char *)volnames);
+ if (!vols)
+ goto out;
+
+ for (vc = 1, p = vols; *p; p++) {
+ if (*p == ',')
+ vc++;
+ }
+ siz = strlen(volnames) + vc * SLEN("volfile-id=");
+ meetspec = GF_CALLOC(1, siz + 1, gf_gld_mt_georep_meet_spec);
+ if (!meetspec)
+ goto out;
+
+ for (p = vols;;) {
+ vol = strtok_r(p, ",", &savetok);
+ if (!vol) {
+ GF_ASSERT(vc == 0);
+ break;
}
- siz = strlen (volnames) + vc * SLEN ("volfile-id=");
- meetspec = GF_CALLOC (1, siz + 1, gf_gld_mt_georep_meet_spec);
- if (!meetspec)
- goto out;
-
- for (p = vols;;) {
- vol = strtok_r (p, ",", &savetok);
- if (!vol) {
- GF_ASSERT (vc == 0);
- break;
- }
- p = NULL;
- strcat (meetspec, "volfile-id=");
- strcat (meetspec, vol);
- if (--vc > 0)
- strcat (meetspec, " ");
- }
-
- ret = gf_asprintf (&georep_mnt_desc, georep_mnt_desc_template,
- GF_CLIENT_PID_GSYNCD, user, meetspec);
- if (ret == -1) {
- georep_mnt_desc = NULL;
- goto out;
- }
-
- ret = parse_mount_pattern_desc (mspec, georep_mnt_desc);
-
- out:
- fa[0] = meetspec;
- fa[1] = vols;
- fa[2] = georep_mnt_desc;
-
- for (i = 0; i < 3; i++) {
- if (fa[i] == NULL)
- ret = -1;
- else
- GF_FREE (fa[i]);
- }
-
- return ret;
+ p = NULL;
+ strcat(meetspec, "volfile-id=");
+ strcat(meetspec, vol);
+ if (--vc > 0)
+ strcat(meetspec, " ");
+ }
+
+ ret = gf_asprintf(&georep_mnt_desc, georep_mnt_desc_template,
+ GF_CLIENT_PID_GSYNCD, user, meetspec);
+ if (ret == -1) {
+ georep_mnt_desc = NULL;
+ goto out;
+ }
+
+ ret = parse_mount_pattern_desc(mspec, georep_mnt_desc);
+
+out:
+ fa[0] = meetspec;
+ fa[1] = vols;
+ fa[2] = georep_mnt_desc;
+
+ for (i = 0; i < 3; i++) {
+ if (fa[i] == NULL)
+ ret = -1;
+ else
+ GF_FREE(fa[i]);
+ }
+
+ return ret;
}
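
The buffer sizing above deserves a gloss: for vc comma-separated names, strlen(volnames) + vc * SLEN("volfile-id=") is exactly the output length, because each name gains a "volfile-id=" prefix while each ',' turns into the separating ' '. A standalone rendering of the tokenize-and-append loop (libc only, hypothetical input):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        const char *volnames = "vol1,vol2";
        char *vols = strdup(volnames);
        char *p, *vol, *savetok = NULL;
        char *meetspec;
        size_t siz;
        int vc = 1;

        if (!vols)
            return 1;
        for (p = vols; *p; p++) /* count volumes: commas + 1 */
            if (*p == ',')
                vc++;

        siz = strlen(volnames) + vc * strlen("volfile-id=");
        meetspec = calloc(1, siz + 1);
        if (!meetspec)
            return 1;

        for (p = vols;; p = NULL) {
            vol = strtok_r(p, ",", &savetok);
            if (!vol)
                break;
            strcat(meetspec, "volfile-id=");
            strcat(meetspec, vol);
            if (--vc > 0)
                strcat(meetspec, " ");
        }
        printf("%s\n", meetspec); /* volfile-id=vol1 volfile-id=vol2 */
        free(meetspec);
        free(vols);
        return 0;
    }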
int
-make_ghadoop_mountspec (gf_mount_spec_t *mspec, const char *volname,
- char *user, char *server)
+make_ghadoop_mountspec(gf_mount_spec_t *mspec, const char *volname, char *user,
+ char *server)
{
- char *hadoop_mnt_desc = NULL;
- int ret = 0;
+ char *hadoop_mnt_desc = NULL;
+ int ret = 0;
- ret = gf_asprintf (&hadoop_mnt_desc, hadoop_mnt_desc_template,
- server, GF_CLIENT_PID_HADOOP, volname, user);
- if (ret == -1)
- return ret;
+ ret = gf_asprintf(&hadoop_mnt_desc, hadoop_mnt_desc_template, server,
+ GF_CLIENT_PID_HADOOP, volname, user);
+ if (ret == -1)
+ return ret;
- return parse_mount_pattern_desc (mspec, hadoop_mnt_desc);
+ return parse_mount_pattern_desc(mspec, hadoop_mnt_desc);
}
static gf_boolean_t
-match_comp (char *str, char *patcomp)
+match_comp(char *str, char *patcomp)
{
- char *c1 = patcomp;
- char *c2 = str;
-
- GF_ASSERT (c1);
- GF_ASSERT (c2);
-
- while (*c1 == *c2) {
- if (!*c1)
- return _gf_true;
- c1++;
- c2++;
- if (c1[-1] == '=')
- break;
- }
-
- return fnmatch (c1, c2, 0) == 0 ? _gf_true : _gf_false;
+ char *c1 = patcomp;
+ char *c2 = str;
+
+ GF_ASSERT(c1);
+ GF_ASSERT(c2);
+
+ while (*c1 == *c2) {
+ if (!*c1)
+ return _gf_true;
+ c1++;
+ c2++;
+ if (c1[-1] == '=')
+ break;
+ }
+
+ return fnmatch(c1, c2, 0) == 0 ? _gf_true : _gf_false;
}
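
match_comp compares the strings literally up to and including the first '=' in the pattern component and hands the rest to fnmatch(3): the option key must match exactly, the value may glob. A quick standalone check of the same logic (trimmed, returns int instead of gf_boolean_t):

    #include <fnmatch.h>
    #include <stdio.h>

    static int match_comp(const char *str, const char *patcomp)
    {
        const char *c1 = patcomp, *c2 = str;

        while (*c1 == *c2) {
            if (!*c1)
                return 1;  /* identical strings */
            c1++;
            c2++;
            if (c1[-1] == '=')
                break;     /* key matched literally; glob the value */
        }
        return fnmatch(c1, c2, 0) == 0;
    }

    int main(void)
    {
        printf("%d\n", match_comp("log-level=DEBUG", "log-level=*")); /* 1 */
        printf("%d\n", match_comp("log-output=file", "log-level=*")); /* 0 */
        return 0;
    }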
struct gf_set_descriptor {
- gf_boolean_t priv[2];
- gf_boolean_t common;
+ gf_boolean_t priv[2];
+ gf_boolean_t common;
};
static int
-_gf_set_dict_iter1 (char *val, void *data)
+_gf_set_dict_iter1(char *val, void *data)
{
- void **dataa = data;
- struct gf_set_descriptor *sd = dataa[0];
- char **curs = dataa[1];
- gf_boolean_t priv = _gf_true;
-
- while (*curs) {
- if (match_comp (val, *curs)) {
- priv = _gf_false;
- sd->common = _gf_true;
- }
- curs++;
+ void **dataa = data;
+ struct gf_set_descriptor *sd = dataa[0];
+ char **curs = dataa[1];
+ gf_boolean_t priv = _gf_true;
+
+ while (*curs) {
+ if (match_comp(val, *curs)) {
+ priv = _gf_false;
+ sd->common = _gf_true;
}
+ curs++;
+ }
- if (priv)
- sd->priv[0] = _gf_true;
+ if (priv)
+ sd->priv[0] = _gf_true;
- return 0;
+ return 0;
}
static int
-_gf_set_dict_iter2 (char *val, void *data)
+_gf_set_dict_iter2(char *val, void *data)
{
- void **dataa = data;
- gf_boolean_t *boo = dataa[0];
- char *comp = dataa[1];
+ void **dataa = data;
+ gf_boolean_t *boo = dataa[0];
+ char *comp = dataa[1];
- if (match_comp (val, comp))
- *boo = _gf_true;
+ if (match_comp(val, comp))
+ *boo = _gf_true;
- return 0;
+ return 0;
}
static void
-relate_sets (struct gf_set_descriptor *sd, dict_t *argdict, char **complist)
+relate_sets(struct gf_set_descriptor *sd, dict_t *argdict, char **complist)
{
- void *dataa[] = {NULL, NULL};
- gf_boolean_t boo = _gf_false;
+ void *dataa[] = {NULL, NULL};
+ gf_boolean_t boo = _gf_false;
- memset (sd, 0, sizeof (*sd));
+ memset(sd, 0, sizeof(*sd));
- dataa[0] = sd;
- dataa[1] = complist;
- seq_dict_foreach (argdict, _gf_set_dict_iter1, dataa);
+ dataa[0] = sd;
+ dataa[1] = complist;
+ seq_dict_foreach(argdict, _gf_set_dict_iter1, dataa);
- while (*complist) {
- boo = _gf_false;
- dataa[0] = &boo;
- dataa[1] = *complist;
- seq_dict_foreach (argdict, _gf_set_dict_iter2, dataa);
+ while (*complist) {
+ boo = _gf_false;
+ dataa[0] = &boo;
+ dataa[1] = *complist;
+ seq_dict_foreach(argdict, _gf_set_dict_iter2, dataa);
- if (boo)
- sd->common = _gf_true;
- else
- sd->priv[1] = _gf_true;
+ if (boo)
+ sd->common = _gf_true;
+ else
+ sd->priv[1] = _gf_true;
- complist++;
- }
+ complist++;
+ }
}
static int
-_arg_parse_uid (char *val, void *data)
+_arg_parse_uid(char *val, void *data)
{
- char *user = strtail (val, "user-map-root=");
- struct passwd *pw = NULL;
+ char *user = strtail(val, "user-map-root=");
+ struct passwd *pw = NULL;
- if (!user)
- return 0;
- pw = getpwnam (user);
- if (!pw)
- return -EINVAL;
+ if (!user)
+ return 0;
+ pw = getpwnam(user);
+ if (!pw)
+ return -EINVAL;
- if (*(int *)data >= 0)
- /* uid ambiguity, already found */
- return -EINVAL;
+ if (*(int *)data >= 0)
+ /* uid ambiguity, already found */
+ return -EINVAL;
- *(int *)data = pw->pw_uid;
- return 0;
+ *(int *)data = pw->pw_uid;
+ return 0;
}
static int
-evaluate_mount_request (xlator_t *this, gf_mount_spec_t *mspec, dict_t *argdict)
+evaluate_mount_request(xlator_t *this, gf_mount_spec_t *mspec, dict_t *argdict)
{
- struct gf_set_descriptor sd = {{0,},};
- int i = 0;
- int uid = -1;
- int ret = 0;
- gf_boolean_t match = _gf_false;
-
- for (i = 0; i < mspec->len; i++) {
- relate_sets (&sd, argdict, mspec->patterns[i].components);
- switch (mspec->patterns[i].condition) {
- case SET_SUB:
- match = !sd.priv[0];
- break;
- case SET_SUPER:
- match = !sd.priv[1];
- break;
- case SET_EQUAL:
- match = (!sd.priv[0] && !sd.priv[1]);
- break;
- case SET_INTERSECT:
- match = sd.common;
- break;
- default:
- GF_ASSERT(!"unreached");
- }
- if (mspec->patterns[i].negative)
- match = !match;
-
- if (!match) {
- gf_msg (this->name, GF_LOG_ERROR, EPERM,
- GD_MSG_MNTBROKER_SPEC_MISMATCH,
- "Mountbroker spec mismatch!!! SET: %d "
- "COMPONENT: %d. Review the mount args passed",
- mspec->patterns[i].condition, i);
- return -EPERM;
- }
+ struct gf_set_descriptor sd = {
+ {
+ 0,
+ },
+ };
+ int i = 0;
+ int uid = -1;
+ int ret = 0;
+ gf_boolean_t match = _gf_false;
+
+ for (i = 0; i < mspec->len; i++) {
+ relate_sets(&sd, argdict, mspec->patterns[i].components);
+ switch (mspec->patterns[i].condition) {
+ case SET_SUB:
+ match = !sd.priv[0];
+ break;
+ case SET_SUPER:
+ match = !sd.priv[1];
+ break;
+ case SET_EQUAL:
+ match = (!sd.priv[0] && !sd.priv[1]);
+ break;
+ case SET_INTERSECT:
+ match = sd.common;
+ break;
+ default:
+ GF_ASSERT(!"unreached");
}
+ if (mspec->patterns[i].negative)
+ match = !match;
+
+ if (!match) {
+ gf_msg(this->name, GF_LOG_ERROR, EPERM,
+ GD_MSG_MNTBROKER_SPEC_MISMATCH,
+ "Mountbroker spec mismatch!!! SET: %d "
+ "COMPONENT: %d. Review the mount args passed",
+ mspec->patterns[i].condition, i);
+ return -EPERM;
+ }
+ }
- ret = seq_dict_foreach (argdict, _arg_parse_uid, &uid);
- if (ret != 0)
- return ret;
+ ret = seq_dict_foreach(argdict, _arg_parse_uid, &uid);
+ if (ret != 0)
+ return ret;
- return uid;
+ return uid;
}
static int
-_volname_get (char *val, void *data)
+_volname_get(char *val, void *data)
{
- char **volname = data;
+ char **volname = data;
- *volname = strtail (val, "volfile-id=");
+ *volname = strtail(val, "volfile-id=");
- return *volname ? 1 : 0;
+ return *volname ? 1 : 0;
}
static int
-_runner_add (char *val, void *data)
+_runner_add(char *val, void *data)
{
- runner_t *runner = data;
+ runner_t *runner = data;
- runner_argprintf (runner, "--%s", val);
+ runner_argprintf(runner, "--%s", val);
- return 0;
+ return 0;
}
int
-glusterd_do_mount (char *label, dict_t *argdict, char **path, int *op_errno)
+glusterd_do_mount(char *label, dict_t *argdict, char **path, int *op_errno)
{
- glusterd_conf_t *priv = NULL;
- char *mountbroker_root = NULL;
- gf_mount_spec_t *mspec = NULL;
- int uid = -ENOENT;
- char *volname = NULL;
- glusterd_volinfo_t *vol = NULL;
- char *mtptemp = NULL;
- char *mntlink = NULL;
- char *cookieswitch = NULL;
- char *cookie = NULL;
- char *sla = NULL;
- struct stat st = {0,};
- runner_t runner = {0,};
- int ret = 0;
- xlator_t *this = THIS;
- mode_t orig_umask = 0;
- gf_boolean_t found_label = _gf_false;
-
- priv = this->private;
- GF_ASSERT (priv);
-
- GF_ASSERT (op_errno);
- *op_errno = 0;
-
- if (dict_get_strn (this->options, "mountbroker-root",
- SLEN ("mountbroker-root"),
- &mountbroker_root) != 0) {
- *op_errno = ENOENT;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "'option mountbroker-root' "
- "missing in glusterd vol file");
- goto out;
- }
-
- GF_ASSERT (label);
- if (!*label) {
- *op_errno = EINVAL;
- gf_msg (this->name, GF_LOG_ERROR, *op_errno,
- GD_MSG_MNTBROKER_LABEL_NULL,
- "label is NULL (%s)",
- strerror (*op_errno));
- goto out;
- }
-
- /* look up spec for label */
- cds_list_for_each_entry (mspec, &priv->mount_specs,
- speclist) {
- if (strcmp (mspec->label, label) != 0)
- continue;
-
- found_label = _gf_true;
- uid = evaluate_mount_request (this, mspec, argdict);
- break;
+ glusterd_conf_t *priv = NULL;
+ char *mountbroker_root = NULL;
+ gf_mount_spec_t *mspec = NULL;
+ int uid = -ENOENT;
+ char *volname = NULL;
+ glusterd_volinfo_t *vol = NULL;
+ char *mtptemp = NULL;
+ char *mntlink = NULL;
+ char *cookieswitch = NULL;
+ char *cookie = NULL;
+ char *sla = NULL;
+ struct stat st = {
+ 0,
+ };
+ runner_t runner = {
+ 0,
+ };
+ int ret = 0;
+ xlator_t *this = THIS;
+ mode_t orig_umask = 0;
+ gf_boolean_t found_label = _gf_false;
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ GF_ASSERT(op_errno);
+ *op_errno = 0;
+
+ if (dict_get_strn(this->options, "mountbroker-root",
+ SLEN("mountbroker-root"), &mountbroker_root) != 0) {
+ *op_errno = ENOENT;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "'option mountbroker-root' "
+ "missing in glusterd vol file");
+ goto out;
+ }
+
+ GF_ASSERT(label);
+ if (!*label) {
+ *op_errno = EINVAL;
+ gf_msg(this->name, GF_LOG_ERROR, *op_errno, GD_MSG_MNTBROKER_LABEL_NULL,
+ "label is NULL (%s)", strerror(*op_errno));
+ goto out;
+ }
+
+ /* look up spec for label */
+ cds_list_for_each_entry(mspec, &priv->mount_specs, speclist)
+ {
+ if (strcmp(mspec->label, label) != 0)
+ continue;
+
+ found_label = _gf_true;
+ uid = evaluate_mount_request(this, mspec, argdict);
+ break;
+ }
+ if (uid < 0) {
+ *op_errno = -uid;
+ if (!found_label) {
+ gf_msg(this->name, GF_LOG_ERROR, *op_errno,
+ GD_MSG_MNTBROKER_LABEL_MISS,
+ "Missing mspec: Check the corresponding option "
+ "in glusterd vol file for mountbroker user: %s",
+ label);
}
- if (uid < 0) {
- *op_errno = -uid;
- if (!found_label) {
- gf_msg (this->name, GF_LOG_ERROR, *op_errno,
- GD_MSG_MNTBROKER_LABEL_MISS,
- "Missing mspec: Check the corresponding option "
- "in glusterd vol file for mountbroker user: %s",
- label);
- }
- goto out;
+ goto out;
+ }
+
+ /* some sanity check on arguments */
+ seq_dict_foreach(argdict, _volname_get, &volname);
+ if (!volname) {
+ *op_errno = EINVAL;
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_DICT_GET_FAILED,
+ "Dict get failed for the key 'volname'");
+ goto out;
+ }
+ if (glusterd_volinfo_find(volname, &vol) != 0 ||
+ !glusterd_is_volume_started(vol)) {
+ *op_errno = ENOENT;
+ gf_msg(this->name, GF_LOG_ERROR, *op_errno, GD_MSG_MOUNT_REQ_FAIL,
+ "Either volume is not started or volinfo not found");
+ goto out;
+ }
+
+ /* go do mount */
+
+ /** create actual mount dir */
+
+    /*** "overload" the string so it can also be used for cookie
+         creation, see below */
+ ret = gf_asprintf(&mtptemp, "%s/user%d/mtpt-%s-XXXXXX/cookie",
+ mountbroker_root, uid, label);
+ if (ret == -1) {
+ mtptemp = NULL;
+ *op_errno = ENOMEM;
+ goto out;
+ }
+ /*** hide cookie part */
+ cookieswitch = strrchr(mtptemp, '/');
+ *cookieswitch = '\0';
+
+ sla = strrchr(mtptemp, '/');
+ *sla = '\0';
+ ret = sys_mkdir(mtptemp, 0700);
+ if (ret == 0)
+ ret = sys_chown(mtptemp, uid, 0);
+ else if (errno == EEXIST)
+ ret = 0;
+ if (ret == -1) {
+ *op_errno = errno;
+ gf_msg(this->name, GF_LOG_ERROR, *op_errno, GD_MSG_SYSCALL_FAIL,
+ "Mountbroker User directory creation failed");
+ goto out;
+ }
+ ret = sys_lstat(mtptemp, &st);
+ if (ret == -1) {
+ *op_errno = errno;
+ gf_msg(this->name, GF_LOG_ERROR, *op_errno, GD_MSG_SYSCALL_FAIL,
+ "stat on mountbroker user directory failed");
+ goto out;
+ }
+ if (!(S_ISDIR(st.st_mode) && (st.st_mode & ~S_IFMT) == 0700 &&
+ st.st_uid == uid && st.st_gid == 0)) {
+ *op_errno = EACCES;
+ gf_msg(this->name, GF_LOG_ERROR, *op_errno, GD_MSG_MOUNT_REQ_FAIL,
+ "Incorrect mountbroker user directory attributes");
+ goto out;
+ }
+ *sla = '/';
+
+ if (!mkdtemp(mtptemp)) {
+ *op_errno = errno;
+ gf_msg(this->name, GF_LOG_ERROR, *op_errno, GD_MSG_SYSCALL_FAIL,
+ "Mountbroker mount directory creation failed");
+ goto out;
+ }
+
+ /** create private "cookie" symlink */
+
+ /*** occupy an entry in the hive dir via mkstemp */
+ ret = gf_asprintf(&cookie, "%s/" MB_HIVE "/mntXXXXXX", mountbroker_root);
+ if (ret == -1) {
+ cookie = NULL;
+ *op_errno = ENOMEM;
+ goto out;
+ }
+ orig_umask = umask(S_IRWXG | S_IRWXO);
+ ret = mkstemp(cookie);
+ umask(orig_umask);
+ if (ret == -1) {
+ *op_errno = errno;
+ gf_msg(this->name, GF_LOG_ERROR, *op_errno, GD_MSG_SYSCALL_FAIL,
+ "Mountbroker cookie file creation failed");
+ goto out;
+ }
+ sys_close(ret);
+
+    /*** assemble the path from the cookie to the mountpoint */
+ sla = strchr(sla - 1, '/');
+ GF_ASSERT(sla);
+ ret = gf_asprintf(&mntlink, "../user%d%s", uid, sla);
+ if (ret == -1) {
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ /*** create cookie link in (to-be) mountpoint,
+ move it over to the final place */
+ *cookieswitch = '/';
+ ret = sys_symlink(mntlink, mtptemp);
+ if (ret != -1)
+ ret = sys_rename(mtptemp, cookie);
+ *cookieswitch = '\0';
+ if (ret == -1) {
+ *op_errno = errno;
+ gf_msg(this->name, GF_LOG_ERROR, *op_errno, GD_MSG_SYSCALL_FAIL,
+ "symlink or rename failed");
+ goto out;
+ }
+
+ /** invoke glusterfs on the mountpoint */
+
+ runinit(&runner);
+ runner_add_arg(&runner, SBIN_DIR "/glusterfs");
+ seq_dict_foreach(argdict, _runner_add, &runner);
+ runner_add_arg(&runner, mtptemp);
+ ret = runner_run_reuse(&runner);
+ if (ret == -1) {
+ *op_errno = EIO; /* XXX hacky fake */
+ runner_log(&runner, "", GF_LOG_ERROR, "command failed");
+ }
+ runner_end(&runner);
+
+out:
+
+ if (*op_errno) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_WARNING, *op_errno, GD_MSG_MOUNT_REQ_FAIL,
+ "unsuccessful mount request");
+ if (mtptemp) {
+ *cookieswitch = '/';
+ sys_unlink(mtptemp);
+ *cookieswitch = '\0';
+ sys_rmdir(mtptemp);
}
-
- /* some sanity check on arguments */
- seq_dict_foreach (argdict, _volname_get, &volname);
- if (!volname) {
- *op_errno = EINVAL;
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_DICT_GET_FAILED,
- "Dict get failed for the key 'volname'");
- goto out;
- }
- if (glusterd_volinfo_find (volname, &vol) != 0 ||
- !glusterd_is_volume_started (vol)) {
- *op_errno = ENOENT;
- gf_msg (this->name, GF_LOG_ERROR, *op_errno,
- GD_MSG_MOUNT_REQ_FAIL,
- "Either volume is not started or volinfo not found");
- goto out;
+ if (cookie) {
+ sys_unlink(cookie);
+ GF_FREE(cookie);
}
- /* go do mount */
+ } else {
+ ret = 0;
+ *path = cookie;
+ }
- /** create actual mount dir */
+ if (mtptemp)
+ GF_FREE(mtptemp);
+ if (mntlink)
+ GF_FREE(mntlink);
- /*** "overload" string name to be possible to used for cookie
- creation, see below */
- ret = gf_asprintf (&mtptemp, "%s/user%d/mtpt-%s-XXXXXX/cookie",
- mountbroker_root, uid, label);
- if (ret == -1) {
- mtptemp = NULL;
- *op_errno = ENOMEM;
- goto out;
- }
- /*** hide cookie part */
- cookieswitch = strrchr (mtptemp, '/');
- *cookieswitch = '\0';
-
- sla = strrchr (mtptemp, '/');
- *sla = '\0';
- ret = sys_mkdir (mtptemp, 0700);
- if (ret == 0)
- ret = sys_chown (mtptemp, uid, 0);
- else if (errno == EEXIST)
- ret = 0;
- if (ret == -1) {
- *op_errno = errno;
- gf_msg (this->name, GF_LOG_ERROR, *op_errno,
- GD_MSG_SYSCALL_FAIL,
- "Mountbroker User directory creation failed");
- goto out;
- }
- ret = sys_lstat (mtptemp, &st);
- if (ret == -1) {
- *op_errno = errno;
- gf_msg (this->name, GF_LOG_ERROR, *op_errno,
- GD_MSG_SYSCALL_FAIL,
- "stat on mountbroker user directory failed");
- goto out;
- }
- if (!(S_ISDIR (st.st_mode) && (st.st_mode & ~S_IFMT) == 0700 &&
- st.st_uid == uid && st.st_gid == 0)) {
- *op_errno = EACCES;
- gf_msg (this->name, GF_LOG_ERROR, *op_errno,
- GD_MSG_MOUNT_REQ_FAIL,
- "Incorrect mountbroker user directory attributes");
- goto out;
- }
- *sla = '/';
-
- if (!mkdtemp (mtptemp)) {
- *op_errno = errno;
- gf_msg (this->name, GF_LOG_ERROR, *op_errno,
- GD_MSG_SYSCALL_FAIL,
- "Mountbroker mount directory creation failed");
- goto out;
- }
-
- /** create private "cookie" symlink */
-
- /*** occupy an entry in the hive dir via mkstemp */
- ret = gf_asprintf (&cookie, "%s/"MB_HIVE"/mntXXXXXX",
- mountbroker_root);
- if (ret == -1) {
- cookie = NULL;
- *op_errno = ENOMEM;
- goto out;
- }
- orig_umask = umask(S_IRWXG | S_IRWXO);
- ret = mkstemp (cookie);
- umask(orig_umask);
- if (ret == -1) {
- *op_errno = errno;
- gf_msg (this->name, GF_LOG_ERROR, *op_errno,
- GD_MSG_SYSCALL_FAIL,
- "Mountbroker cookie file creation failed");
- goto out;
- }
- sys_close (ret);
-
- /*** assembly the path from cookie to mountpoint */
- sla = strchr (sla - 1, '/');
- GF_ASSERT (sla);
- ret = gf_asprintf (&mntlink, "../user%d%s", uid, sla);
- if (ret == -1) {
- *op_errno = ENOMEM;
- goto out;
- }
-
- /*** create cookie link in (to-be) mountpoint,
- move it over to the final place */
- *cookieswitch = '/';
- ret = sys_symlink (mntlink, mtptemp);
- if (ret != -1)
- ret = sys_rename (mtptemp, cookie);
- *cookieswitch = '\0';
- if (ret == -1) {
- *op_errno = errno;
- gf_msg (this->name, GF_LOG_ERROR, *op_errno,
- GD_MSG_SYSCALL_FAIL,
- "symlink or rename failed");
- goto out;
- }
-
- /** invoke glusterfs on the mountpoint */
-
- runinit (&runner);
- runner_add_arg (&runner, SBIN_DIR"/glusterfs");
- seq_dict_foreach (argdict, _runner_add, &runner);
- runner_add_arg (&runner, mtptemp);
- ret = runner_run_reuse (&runner);
- if (ret == -1) {
- *op_errno = EIO; /* XXX hacky fake */
- runner_log (&runner, "", GF_LOG_ERROR, "command failed");
- }
- runner_end (&runner);
-
- out:
-
- if (*op_errno) {
- ret = -1;
- gf_msg (this->name, GF_LOG_WARNING, *op_errno,
- GD_MSG_MOUNT_REQ_FAIL,
- "unsuccessful mount request");
- if (mtptemp) {
- *cookieswitch = '/';
- sys_unlink (mtptemp);
- *cookieswitch = '\0';
- sys_rmdir (mtptemp);
- }
- if (cookie) {
- sys_unlink (cookie);
- GF_FREE (cookie);
- }
-
- } else {
- ret = 0;
- *path = cookie;
- }
-
- if (mtptemp)
- GF_FREE (mtptemp);
- if (mntlink)
- GF_FREE (mntlink);
-
- return ret;
+ return ret;
}
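
The filesystem choreography in glusterd_do_mount is the interesting part: a 0700 per-uid directory, a mkdtemp()'d mountpoint inside it, a mkstemp()'d placeholder in the shared hive, and a symlink staged in the mountpoint and then rename()'d over the placeholder, so the cookie starts pointing at the mountpoint atomically. A stripped-down standalone version of just that dance (paths under /tmp, absolute rather than relative link targets; illustrative layout only):

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
        char mtpt[] = "/tmp/mb-demo-mtpt-XXXXXX";
        char cookie[] = "/tmp/mb-demo-cookie-XXXXXX";
        char tmplink[64];
        int fd;

        if (!mkdtemp(mtpt)) /* the (to-be) mountpoint */
            return 1;

        fd = mkstemp(cookie); /* occupy an entry in the "hive" */
        if (fd == -1)
            return 1;
        close(fd);

        /* stage the symlink inside the mountpoint, then rename() it
           over the placeholder so the cookie appears atomically */
        snprintf(tmplink, sizeof(tmplink), "%s/cookie", mtpt);
        if (symlink(mtpt, tmplink) == -1 || rename(tmplink, cookie) == -1)
            return 1;

        printf("cookie %s -> %s\n", cookie, mtpt);
        unlink(cookie);
        rmdir(mtpt);
        return 0;
    }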
diff --git a/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c b/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c
index 32b1064c002..c153719545b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-nfs-svc.c
@@ -19,212 +19,207 @@
#include "glusterd-svc-helper.h"
static gf_boolean_t
-glusterd_nfssvc_need_start ()
+glusterd_nfssvc_need_start()
{
- glusterd_conf_t *priv = NULL;
- gf_boolean_t start = _gf_false;
- glusterd_volinfo_t *volinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ gf_boolean_t start = _gf_false;
+ glusterd_volinfo_t *volinfo = NULL;
- priv = THIS->private;
+ priv = THIS->private;
- cds_list_for_each_entry (volinfo, &priv->volumes, vol_list) {
- if (!glusterd_is_volume_started (volinfo))
- continue;
+ cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
+ {
+ if (!glusterd_is_volume_started(volinfo))
+ continue;
- if (dict_get_str_boolean (volinfo->dict, NFS_DISABLE_MAP_KEY, 1))
- continue;
- start = _gf_true;
- break;
- }
+ if (dict_get_str_boolean(volinfo->dict, NFS_DISABLE_MAP_KEY, 1))
+ continue;
+ start = _gf_true;
+ break;
+ }
- return start;
+ return start;
}
static int
-glusterd_nfssvc_create_volfile ()
+glusterd_nfssvc_create_volfile()
{
- char filepath[PATH_MAX] = {0,};
- glusterd_conf_t *conf = THIS->private;
-
- glusterd_svc_build_volfile_path (conf->nfs_svc.name, conf->workdir,
- filepath, sizeof (filepath));
- return glusterd_create_global_volfile (build_nfs_graph,
- filepath, NULL);
+ char filepath[PATH_MAX] = {
+ 0,
+ };
+ glusterd_conf_t *conf = THIS->private;
+
+ glusterd_svc_build_volfile_path(conf->nfs_svc.name, conf->workdir, filepath,
+ sizeof(filepath));
+ return glusterd_create_global_volfile(build_nfs_graph, filepath, NULL);
}
static int
-glusterd_nfssvc_manager (glusterd_svc_t *svc, void *data, int flags)
+glusterd_nfssvc_manager(glusterd_svc_t *svc, void *data, int flags)
{
- int ret = -1;
-
- if (!svc->inited) {
- ret = glusterd_svc_init (svc, "nfs");
- if (ret) {
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_FAILED_INIT_NFSSVC,
- "Failed to init nfs service");
- goto out;
- } else {
- svc->inited = _gf_true;
- gf_msg_debug (THIS->name, 0,
- "nfs service initialized");
- }
+ int ret = -1;
+
+ if (!svc->inited) {
+ ret = glusterd_svc_init(svc, "nfs");
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_FAILED_INIT_NFSSVC,
+ "Failed to init nfs service");
+ goto out;
+ } else {
+ svc->inited = _gf_true;
+ gf_msg_debug(THIS->name, 0, "nfs service initialized");
}
+ }
- ret = svc->stop (svc, SIGKILL);
- if (ret)
- goto out;
-
- /* not an error, or a (very) soft error at best */
- if (sys_access (XLATORDIR "/nfs/server.so", R_OK) != 0) {
- gf_msg (THIS->name, GF_LOG_INFO, 0,
- GD_MSG_GNFS_XLATOR_NOT_INSTALLED,
- "nfs/server.so xlator is not installed");
- goto out;
- }
+ ret = svc->stop(svc, SIGKILL);
+ if (ret)
+ goto out;
- ret = glusterd_nfssvc_create_volfile ();
- if (ret)
- goto out;
+ /* not an error, or a (very) soft error at best */
+ if (sys_access(XLATORDIR "/nfs/server.so", R_OK) != 0) {
+ gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_GNFS_XLATOR_NOT_INSTALLED,
+ "nfs/server.so xlator is not installed");
+ goto out;
+ }
- if (glusterd_nfssvc_need_start ()) {
- ret = svc->start (svc, flags);
- if (ret)
- goto out;
+ ret = glusterd_nfssvc_create_volfile();
+ if (ret)
+ goto out;
- ret = glusterd_conn_connect (&(svc->conn));
- if (ret)
- goto out;
- }
-out:
+ if (glusterd_nfssvc_need_start()) {
+ ret = svc->start(svc, flags);
if (ret)
- gf_event (EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
+ goto out;
- gf_msg_debug (THIS->name, 0, "Returning %d", ret);
+ ret = glusterd_conn_connect(&(svc->conn));
+ if (ret)
+ goto out;
+ }
+out:
+ if (ret)
+ gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
- return ret;
+ gf_msg_debug(THIS->name, 0, "Returning %d", ret);
+
+ return ret;
}
static int
-glusterd_nfssvc_start (glusterd_svc_t *svc, int flags)
+glusterd_nfssvc_start(glusterd_svc_t *svc, int flags)
{
- return glusterd_svc_start (svc, flags, NULL);
+ return glusterd_svc_start(svc, flags, NULL);
}
static int
-glusterd_nfssvc_stop (glusterd_svc_t *svc, int sig)
+glusterd_nfssvc_stop(glusterd_svc_t *svc, int sig)
{
- int ret = -1;
- gf_boolean_t deregister = _gf_false;
+ int ret = -1;
+ gf_boolean_t deregister = _gf_false;
- if (glusterd_proc_is_running (&(svc->proc)))
- deregister = _gf_true;
+ if (glusterd_proc_is_running(&(svc->proc)))
+ deregister = _gf_true;
- ret = glusterd_svc_stop (svc, sig);
- if (ret)
- goto out;
- if (deregister)
- glusterd_nfs_pmap_deregister ();
+ ret = glusterd_svc_stop(svc, sig);
+ if (ret)
+ goto out;
+ if (deregister)
+ glusterd_nfs_pmap_deregister();
out:
- gf_msg_debug (THIS->name, 0, "Returning %d", ret);
+ gf_msg_debug(THIS->name, 0, "Returning %d", ret);
- return ret;
+ return ret;
}
void
-glusterd_nfssvc_build (glusterd_svc_t *svc)
+glusterd_nfssvc_build(glusterd_svc_t *svc)
{
- svc->manager = glusterd_nfssvc_manager;
- svc->start = glusterd_nfssvc_start;
- svc->stop = glusterd_nfssvc_stop;
+ svc->manager = glusterd_nfssvc_manager;
+ svc->start = glusterd_nfssvc_start;
+ svc->stop = glusterd_nfssvc_stop;
}
int
-glusterd_nfssvc_reconfigure ()
+glusterd_nfssvc_reconfigure()
{
- int ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- gf_boolean_t identical = _gf_false;
- gf_boolean_t vol_started = _gf_false;
- glusterd_volinfo_t *volinfo = NULL;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO (this->name, this, out);
-
- priv = this->private;
- GF_VALIDATE_OR_GOTO (this->name, priv, out);
-
- /* not an error, or a (very) soft error at best */
- if (sys_access (XLATORDIR "/nfs/server.so", R_OK) != 0) {
- gf_msg (THIS->name, GF_LOG_INFO, 0,
- GD_MSG_GNFS_XLATOR_NOT_INSTALLED,
- "nfs/server.so xlator is not installed");
- ret = 0;
- goto out;
+ int ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ gf_boolean_t identical = _gf_false;
+ gf_boolean_t vol_started = _gf_false;
+ glusterd_volinfo_t *volinfo = NULL;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO(this->name, this, out);
+
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, priv, out);
+
+ /* not an error, or a (very) soft error at best */
+ if (sys_access(XLATORDIR "/nfs/server.so", R_OK) != 0) {
+ gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_GNFS_XLATOR_NOT_INSTALLED,
+ "nfs/server.so xlator is not installed");
+ ret = 0;
+ goto out;
+ }
+
+ cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
+ {
+ if (GLUSTERD_STATUS_STARTED == volinfo->status) {
+ vol_started = _gf_true;
+ break;
}
-
- cds_list_for_each_entry (volinfo, &priv->volumes, vol_list) {
- if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- vol_started = _gf_true;
- break;
- }
- }
- if (!vol_started) {
- ret = 0;
- goto out;
+ }
+ if (!vol_started) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+     * Check whether the OLD and NEW volfiles are the SAME by size
+     * and cksum, i.e. "character-by-character". If YES, nothing
+     * has changed; just return.
+ */
+
+ ret = glusterd_svc_check_volfile_identical(priv->nfs_svc.name,
+ build_nfs_graph, &identical);
+ if (ret)
+ goto out;
+
+ if (identical) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * They are not identical. Find out if the topology is changed
+ * OR just the volume options. If just the options which got
+ * changed, then inform the xlator to reconfigure the options.
+ */
+ identical = _gf_false; /* RESET the FLAG */
+ ret = glusterd_svc_check_topology_identical(priv->nfs_svc.name,
+ build_nfs_graph, &identical);
+ if (ret)
+ goto out;
+
+    /* Topology has not changed, only the options. Write the options
+     * to the NFS volfile so that NFS will be reconfigured.
+ */
+ if (identical) {
+ ret = glusterd_nfssvc_create_volfile();
+ if (ret == 0) { /* Only if above PASSES */
+ ret = glusterd_fetchspec_notify(THIS);
}
+ goto out;
+ }
- /*
- * Check both OLD and NEW volfiles, if they are SAME by size
- * and cksum i.e. "character-by-character". If YES, then
- * NOTHING has been changed, just return.
- */
-
- ret = glusterd_svc_check_volfile_identical (priv->nfs_svc.name,
- build_nfs_graph,
- &identical);
- if (ret)
- goto out;
-
- if (identical) {
- ret = 0;
- goto out;
- }
-
- /*
- * They are not identical. Find out if the topology is changed
- * OR just the volume options. If just the options which got
- * changed, then inform the xlator to reconfigure the options.
- */
- identical = _gf_false; /* RESET the FLAG */
- ret = glusterd_svc_check_topology_identical (priv->nfs_svc.name,
- build_nfs_graph,
- &identical);
- if (ret)
- goto out;
-
- /* Topology is not changed, but just the options. But write the
- * options to NFS volfile, so that NFS will be reconfigured.
- */
- if (identical) {
- ret = glusterd_nfssvc_create_volfile();
- if (ret == 0) {/* Only if above PASSES */
- ret = glusterd_fetchspec_notify (THIS);
- }
- goto out;
- }
-
- /*
- * NFS volfile's topology has been changed. NFS server needs
- * to be RESTARTED to ACT on the changed volfile.
- */
- ret = priv->nfs_svc.manager (&(priv->nfs_svc), NULL,
- PROC_START_NO_WAIT);
+ /*
+ * NFS volfile's topology has been changed. NFS server needs
+ * to be RESTARTED to ACT on the changed volfile.
+ */
+ ret = priv->nfs_svc.manager(&(priv->nfs_svc), NULL, PROC_START_NO_WAIT);
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
-
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
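
The reconfigure above boils down to a three-way decision: identical volfile means nothing to do; identical topology but different options means rewrite the volfile and notify (live reconfigure); a changed topology means restart. A toy standalone rendering of that flow (all names hypothetical, probes hard-wired):

    #include <stdio.h>

    enum action { NOTHING, REWRITE_AND_NOTIFY, RESTART };

    /* hypothetical probes standing in for the volfile checksum helpers */
    static int volfile_identical(void)  { return 0; }
    static int topology_identical(void) { return 1; }

    static enum action decide(void)
    {
        if (volfile_identical())
            return NOTHING;            /* size+cksum match: no change    */
        if (topology_identical())
            return REWRITE_AND_NOTIFY; /* options only: reconfigure live */
        return RESTART;                /* graph changed: restart daemon  */
    }

    int main(void)
    {
        static const char *names[] = {"nothing", "rewrite+notify", "restart"};
        printf("action: %s\n", names[decide()]);
        return 0;
    }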
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index d7b1446204d..0944cc648bf 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -56,8 +56,8 @@
extern char local_node_hostname[PATH_MAX];
static int
-glusterd_set_shared_storage (dict_t *dict, char *key, char *value,
- char **op_errstr);
+glusterd_set_shared_storage(dict_t *dict, char *key, char *value,
+ char **op_errstr);
/*
* Valid options for all volumes to be listed in the valid_all_vol_opts table.
@@ -68,4892 +68,4702 @@ glusterd_set_shared_storage (dict_t *dict, char *key, char *value,
* in glusterd_get_global_options_for_all_vols, or else we might crash there.
*/
glusterd_all_vol_opts valid_all_vol_opts[] = {
- { GLUSTERD_QUORUM_RATIO_KEY, "51" },
- { GLUSTERD_SHARED_STORAGE_KEY, "disable" },
- /* This one actually gets filled in dynamically. */
- { GLUSTERD_GLOBAL_OP_VERSION_KEY, "BUG_NO_OP_VERSION"},
- /*
- * This one should be filled in dynamically, but it didn't used to be
- * (before the defaults were added here) so the value is unclear.
- *
- * TBD: add a dynamic handler to set the appropriate value
- */
- { GLUSTERD_MAX_OP_VERSION_KEY, "BUG_NO_MAX_OP_VERSION"},
- { GLUSTERD_BRICK_MULTIPLEX_KEY, "disable"},
- /* Set this value to 0 by default implying brick-multiplexing
- * behaviour with no limit set on the number of brick instances that
- * can be attached per process.
- * TBD: Discuss the default value for this. Maybe this should be a
- * dynamic value depending on the memory specifications per node */
- { GLUSTERD_BRICKMUX_LIMIT_KEY, "0"},
- { GLUSTERD_LOCALTIME_LOGGING_KEY, "disable"},
- { GLUSTERD_DAEMON_LOG_LEVEL_KEY, "INFO"},
- { NULL },
+ {GLUSTERD_QUORUM_RATIO_KEY, "51"},
+ {GLUSTERD_SHARED_STORAGE_KEY, "disable"},
+ /* This one actually gets filled in dynamically. */
+ {GLUSTERD_GLOBAL_OP_VERSION_KEY, "BUG_NO_OP_VERSION"},
+ /*
+     * This one should be filled in dynamically, but it wasn't previously
+ * (before the defaults were added here) so the value is unclear.
+ *
+ * TBD: add a dynamic handler to set the appropriate value
+ */
+ {GLUSTERD_MAX_OP_VERSION_KEY, "BUG_NO_MAX_OP_VERSION"},
+ {GLUSTERD_BRICK_MULTIPLEX_KEY, "disable"},
+ /* Set this value to 0 by default implying brick-multiplexing
+ * behaviour with no limit set on the number of brick instances that
+ * can be attached per process.
+ * TBD: Discuss the default value for this. Maybe this should be a
+ * dynamic value depending on the memory specifications per node */
+ {GLUSTERD_BRICKMUX_LIMIT_KEY, "0"},
+ {GLUSTERD_LOCALTIME_LOGGING_KEY, "disable"},
+ {GLUSTERD_DAEMON_LOG_LEVEL_KEY, "INFO"},
+ {NULL},
};
static struct cds_list_head gd_op_sm_queue;
synclock_t gd_op_sm_lock;
-glusterd_op_info_t opinfo = {{0},};
+glusterd_op_info_t opinfo = {
+ {0},
+};
int
-glusterd_bricks_select_rebalance_volume (dict_t *dict, char **op_errstr,
- struct cds_list_head *selected);
+glusterd_bricks_select_rebalance_volume(dict_t *dict, char **op_errstr,
+ struct cds_list_head *selected);
int
-glusterd_bricks_select_tier_volume (dict_t *dict, char **op_errstr,
- struct cds_list_head *selected);
-
+glusterd_bricks_select_tier_volume(dict_t *dict, char **op_errstr,
+ struct cds_list_head *selected);
int32_t
-glusterd_txn_opinfo_dict_init ()
+glusterd_txn_opinfo_dict_init()
{
- int32_t ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
- priv->glusterd_txn_opinfo = dict_new ();
- if (!priv->glusterd_txn_opinfo) {
- ret = -1;
- goto out;
- }
+ priv->glusterd_txn_opinfo = dict_new();
+ if (!priv->glusterd_txn_opinfo) {
+ ret = -1;
+ goto out;
+ }
- memset (priv->global_txn_id, '\0', sizeof(uuid_t));
+ memset(priv->global_txn_id, '\0', sizeof(uuid_t));
- ret = 0;
+ ret = 0;
out:
- return ret;
+ return ret;
}
void
-glusterd_txn_opinfo_dict_fini ()
+glusterd_txn_opinfo_dict_fini()
{
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
- if (priv->glusterd_txn_opinfo)
- dict_unref (priv->glusterd_txn_opinfo);
+ if (priv->glusterd_txn_opinfo)
+ dict_unref(priv->glusterd_txn_opinfo);
}
void
-glusterd_txn_opinfo_init (glusterd_op_info_t *opinfo,
- glusterd_op_sm_state_info_t *state, int *op,
- dict_t *op_ctx, rpcsvc_request_t *req)
+glusterd_txn_opinfo_init(glusterd_op_info_t *opinfo,
+ glusterd_op_sm_state_info_t *state, int *op,
+ dict_t *op_ctx, rpcsvc_request_t *req)
{
- glusterd_conf_t *conf = NULL;
+ glusterd_conf_t *conf = NULL;
- GF_ASSERT (opinfo);
+ GF_ASSERT(opinfo);
- conf = THIS->private;
- GF_ASSERT (conf);
+ conf = THIS->private;
+ GF_ASSERT(conf);
- if (state)
- opinfo->state = *state;
+ if (state)
+ opinfo->state = *state;
- if (op)
- opinfo->op = *op;
+ if (op)
+ opinfo->op = *op;
- if (op_ctx)
- opinfo->op_ctx = dict_ref(op_ctx);
- else
- opinfo->op_ctx = NULL;
+ if (op_ctx)
+ opinfo->op_ctx = dict_ref(op_ctx);
+ else
+ opinfo->op_ctx = NULL;
- if (req)
- opinfo->req = req;
+ if (req)
+ opinfo->req = req;
- opinfo->txn_generation = conf->generation;
- cmm_smp_rmb ();
+ opinfo->txn_generation = conf->generation;
+ cmm_smp_rmb();
- return;
+ return;
}
int32_t
-glusterd_generate_txn_id (dict_t *dict, uuid_t **txn_id)
+glusterd_generate_txn_id(dict_t *dict, uuid_t **txn_id)
{
- int32_t ret = -1;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
- GF_ASSERT (dict);
-
- *txn_id = GF_MALLOC (sizeof(uuid_t), gf_common_mt_uuid_t);
- if (!*txn_id)
- goto out;
-
- if (priv->op_version < GD_OP_VERSION_3_6_0)
- gf_uuid_copy (**txn_id, priv->global_txn_id);
- else
- gf_uuid_generate (**txn_id);
-
- ret = dict_set_bin (dict, "transaction_id",
- *txn_id, sizeof (**txn_id));
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set transaction id.");
- goto out;
- }
-
- gf_msg_debug (this->name, 0,
- "Transaction_id = %s", uuid_utoa (**txn_id));
+ int32_t ret = -1;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+ GF_ASSERT(dict);
+
+ *txn_id = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
+ if (!*txn_id)
+ goto out;
+
+ if (priv->op_version < GD_OP_VERSION_3_6_0)
+ gf_uuid_copy(**txn_id, priv->global_txn_id);
+ else
+ gf_uuid_generate(**txn_id);
+
+ ret = dict_set_bin(dict, "transaction_id", *txn_id, sizeof(**txn_id));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set transaction id.");
+ goto out;
+ }
+
+ gf_msg_debug(this->name, 0, "Transaction_id = %s", uuid_utoa(**txn_id));
out:
- if (ret && *txn_id) {
- GF_FREE (*txn_id);
- *txn_id = NULL;
- }
+ if (ret && *txn_id) {
+ GF_FREE(*txn_id);
+ *txn_id = NULL;
+ }
- return ret;
+ return ret;
}
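
Two cases hide in glusterd_generate_txn_id: below op-version 3.6.0 every transaction reuses the one global id (so only a single transaction can be tracked at a time), while newer clusters mint a fresh uuid per transaction. The uuid plumbing itself is plain libuuid; a minimal sketch (link with -luuid; uuid_unparse plays the role of uuid_utoa here):

    #include <stdio.h>
    #include <uuid/uuid.h>

    int main(void)
    {
        uuid_t txn_id;
        char buf[37]; /* 36 chars + NUL */

        uuid_generate(txn_id); /* fresh id, as on op-version >= 3.6.0 */
        uuid_unparse(txn_id, buf);
        printf("transaction_id = %s\n", buf);
        return 0;
    }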
int32_t
-glusterd_get_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo)
+glusterd_get_txn_opinfo(uuid_t *txn_id, glusterd_op_info_t *opinfo)
{
- int32_t ret = -1;
- glusterd_txn_opinfo_obj *opinfo_obj = NULL;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- if (!txn_id || !opinfo) {
- gf_msg_callingfn (this->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_ID_GET_FAIL,
- "Empty transaction id or opinfo received.");
- ret = -1;
- goto out;
- }
+ int32_t ret = -1;
+ glusterd_txn_opinfo_obj *opinfo_obj = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ if (!txn_id || !opinfo) {
+ gf_msg_callingfn(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
+ "Empty transaction id or opinfo received.");
+ ret = -1;
+ goto out;
+ }
- ret = dict_get_bin(priv->glusterd_txn_opinfo,
- uuid_utoa (*txn_id),
- (void **) &opinfo_obj);
- if (ret)
- goto out;
+ ret = dict_get_bin(priv->glusterd_txn_opinfo, uuid_utoa(*txn_id),
+ (void **)&opinfo_obj);
+ if (ret)
+ goto out;
- (*opinfo) = opinfo_obj->opinfo;
+ (*opinfo) = opinfo_obj->opinfo;
- gf_msg_debug (this->name, 0,
- "Successfully got opinfo for transaction ID : %s",
- uuid_utoa (*txn_id));
+ gf_msg_debug(this->name, 0,
+ "Successfully got opinfo for transaction ID : %s",
+ uuid_utoa(*txn_id));
- ret = 0;
+ ret = 0;
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
int32_t
-glusterd_set_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo)
+glusterd_set_txn_opinfo(uuid_t *txn_id, glusterd_op_info_t *opinfo)
{
- int32_t ret = -1;
- glusterd_txn_opinfo_obj *opinfo_obj = NULL;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- if (!txn_id) {
- gf_msg_callingfn (this->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_ID_GET_FAIL,
- "Empty transaction id received.");
- ret = -1;
- goto out;
+ int32_t ret = -1;
+ glusterd_txn_opinfo_obj *opinfo_obj = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ if (!txn_id) {
+ gf_msg_callingfn(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
+ "Empty transaction id received.");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_bin(priv->glusterd_txn_opinfo, uuid_utoa(*txn_id),
+ (void **)&opinfo_obj);
+ if (ret) {
+ opinfo_obj = GF_CALLOC(1, sizeof(glusterd_txn_opinfo_obj),
+ gf_common_mt_txn_opinfo_obj_t);
+ if (!opinfo_obj) {
+ ret = -1;
+ goto out;
}
- ret = dict_get_bin(priv->glusterd_txn_opinfo,
- uuid_utoa (*txn_id),
- (void **) &opinfo_obj);
+ ret = dict_set_bin(priv->glusterd_txn_opinfo, uuid_utoa(*txn_id),
+ opinfo_obj, sizeof(glusterd_txn_opinfo_obj));
if (ret) {
- opinfo_obj = GF_CALLOC (1, sizeof(glusterd_txn_opinfo_obj),
- gf_common_mt_txn_opinfo_obj_t);
- if (!opinfo_obj) {
- ret = -1;
- goto out;
- }
-
- ret = dict_set_bin(priv->glusterd_txn_opinfo,
- uuid_utoa (*txn_id), opinfo_obj,
- sizeof(glusterd_txn_opinfo_obj));
- if (ret) {
- gf_msg_callingfn (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED,
- "Unable to set opinfo for transaction"
- " ID : %s", uuid_utoa (*txn_id));
- goto out;
- }
+ gf_msg_callingfn(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED,
+ "Unable to set opinfo for transaction"
+ " ID : %s",
+ uuid_utoa(*txn_id));
+ goto out;
}
+ }
- opinfo_obj->opinfo = (*opinfo);
+ opinfo_obj->opinfo = (*opinfo);
- gf_msg_debug (this->name, 0,
- "Successfully set opinfo for transaction ID : %s",
- uuid_utoa (*txn_id));
- ret = 0;
+ gf_msg_debug(this->name, 0,
+ "Successfully set opinfo for transaction ID : %s",
+ uuid_utoa(*txn_id));
+ ret = 0;
out:
- if (ret)
- if (opinfo_obj)
- GF_FREE (opinfo_obj);
+ if (ret)
+ if (opinfo_obj)
+ GF_FREE(opinfo_obj);
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
int32_t
-glusterd_clear_txn_opinfo (uuid_t *txn_id)
+glusterd_clear_txn_opinfo(uuid_t *txn_id)
{
- int32_t ret = -1;
- glusterd_op_info_t txn_op_info = {{0},};
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- if (!txn_id) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_ID_GET_FAIL,
- "Empty transaction id received.");
- ret = -1;
- goto out;
- }
+ int32_t ret = -1;
+ glusterd_op_info_t txn_op_info = {
+ {0},
+ };
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ if (!txn_id) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
+ "Empty transaction id received.");
+ ret = -1;
+ goto out;
+ }
- ret = glusterd_get_txn_opinfo (txn_id, &txn_op_info);
- if (ret) {
- gf_msg_callingfn (this->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_GET_FAIL,
- "Unable to get transaction opinfo "
- "for transaction ID : %s",
- uuid_utoa (*txn_id));
- goto out;
- }
+ ret = glusterd_get_txn_opinfo(txn_id, &txn_op_info);
+ if (ret) {
+ gf_msg_callingfn(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_TRANS_OPINFO_GET_FAIL,
+ "Unable to get transaction opinfo "
+ "for transaction ID : %s",
+ uuid_utoa(*txn_id));
+ goto out;
+ }
- if (txn_op_info.op_ctx)
- dict_unref (txn_op_info.op_ctx);
+ if (txn_op_info.op_ctx)
+ dict_unref(txn_op_info.op_ctx);
- dict_del(priv->glusterd_txn_opinfo, uuid_utoa (*txn_id));
+ dict_del(priv->glusterd_txn_opinfo, uuid_utoa(*txn_id));
- gf_msg_debug (this->name, 0,
- "Successfully cleared opinfo for transaction ID : %s",
- uuid_utoa (*txn_id));
+ gf_msg_debug(this->name, 0,
+ "Successfully cleared opinfo for transaction ID : %s",
+ uuid_utoa(*txn_id));
- ret = 0;
+ ret = 0;
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
static int glusterfs_port = GLUSTERD_DEFAULT_PORT;
static char *glusterd_op_sm_state_names[] = {
- "Default",
- "Lock sent",
- "Locked",
- "Stage op sent",
- "Staged",
- "Commit op sent",
- "Committed",
- "Unlock sent",
- "Stage op failed",
- "Commit op failed",
- "Brick op sent",
- "Brick op failed",
- "Brick op Committed",
- "Brick op Commit failed",
- "Ack drain",
- "Invalid",
+ "Default",
+ "Lock sent",
+ "Locked",
+ "Stage op sent",
+ "Staged",
+ "Commit op sent",
+ "Committed",
+ "Unlock sent",
+ "Stage op failed",
+ "Commit op failed",
+ "Brick op sent",
+ "Brick op failed",
+ "Brick op Committed",
+ "Brick op Commit failed",
+ "Ack drain",
+ "Invalid",
};
static char *glusterd_op_sm_event_names[] = {
- "GD_OP_EVENT_NONE",
- "GD_OP_EVENT_START_LOCK",
- "GD_OP_EVENT_LOCK",
- "GD_OP_EVENT_RCVD_ACC",
- "GD_OP_EVENT_ALL_ACC",
- "GD_OP_EVENT_STAGE_ACC",
- "GD_OP_EVENT_COMMIT_ACC",
- "GD_OP_EVENT_RCVD_RJT",
- "GD_OP_EVENT_STAGE_OP",
- "GD_OP_EVENT_COMMIT_OP",
- "GD_OP_EVENT_UNLOCK",
- "GD_OP_EVENT_START_UNLOCK",
- "GD_OP_EVENT_ALL_ACK",
- "GD_OP_EVENT_LOCAL_UNLOCK_NO_RESP",
- "GD_OP_EVENT_INVALID"
-};
-
-char*
-glusterd_op_sm_state_name_get (int state)
+ "GD_OP_EVENT_NONE", "GD_OP_EVENT_START_LOCK",
+ "GD_OP_EVENT_LOCK", "GD_OP_EVENT_RCVD_ACC",
+ "GD_OP_EVENT_ALL_ACC", "GD_OP_EVENT_STAGE_ACC",
+ "GD_OP_EVENT_COMMIT_ACC", "GD_OP_EVENT_RCVD_RJT",
+ "GD_OP_EVENT_STAGE_OP", "GD_OP_EVENT_COMMIT_OP",
+ "GD_OP_EVENT_UNLOCK", "GD_OP_EVENT_START_UNLOCK",
+ "GD_OP_EVENT_ALL_ACK", "GD_OP_EVENT_LOCAL_UNLOCK_NO_RESP",
+ "GD_OP_EVENT_INVALID"};
+
+char *
+glusterd_op_sm_state_name_get(int state)
{
- if (state < 0 || state >= GD_OP_STATE_MAX)
- return glusterd_op_sm_state_names[GD_OP_STATE_MAX];
- return glusterd_op_sm_state_names[state];
+ if (state < 0 || state >= GD_OP_STATE_MAX)
+ return glusterd_op_sm_state_names[GD_OP_STATE_MAX];
+ return glusterd_op_sm_state_names[state];
}
-char*
-glusterd_op_sm_event_name_get (int event)
+char *
+glusterd_op_sm_event_name_get(int event)
{
- if (event < 0 || event >= GD_OP_EVENT_MAX)
- return glusterd_op_sm_event_names[GD_OP_EVENT_MAX];
- return glusterd_op_sm_event_names[event];
+ if (event < 0 || event >= GD_OP_EVENT_MAX)
+ return glusterd_op_sm_event_names[GD_OP_EVENT_MAX];
+ return glusterd_op_sm_event_names[event];
}
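
Both name tables end in a sentinel entry ("Invalid" / "GD_OP_EVENT_INVALID"), which is what lets the two getters clamp any out-of-range value to the last slot instead of indexing past the array. The pattern in isolation:

    #include <stdio.h>

    enum { EV_NONE, EV_LOCK, EV_MAX }; /* EV_MAX doubles as sentinel index */

    static const char *ev_names[] = {"none", "lock", "invalid"};

    static const char *ev_name(int ev)
    {
        if (ev < 0 || ev >= EV_MAX)
            return ev_names[EV_MAX]; /* sentinel, never out of bounds */
        return ev_names[ev];
    }

    int main(void)
    {
        printf("%s %s\n", ev_name(EV_LOCK), ev_name(42)); /* lock invalid */
        return 0;
    }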
void
-glusterd_destroy_lock_ctx (glusterd_op_lock_ctx_t *ctx)
+glusterd_destroy_lock_ctx(glusterd_op_lock_ctx_t *ctx)
{
- if (!ctx)
- return;
- GF_FREE (ctx);
+ if (!ctx)
+ return;
+ GF_FREE(ctx);
}
void
-glusterd_set_volume_status (glusterd_volinfo_t *volinfo,
- glusterd_volume_status status)
+glusterd_set_volume_status(glusterd_volinfo_t *volinfo,
+ glusterd_volume_status status)
{
- GF_ASSERT (volinfo);
- volinfo->status = status;
+ GF_ASSERT(volinfo);
+ volinfo->status = status;
}
static int
-glusterd_op_sm_inject_all_acc (uuid_t *txn_id)
+glusterd_op_sm_inject_all_acc(uuid_t *txn_id)
{
- int32_t ret = -1;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACC, txn_id, NULL);
- gf_msg_debug ("glusterd", 0, "Returning %d", ret);
- return ret;
+ int32_t ret = -1;
+ ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACC, txn_id, NULL);
+ gf_msg_debug("glusterd", 0, "Returning %d", ret);
+ return ret;
}
static int
-glusterd_check_bitrot_cmd (char *key, char *value, char *errstr, size_t size)
+glusterd_check_bitrot_cmd(char *key, char *value, char *errstr, size_t size)
{
- int ret = -1;
-
- if ((!strncmp (key, "bitrot", SLEN ("bitrot"))) ||
- (!strncmp (key, "features.bitrot", SLEN ("features.bitrot")))) {
- snprintf (errstr, size, " 'gluster volume set <VOLNAME> %s' "
- "is invalid command. Use 'gluster volume bitrot "
- "<VOLNAME> {enable|disable}' instead.", key);
- ret = -1;
- goto out;
- } else if ((!strncmp (key, "scrub-freq", SLEN ("scrub-freq"))) ||
- (!strncmp (key, "features.scrub-freq",
- SLEN ("features.scrub-freq")))) {
- snprintf (errstr, size, " 'gluster volume "
- "set <VOLNAME> %s' is invalid command. Use 'gluster "
- "volume bitrot <VOLNAME> scrub-frequency"
- " {hourly|daily|weekly|biweekly|monthly}' instead.",
- key);
- ret = -1;
- goto out;
- } else if ((!strncmp (key, "scrub", SLEN ("scrub"))) ||
- (!strncmp (key, "features.scrub",
- SLEN ("features.scrub")))) {
- snprintf (errstr, size, " 'gluster volume set <VOLNAME> %s' is "
- "invalid command. Use 'gluster volume bitrot "
- "<VOLNAME> scrub {pause|resume}' instead.", key);
- ret = -1;
- goto out;
- } else if ((!strncmp (key, "scrub-throttle",
- SLEN ("scrub-throttle"))) ||
- (!strncmp (key, "features.scrub-throttle",
- SLEN ("features.scrub-throttle")))) {
- snprintf (errstr, size, " 'gluster volume set <VOLNAME> %s' is "
- "invalid command. Use 'gluster volume bitrot "
- "<VOLNAME> scrub-throttle {lazy|normal|aggressive}' "
- "instead.",
- key);
- ret = -1;
- goto out;
- }
+ int ret = -1;
+
+ if ((!strncmp(key, "bitrot", SLEN("bitrot"))) ||
+ (!strncmp(key, "features.bitrot", SLEN("features.bitrot")))) {
+ snprintf(errstr, size,
+ " 'gluster volume set <VOLNAME> %s' "
+ "is invalid command. Use 'gluster volume bitrot "
+ "<VOLNAME> {enable|disable}' instead.",
+ key);
+ ret = -1;
+ goto out;
+ } else if ((!strncmp(key, "scrub-freq", SLEN("scrub-freq"))) ||
+ (!strncmp(key, "features.scrub-freq",
+ SLEN("features.scrub-freq")))) {
+ snprintf(errstr, size,
+ " 'gluster volume "
+ "set <VOLNAME> %s' is invalid command. Use 'gluster "
+ "volume bitrot <VOLNAME> scrub-frequency"
+ " {hourly|daily|weekly|biweekly|monthly}' instead.",
+ key);
+ ret = -1;
+ goto out;
+ } else if ((!strncmp(key, "scrub", SLEN("scrub"))) ||
+ (!strncmp(key, "features.scrub", SLEN("features.scrub")))) {
+ snprintf(errstr, size,
+ " 'gluster volume set <VOLNAME> %s' is "
+ "invalid command. Use 'gluster volume bitrot "
+ "<VOLNAME> scrub {pause|resume}' instead.",
+ key);
+ ret = -1;
+ goto out;
+ } else if ((!strncmp(key, "scrub-throttle", SLEN("scrub-throttle"))) ||
+ (!strncmp(key, "features.scrub-throttle",
+ SLEN("features.scrub-throttle")))) {
+ snprintf(errstr, size,
+ " 'gluster volume set <VOLNAME> %s' is "
+ "invalid command. Use 'gluster volume bitrot "
+ "<VOLNAME> scrub-throttle {lazy|normal|aggressive}' "
+ "instead.",
+ key);
+ ret = -1;
+ goto out;
+ }
- ret = 0;
+ ret = 0;
out:
- return ret;
+ return ret;
}
static int
-glusterd_check_quota_cmd (char *key, char *value, char *errstr, size_t size)
+glusterd_check_quota_cmd(char *key, char *value, char *errstr, size_t size)
{
- int ret = -1;
- gf_boolean_t b = _gf_false;
+ int ret = -1;
+ gf_boolean_t b = _gf_false;
- if ((strcmp (key, "quota") == 0) ||
- (strcmp (key, "features.quota") == 0)) {
- ret = gf_string2boolean (value, &b);
- if (ret)
- goto out;
- if (b) {
- snprintf (errstr, size, " 'gluster "
- "volume set <VOLNAME> %s %s' is "
- "deprecated. Use 'gluster volume "
- "quota <VOLNAME> enable' instead.",
- key, value);
- ret = -1;
- goto out;
- } else {
- snprintf (errstr, size, " 'gluster "
- "volume set <VOLNAME> %s %s' is "
- "deprecated. Use 'gluster volume "
- "quota <VOLNAME> disable' instead.",
- key, value);
- ret = -1;
- goto out;
- }
- } else if ((strcmp (key, "inode-quota") == 0) ||
- (strcmp (key, "features.inode-quota") == 0)) {
- ret = gf_string2boolean (value, &b);
- if (ret)
- goto out;
- if (b) {
- snprintf (errstr, size, " 'gluster "
- "volume set <VOLNAME> %s %s' is "
- "deprecated. Use 'gluster volume "
- "inode-quota <VOLNAME> enable' instead.",
- key, value);
- ret = -1;
- goto out;
- } else {
- /* inode-quota disable not supported,
- * use quota disable
- */
- snprintf (errstr, size, " 'gluster "
- "volume set <VOLNAME> %s %s' is "
- "deprecated. Use 'gluster volume "
- "quota <VOLNAME> disable' instead.",
- key, value);
- ret = -1;
- goto out;
- }
- }
-
- ret = 0;
+ if ((strcmp(key, "quota") == 0) || (strcmp(key, "features.quota") == 0)) {
+ ret = gf_string2boolean(value, &b);
+ if (ret)
+ goto out;
+ if (b) {
+ snprintf(errstr, size,
+ " 'gluster "
+ "volume set <VOLNAME> %s %s' is "
+ "deprecated. Use 'gluster volume "
+ "quota <VOLNAME> enable' instead.",
+ key, value);
+ ret = -1;
+ goto out;
+ } else {
+ snprintf(errstr, size,
+ " 'gluster "
+ "volume set <VOLNAME> %s %s' is "
+ "deprecated. Use 'gluster volume "
+ "quota <VOLNAME> disable' instead.",
+ key, value);
+ ret = -1;
+ goto out;
+ }
+ } else if ((strcmp(key, "inode-quota") == 0) ||
+ (strcmp(key, "features.inode-quota") == 0)) {
+ ret = gf_string2boolean(value, &b);
+ if (ret)
+ goto out;
+ if (b) {
+ snprintf(errstr, size,
+ " 'gluster "
+ "volume set <VOLNAME> %s %s' is "
+ "deprecated. Use 'gluster volume "
+ "inode-quota <VOLNAME> enable' instead.",
+ key, value);
+ ret = -1;
+ goto out;
+ } else {
+ /* inode-quota disable not supported,
+ * use quota disable
+ */
+ snprintf(errstr, size,
+ " 'gluster "
+ "volume set <VOLNAME> %s %s' is "
+ "deprecated. Use 'gluster volume "
+ "quota <VOLNAME> disable' instead.",
+ key, value);
+ ret = -1;
+ goto out;
+ }
+ }
+
+ ret = 0;
out:
- return ret;
+ return ret;
}
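
Both deprecated-key guards in glusterd_check_quota_cmd() funnel the user-supplied value through gf_string2boolean() before picking the enable- or disable-flavoured error text. A tiny stand-in for that parse step (demo code only; the accepted spellings of the real gf_ helper may differ):

#include <stdio.h>
#include <strings.h>

/* Hypothetical string-to-boolean parse: 0 on success with *b filled,
 * -1 for anything unrecognised. */
static int
demo_string2boolean(const char *value, int *b)
{
    if (!strcasecmp(value, "on") || !strcasecmp(value, "true") ||
        !strcasecmp(value, "enable")) {
        *b = 1;
        return 0;
    }
    if (!strcasecmp(value, "off") || !strcasecmp(value, "false") ||
        !strcasecmp(value, "disable")) {
        *b = 0;
        return 0;
    }
    return -1;
}

int
main(void)
{
    int b = 0;

    if (demo_string2boolean("enable", &b) == 0)
        printf("parsed: %d\n", b); /* parsed: 1 */
    return 0;
}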
int
-glusterd_brick_op_build_payload (glusterd_op_t op, glusterd_brickinfo_t *brickinfo,
- gd1_mgmt_brick_op_req **req, dict_t *dict)
+glusterd_brick_op_build_payload(glusterd_op_t op,
+ glusterd_brickinfo_t *brickinfo,
+ gd1_mgmt_brick_op_req **req, dict_t *dict)
{
- int ret = -1;
- gd1_mgmt_brick_op_req *brick_req = NULL;
- char *volname = NULL;
- char name[1024] = {0,};
- gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID;
- xlator_t *this = NULL;
- glusterd_volinfo_t *volinfo = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (op < GD_OP_MAX);
- GF_ASSERT (op > GD_OP_NONE);
- GF_ASSERT (req);
-
-
- switch (op) {
+ int ret = -1;
+ gd1_mgmt_brick_op_req *brick_req = NULL;
+ char *volname = NULL;
+ char name[1024] = {
+ 0,
+ };
+ gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID;
+ xlator_t *this = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(op < GD_OP_MAX);
+ GF_ASSERT(op > GD_OP_NONE);
+ GF_ASSERT(req);
+
+ switch (op) {
case GD_OP_REMOVE_BRICK:
case GD_OP_STOP_VOLUME:
- brick_req = GF_CALLOC (1, sizeof (*brick_req),
- gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
- goto out;
- brick_req->op = GLUSTERD_BRICK_TERMINATE;
- brick_req->name = brickinfo->path;
- glusterd_set_brick_status (brickinfo, GF_BRICK_STOPPING);
- break;
+ brick_req = GF_CALLOC(1, sizeof(*brick_req),
+ gf_gld_mt_mop_brick_req_t);
+ if (!brick_req)
+ goto out;
+ brick_req->op = GLUSTERD_BRICK_TERMINATE;
+ brick_req->name = brickinfo->path;
+ glusterd_set_brick_status(brickinfo, GF_BRICK_STOPPING);
+ break;
case GD_OP_PROFILE_VOLUME:
- brick_req = GF_CALLOC (1, sizeof (*brick_req),
- gf_gld_mt_mop_brick_req_t);
+ brick_req = GF_CALLOC(1, sizeof(*brick_req),
+ gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
- goto out;
+ if (!brick_req)
+ goto out;
- brick_req->op = GLUSTERD_BRICK_XLATOR_INFO;
- brick_req->name = brickinfo->path;
+ brick_req->op = GLUSTERD_BRICK_XLATOR_INFO;
+ brick_req->name = brickinfo->path;
- break;
- case GD_OP_HEAL_VOLUME:
- {
- brick_req = GF_CALLOC (1, sizeof (*brick_req),
- gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
- goto out;
+ break;
+ case GD_OP_HEAL_VOLUME: {
+ brick_req = GF_CALLOC(1, sizeof(*brick_req),
+ gf_gld_mt_mop_brick_req_t);
+ if (!brick_req)
+ goto out;
- brick_req->op = GLUSTERD_BRICK_XLATOR_OP;
- brick_req->name = "";
- ret = dict_get_int32n (dict, "heal-op", SLEN ("heal-op"),
- (int32_t*)&heal_op);
- if (ret)
- goto out;
- ret = dict_set_int32n (dict, "xl-op", SLEN ("xl-op"),
- heal_op);
- if (ret)
- goto out;
- }
- break;
- case GD_OP_STATUS_VOLUME:
- {
- brick_req = GF_CALLOC (1, sizeof (*brick_req),
- gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
- goto out;
- brick_req->op = GLUSTERD_BRICK_STATUS;
- brick_req->name = "";
- ret = dict_set_strn (dict, "brick-name", SLEN ("brick-name"),
- brickinfo->path);
- if (ret)
- goto out;
- }
- break;
+ brick_req->op = GLUSTERD_BRICK_XLATOR_OP;
+ brick_req->name = "";
+ ret = dict_get_int32n(dict, "heal-op", SLEN("heal-op"),
+ (int32_t *)&heal_op);
+ if (ret)
+ goto out;
+ ret = dict_set_int32n(dict, "xl-op", SLEN("xl-op"), heal_op);
+ if (ret)
+ goto out;
+ } break;
+ case GD_OP_STATUS_VOLUME: {
+ brick_req = GF_CALLOC(1, sizeof(*brick_req),
+ gf_gld_mt_mop_brick_req_t);
+ if (!brick_req)
+ goto out;
+ brick_req->op = GLUSTERD_BRICK_STATUS;
+ brick_req->name = "";
+ ret = dict_set_strn(dict, "brick-name", SLEN("brick-name"),
+ brickinfo->path);
+ if (ret)
+ goto out;
+ } break;
case GD_OP_REBALANCE:
case GD_OP_DETACH_TIER_STATUS:
case GD_OP_TIER_STATUS:
case GD_OP_DEFRAG_BRICK_VOLUME:
- brick_req = GF_CALLOC (1, sizeof (*brick_req),
- gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
- goto out;
+ brick_req = GF_CALLOC(1, sizeof(*brick_req),
+ gf_gld_mt_mop_brick_req_t);
+ if (!brick_req)
+ goto out;
- brick_req->op = GLUSTERD_BRICK_XLATOR_DEFRAG;
- ret = dict_get_strn (dict, "volname", SLEN ("volname"),
- &volname);
- if (ret)
- goto out;
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret)
- goto out;
- if (volinfo->type == GF_CLUSTER_TYPE_TIER)
- snprintf (name, sizeof (name), "%s-tier-dht", volname);
- else
- snprintf (name, sizeof (name), "%s-dht", volname);
- brick_req->name = gf_strdup (name);
+ brick_req->op = GLUSTERD_BRICK_XLATOR_DEFRAG;
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret)
+ goto out;
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret)
+ goto out;
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER)
+ snprintf(name, sizeof(name), "%s-tier-dht", volname);
+ else
+ snprintf(name, sizeof(name), "%s-dht", volname);
+ brick_req->name = gf_strdup(name);
- break;
+ break;
case GD_OP_SNAP:
case GD_OP_BARRIER:
- brick_req = GF_CALLOC (1, sizeof(*brick_req),
- gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
- goto out;
- brick_req->op = GLUSTERD_BRICK_BARRIER;
- brick_req->name = brickinfo->path;
- break;
+ brick_req = GF_CALLOC(1, sizeof(*brick_req),
+ gf_gld_mt_mop_brick_req_t);
+ if (!brick_req)
+ goto out;
+ brick_req->op = GLUSTERD_BRICK_BARRIER;
+ brick_req->name = brickinfo->path;
+ break;
default:
- goto out;
- break;
- }
+ goto out;
+ break;
+ }
- ret = dict_allocate_and_serialize (dict, &brick_req->input.input_val,
- &brick_req->input.input_len);
- if (ret)
- goto out;
- *req = brick_req;
- ret = 0;
+ ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val,
+ &brick_req->input.input_len);
+ if (ret)
+ goto out;
+ *req = brick_req;
+ ret = 0;
out:
- if (ret && brick_req)
- GF_FREE (brick_req);
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ if (ret && brick_req)
+ GF_FREE(brick_req);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
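
glusterd_brick_op_build_payload() follows the allocate/switch/serialize shape used throughout this file: each case allocates and fills an op-specific request, dict_allocate_and_serialize() then attaches the serialized dict, and a single out: label frees the request on any failure. A stripped-down, hypothetical sketch of that control flow (the per-case allocations are compressed into one here; none of these demo types exist in glusterd):

#include <stdlib.h>
#include <string.h>

typedef struct {
    int op;
    char *name;
} demo_req_t;

enum { DEMO_OP_NONE, DEMO_OP_STOP, DEMO_OP_STATUS };

static int
demo_build_payload(int op, const char *path, demo_req_t **req)
{
    int ret = -1;
    demo_req_t *r = calloc(1, sizeof(*r));

    if (!r)
        goto out;

    switch (op) {
        case DEMO_OP_STOP:
            r->op = op;
            r->name = strdup(path); /* op-specific payload */
            break;
        case DEMO_OP_STATUS:
            r->op = op;
            r->name = strdup("");
            break;
        default:
            goto out; /* unknown op: fall through to cleanup */
    }

    if (!r->name)
        goto out;

    *req = r;
    ret = 0;
out:
    if (ret && r) { /* single cleanup point on every failure path */
        free(r->name);
        free(r);
    }
    return ret;
}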
int
-glusterd_node_op_build_payload (glusterd_op_t op, gd1_mgmt_brick_op_req **req,
- dict_t *dict)
+glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req,
+ dict_t *dict)
{
- int ret = -1;
- gd1_mgmt_brick_op_req *brick_req = NULL;
- char *volname = NULL;
+ int ret = -1;
+ gd1_mgmt_brick_op_req *brick_req = NULL;
+ char *volname = NULL;
- GF_ASSERT (op < GD_OP_MAX);
- GF_ASSERT (op > GD_OP_NONE);
- GF_ASSERT (req);
+ GF_ASSERT(op < GD_OP_MAX);
+ GF_ASSERT(op > GD_OP_NONE);
+ GF_ASSERT(req);
- switch (op) {
+ switch (op) {
case GD_OP_PROFILE_VOLUME:
- brick_req = GF_CALLOC (1, sizeof (*brick_req),
- gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
- goto out;
+ brick_req = GF_CALLOC(1, sizeof(*brick_req),
+ gf_gld_mt_mop_brick_req_t);
+ if (!brick_req)
+ goto out;
- brick_req->op = GLUSTERD_NODE_PROFILE;
- brick_req->name = "";
+ brick_req->op = GLUSTERD_NODE_PROFILE;
+ brick_req->name = "";
- break;
+ break;
case GD_OP_STATUS_VOLUME:
- brick_req = GF_CALLOC (1, sizeof (*brick_req),
- gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
- goto out;
+ brick_req = GF_CALLOC(1, sizeof(*brick_req),
+ gf_gld_mt_mop_brick_req_t);
+ if (!brick_req)
+ goto out;
- brick_req->op = GLUSTERD_NODE_STATUS;
- brick_req->name = "";
+ brick_req->op = GLUSTERD_NODE_STATUS;
+ brick_req->name = "";
- break;
+ break;
case GD_OP_SCRUB_STATUS:
case GD_OP_SCRUB_ONDEMAND:
- brick_req = GF_CALLOC (1, sizeof(*brick_req),
- gf_gld_mt_mop_brick_req_t);
- if (!brick_req)
- goto out;
+ brick_req = GF_CALLOC(1, sizeof(*brick_req),
+ gf_gld_mt_mop_brick_req_t);
+ if (!brick_req)
+ goto out;
- brick_req->op = GLUSTERD_NODE_BITROT;
+ brick_req->op = GLUSTERD_NODE_BITROT;
- ret = dict_get_strn (dict, "volname", SLEN ("volname"),
- &volname);
- if (ret)
- goto out;
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret)
+ goto out;
- brick_req->name = gf_strdup (volname);
- break;
+ brick_req->name = gf_strdup(volname);
+ break;
default:
- goto out;
- }
+ goto out;
+ }
- ret = dict_allocate_and_serialize (dict, &brick_req->input.input_val,
- &brick_req->input.input_len);
+ ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val,
+ &brick_req->input.input_len);
- if (ret)
- goto out;
+ if (ret)
+ goto out;
- *req = brick_req;
- ret = 0;
+ *req = brick_req;
+ ret = 0;
out:
- if (ret && brick_req)
- GF_FREE (brick_req);
- gf_msg_debug (THIS->name, 0, "Returning %d", ret);
- return ret;
+ if (ret && brick_req)
+ GF_FREE(brick_req);
+ gf_msg_debug(THIS->name, 0, "Returning %d", ret);
+ return ret;
}
static int
-glusterd_validate_quorum_options (xlator_t *this, char *fullkey, char *value,
- char **op_errstr)
+glusterd_validate_quorum_options(xlator_t *this, char *fullkey, char *value,
+ char **op_errstr)
{
- int ret = 0;
- char *key = NULL;
- volume_option_t *opt = NULL;
-
- if (!glusterd_is_quorum_option (fullkey))
- goto out;
- key = strchr (fullkey, '.');
- if (key == NULL) {
- ret = -1;
- goto out;
- }
- key++;
- opt = xlator_volume_option_get (this, key);
- if (!opt) {
- ret = -1;
- goto out;
- }
- ret = xlator_option_validate (this, key, value, opt, op_errstr);
+ int ret = 0;
+ char *key = NULL;
+ volume_option_t *opt = NULL;
+
+ if (!glusterd_is_quorum_option(fullkey))
+ goto out;
+ key = strchr(fullkey, '.');
+ if (key == NULL) {
+ ret = -1;
+ goto out;
+ }
+ key++;
+ opt = xlator_volume_option_get(this, key);
+ if (!opt) {
+ ret = -1;
+ goto out;
+ }
+ ret = xlator_option_validate(this, key, value, opt, op_errstr);
out:
- return ret;
+ return ret;
}
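
glusterd_validate_quorum_options() strips the option namespace by locating the first '.' in the full key and advancing one character before handing the bare name to xlator_volume_option_get(). The same step in isolation (hypothetical demo names):

#include <stdio.h>
#include <string.h>

/* Return the bare option name after the "xlator." prefix,
 * or NULL when the key carries no namespace at all. */
static const char *
demo_strip_namespace(const char *fullkey)
{
    const char *dot = strchr(fullkey, '.');

    if (dot == NULL)
        return NULL; /* mirrors the ret = -1 path above */
    return dot + 1;
}

int
main(void)
{
    printf("%s\n", demo_strip_namespace("cluster.server-quorum-type"));
    /* prints: server-quorum-type */
    return 0;
}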
static int
-glusterd_validate_brick_mx_options (xlator_t *this, char *fullkey, char *value,
- char **op_errstr)
+glusterd_validate_brick_mx_options(xlator_t *this, char *fullkey, char *value,
+ char **op_errstr)
{
- int ret = 0;
+ int ret = 0;
- //Placeholder function for now
+ // Placeholder function for now
- return ret;
+ return ret;
}
static int
-glusterd_validate_shared_storage (char *key, char *value, char *errstr)
+glusterd_validate_shared_storage(char *key, char *value, char *errstr)
{
- int32_t ret = -1;
- int32_t exists = -1;
- int32_t count = -1;
- char *op = NULL;
- char hook_script[PATH_MAX] = "";
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- int32_t len = 0;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
-
- conf = this->private;
- GF_VALIDATE_OR_GOTO (this->name, conf, out);
-
- GF_VALIDATE_OR_GOTO (this->name, key, out);
- GF_VALIDATE_OR_GOTO (this->name, value, out);
- GF_VALIDATE_OR_GOTO (this->name, errstr, out);
-
- ret = 0;
-
- if (strcmp (key, GLUSTERD_SHARED_STORAGE_KEY)) {
- goto out;
- }
-
- if ((strcmp (value, "enable")) &&
- (strcmp (value, "disable"))) {
- snprintf (errstr, PATH_MAX,
- "Invalid option(%s). Valid options "
- "are 'enable' and 'disable'", value);
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "%s", errstr);
- ret = -1;
- goto out;
- }
-
- len = snprintf (hook_script, sizeof(hook_script),
- "%s"GLUSTERD_SHRD_STRG_HOOK_SCRIPT, conf->workdir);
- if ((len < 0) || (len >= sizeof(hook_script))) {
- ret = -1;
- goto out;
- }
-
- ret = sys_access (hook_script, R_OK|X_OK);
- if (ret) {
- len = snprintf (errstr, PATH_MAX,
- "The hook-script (%s) required "
- "for this operation is not present. "
- "Please install the hook-script "
- "and retry", hook_script);
- if (len < 0) {
- strncpy(errstr, "<error>", PATH_MAX);
- }
- gf_msg (this->name, GF_LOG_ERROR, ENOENT,
- GD_MSG_FILE_OP_FAILED, "%s", errstr);
- goto out;
- }
-
- if (!strncmp (value, "disable", SLEN ("disable"))) {
- ret = dict_get_strn (conf->opts, GLUSTERD_SHARED_STORAGE_KEY,
- SLEN (GLUSTERD_SHARED_STORAGE_KEY), &op);
- if (ret || !strncmp (op, "disable", SLEN ("disable"))) {
- snprintf (errstr, PATH_MAX, "Shared storage volume "
- "does not exist. Please enable shared storage"
- " for creating shared storage volume.");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SHARED_STORAGE_DOES_NOT_EXIST, "%s",
- errstr);
- ret = -1;
- goto out;
- }
- goto out;
- }
-
- exists = glusterd_check_volume_exists (GLUSTER_SHARED_STORAGE);
- if (exists) {
- snprintf (errstr, PATH_MAX,
- "Shared storage volume("GLUSTER_SHARED_STORAGE
- ") already exists.");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_ALREADY_EXIST, "%s", errstr);
- ret = -1;
- goto out;
- }
-
- ret = glusterd_count_connected_peers (&count);
- if (ret) {
- snprintf (errstr, PATH_MAX,
- "Failed to calculate number of connected peers.");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_PEER_COUNT_GET_FAIL, "%s", errstr);
- goto out;
- }
+ int32_t ret = -1;
+ int32_t exists = -1;
+ int32_t count = -1;
+ char *op = NULL;
+ char hook_script[PATH_MAX] = "";
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ int32_t len = 0;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+
+ GF_VALIDATE_OR_GOTO(this->name, key, out);
+ GF_VALIDATE_OR_GOTO(this->name, value, out);
+ GF_VALIDATE_OR_GOTO(this->name, errstr, out);
+
+ ret = 0;
+
+ if (strcmp(key, GLUSTERD_SHARED_STORAGE_KEY)) {
+ goto out;
+ }
+
+ if ((strcmp(value, "enable")) && (strcmp(value, "disable"))) {
+ snprintf(errstr, PATH_MAX,
+ "Invalid option(%s). Valid options "
+ "are 'enable' and 'disable'",
+ value);
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s",
+ errstr);
+ ret = -1;
+ goto out;
+ }
- if (count <= 1) {
- snprintf (errstr, PATH_MAX,
- "More than one node should "
- "be up/present in the cluster to enable this option");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_INSUFFICIENT_UP_NODES, "%s", errstr);
- ret = -1;
- goto out;
- }
+ len = snprintf(hook_script, sizeof(hook_script),
+ "%s" GLUSTERD_SHRD_STRG_HOOK_SCRIPT, conf->workdir);
+ if ((len < 0) || (len >= sizeof(hook_script))) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = sys_access(hook_script, R_OK | X_OK);
+ if (ret) {
+ len = snprintf(errstr, PATH_MAX,
+ "The hook-script (%s) required "
+ "for this operation is not present. "
+ "Please install the hook-script "
+ "and retry",
+ hook_script);
+ if (len < 0) {
+ strncpy(errstr, "<error>", PATH_MAX);
+ }
+ gf_msg(this->name, GF_LOG_ERROR, ENOENT, GD_MSG_FILE_OP_FAILED, "%s",
+ errstr);
+ goto out;
+ }
+
+ if (!strncmp(value, "disable", SLEN("disable"))) {
+ ret = dict_get_strn(conf->opts, GLUSTERD_SHARED_STORAGE_KEY,
+ SLEN(GLUSTERD_SHARED_STORAGE_KEY), &op);
+ if (ret || !strncmp(op, "disable", SLEN("disable"))) {
+ snprintf(errstr, PATH_MAX,
+ "Shared storage volume "
+ "does not exist. Please enable shared storage"
+ " for creating shared storage volume.");
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SHARED_STORAGE_DOES_NOT_EXIST, "%s", errstr);
+ ret = -1;
+ goto out;
+ }
+ goto out;
+ }
+
+ exists = glusterd_check_volume_exists(GLUSTER_SHARED_STORAGE);
+ if (exists) {
+ snprintf(errstr, PATH_MAX,
+ "Shared storage volume(" GLUSTER_SHARED_STORAGE
+ ") already exists.");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_ALREADY_EXIST, "%s",
+ errstr);
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_count_connected_peers(&count);
+ if (ret) {
+ snprintf(errstr, PATH_MAX,
+ "Failed to calculate number of connected peers.");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_COUNT_GET_FAIL, "%s",
+ errstr);
+ goto out;
+ }
+
+ if (count <= 1) {
+ snprintf(errstr, PATH_MAX,
+ "More than one node should "
+ "be up/present in the cluster to enable this option");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INSUFFICIENT_UP_NODES, "%s",
+ errstr);
+ ret = -1;
+ goto out;
+ }
out:
- return ret;
+ return ret;
}
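
Note the guard after building hook_script above: snprintf() returns the length the formatted string would have had, so len >= sizeof(buf) flags a path that did not fit and len < 0 flags an encoding error. The same truncation check in a self-contained form (the script name below is a made-up placeholder, not the real GLUSTERD_SHRD_STRG_HOOK_SCRIPT expansion):

#include <stdio.h>

/* Build "<workdir>/hooks/..." into buf, failing cleanly on overflow. */
static int
demo_build_path(char *buf, size_t size, const char *workdir)
{
    int len = snprintf(buf, size, "%s/hooks/demo-script.sh", workdir);

    if (len < 0 || (size_t)len >= size)
        return -1; /* truncated or encoding error: treat as failure */
    return 0;
}

int
main(void)
{
    char path[16]; /* deliberately small to force truncation */

    if (demo_build_path(path, sizeof(path), "/var/lib/glusterd"))
        fprintf(stderr, "path would not fit, refusing to continue\n");
    return 0;
}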
static int
-glusterd_validate_localtime_logging (char *key, char *value, char *errstr)
+glusterd_validate_localtime_logging(char *key, char *value, char *errstr)
{
- int32_t ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- int already_enabled = 0;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
-
- conf = this->private;
- GF_VALIDATE_OR_GOTO (this->name, conf, out);
-
- GF_VALIDATE_OR_GOTO (this->name, key, out);
- GF_VALIDATE_OR_GOTO (this->name, value, out);
- GF_VALIDATE_OR_GOTO (this->name, errstr, out);
-
- ret = 0;
-
- if (strcmp (key, GLUSTERD_LOCALTIME_LOGGING_KEY)) {
- goto out;
- }
-
- if ((strcmp (value, "enable")) &&
- (strcmp (value, "disable"))) {
- snprintf (errstr, PATH_MAX,
- "Invalid option(%s). Valid options "
- "are 'enable' and 'disable'", value);
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "%s", errstr);
- ret = -1;
- }
-
- already_enabled = gf_log_get_localtime ();
-
- if (strcmp (value, "enable") == 0) {
- gf_log_set_localtime (1);
- if (!already_enabled)
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_LOCALTIME_LOGGING_ENABLE,
- "localtime logging enable");
- } else if (strcmp (value, "disable") == 0) {
- gf_log_set_localtime (0);
- if (already_enabled)
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_LOCALTIME_LOGGING_DISABLE,
- "localtime logging disable");
- }
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ int already_enabled = 0;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+
+ GF_VALIDATE_OR_GOTO(this->name, key, out);
+ GF_VALIDATE_OR_GOTO(this->name, value, out);
+ GF_VALIDATE_OR_GOTO(this->name, errstr, out);
+
+ ret = 0;
+
+ if (strcmp(key, GLUSTERD_LOCALTIME_LOGGING_KEY)) {
+ goto out;
+ }
+
+ if ((strcmp(value, "enable")) && (strcmp(value, "disable"))) {
+ snprintf(errstr, PATH_MAX,
+ "Invalid option(%s). Valid options "
+ "are 'enable' and 'disable'",
+ value);
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s",
+ errstr);
+ ret = -1;
+ }
+
+ already_enabled = gf_log_get_localtime();
+
+ if (strcmp(value, "enable") == 0) {
+ gf_log_set_localtime(1);
+ if (!already_enabled)
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_LOCALTIME_LOGGING_ENABLE,
+ "localtime logging enable");
+ } else if (strcmp(value, "disable") == 0) {
+ gf_log_set_localtime(0);
+ if (already_enabled)
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_LOCALTIME_LOGGING_DISABLE,
+ "localtime logging disable");
+ }
out:
- return ret;
+ return ret;
}
static int
-glusterd_validate_daemon_log_level (char *key, char *value, char *errstr)
+glusterd_validate_daemon_log_level(char *key, char *value, char *errstr)
{
- int32_t ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
-
- conf = this->private;
- GF_VALIDATE_OR_GOTO (this->name, conf, out);
-
- GF_VALIDATE_OR_GOTO (this->name, key, out);
- GF_VALIDATE_OR_GOTO (this->name, value, out);
- GF_VALIDATE_OR_GOTO (this->name, errstr, out);
-
- ret = 0;
-
- if (strcmp (key, GLUSTERD_DAEMON_LOG_LEVEL_KEY)) {
- goto out;
- }
-
- if ((strcmp (value, "INFO")) &&
- (strcmp (value, "WARNING")) &&
- (strcmp (value, "DEBUG")) &&
- (strcmp (value, "TRACE")) &&
- (strcmp (value, "ERROR"))) {
- snprintf (errstr, PATH_MAX,
- "Invalid option(%s). Valid options "
- "are 'INFO' or 'WARNING' or 'ERROR' or 'DEBUG' or "
- " 'TRACE'", value);
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_INVALID_ENTRY, "%s", errstr);
- ret = -1;
- }
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+
+ GF_VALIDATE_OR_GOTO(this->name, key, out);
+ GF_VALIDATE_OR_GOTO(this->name, value, out);
+ GF_VALIDATE_OR_GOTO(this->name, errstr, out);
+
+ ret = 0;
+
+ if (strcmp(key, GLUSTERD_DAEMON_LOG_LEVEL_KEY)) {
+ goto out;
+ }
+
+ if ((strcmp(value, "INFO")) && (strcmp(value, "WARNING")) &&
+ (strcmp(value, "DEBUG")) && (strcmp(value, "TRACE")) &&
+ (strcmp(value, "ERROR"))) {
+ snprintf(errstr, PATH_MAX,
+ "Invalid option(%s). Valid options "
+ "are 'INFO' or 'WARNING' or 'ERROR' or 'DEBUG' or "
+ " 'TRACE'",
+ value);
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s",
+ errstr);
+ ret = -1;
+ }
out:
- return ret;
+ return ret;
}
static int
-glusterd_op_stage_set_volume (dict_t *dict, char **op_errstr)
+glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
{
- int ret = -1;
- char *volname = NULL;
- int exists = 0;
- char *key = NULL;
- char *key_fixed = NULL;
- char *value = NULL;
- char *val_dup = NULL;
- char keystr[100] = {0, };
- int keylen;
- char *trash_path = NULL;
- int trash_path_len = 0;
- int count = 0;
- int dict_count = 0;
- char errstr[PATH_MAX] = {0, };
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- dict_t *val_dict = NULL;
- gf_boolean_t global_opt = _gf_false;
- glusterd_volinfo_t *voliter = NULL;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
- uint32_t new_op_version = GD_OP_VERSION_MIN;
- uint32_t local_new_op_version = GD_OP_VERSION_MIN;
- uint32_t local_new_client_op_version = GD_OP_VERSION_MIN;
- uint32_t key_op_version = GD_OP_VERSION_MIN;
- uint32_t local_key_op_version = GD_OP_VERSION_MIN;
- gf_boolean_t origin_glusterd = _gf_true;
- gf_boolean_t check_op_version = _gf_true;
- gf_boolean_t trash_enabled = _gf_false;
- gf_boolean_t all_vol = _gf_false;
-
- GF_ASSERT (dict);
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- val_dict = dict_new();
- if (!val_dict)
- goto out;
-
- /* Check if we can support the required op-version
- * This check is not done on the originator glusterd. The originator
- * glusterd sets this value.
- */
- origin_glusterd = is_origin_glusterd (dict);
-
- if (!origin_glusterd) {
- /* Check for v3.3.x origin glusterd */
- check_op_version = dict_get_str_boolean (dict,
- "check-op-version",
- _gf_false);
-
- if (check_op_version) {
- ret = dict_get_uint32 (dict, "new-op-version",
- &new_op_version);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Failed to get new_op_version");
- goto out;
- }
-
- if ((new_op_version > GD_OP_VERSION_MAX) ||
- (new_op_version < GD_OP_VERSION_MIN)) {
- ret = -1;
- snprintf (errstr, sizeof (errstr),
- "Required op_version (%d) is not "
- "supported. Max supported op version "
- "is %d", new_op_version,
- priv->op_version);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_UNSUPPORTED_VERSION, "%s",
- errstr);
- goto out;
- }
- }
- }
+ int ret = -1;
+ char *volname = NULL;
+ int exists = 0;
+ char *key = NULL;
+ char *key_fixed = NULL;
+ char *value = NULL;
+ char *val_dup = NULL;
+ char keystr[100] = {
+ 0,
+ };
+ int keylen;
+ char *trash_path = NULL;
+ int trash_path_len = 0;
+ int count = 0;
+ int dict_count = 0;
+ char errstr[PATH_MAX] = {
+ 0,
+ };
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ dict_t *val_dict = NULL;
+ gf_boolean_t global_opt = _gf_false;
+ glusterd_volinfo_t *voliter = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ uint32_t new_op_version = GD_OP_VERSION_MIN;
+ uint32_t local_new_op_version = GD_OP_VERSION_MIN;
+ uint32_t local_new_client_op_version = GD_OP_VERSION_MIN;
+ uint32_t key_op_version = GD_OP_VERSION_MIN;
+ uint32_t local_key_op_version = GD_OP_VERSION_MIN;
+ gf_boolean_t origin_glusterd = _gf_true;
+ gf_boolean_t check_op_version = _gf_true;
+ gf_boolean_t trash_enabled = _gf_false;
+ gf_boolean_t all_vol = _gf_false;
+
+ GF_ASSERT(dict);
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ val_dict = dict_new();
+ if (!val_dict)
+ goto out;
+
+ /* Check if we can support the required op-version
+ * This check is not done on the originator glusterd. The originator
+ * glusterd sets this value.
+ */
+ origin_glusterd = is_origin_glusterd(dict);
+
+ if (!origin_glusterd) {
+ /* Check for v3.3.x origin glusterd */
+ check_op_version = dict_get_str_boolean(dict, "check-op-version",
+ _gf_false);
- ret = dict_get_int32n (dict, "count", SLEN ("count"), &dict_count);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Count(dict),not set in Volume-Set");
+ if (check_op_version) {
+ ret = dict_get_uint32(dict, "new-op-version", &new_op_version);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Failed to get new_op_version");
goto out;
- }
+ }
- if (dict_count == 0) {
+ if ((new_op_version > GD_OP_VERSION_MAX) ||
+ (new_op_version < GD_OP_VERSION_MIN)) {
+ ret = -1;
+ snprintf(errstr, sizeof(errstr),
+ "Required op_version (%d) is not "
+ "supported. Max supported op version "
+ "is %d",
+ new_op_version, priv->op_version);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNSUPPORTED_VERSION,
+ "%s", errstr);
+ goto out;
+ }
+ }
+ }
+
+ ret = dict_get_int32n(dict, "count", SLEN("count"), &dict_count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Count(dict),not set in Volume-Set");
+ goto out;
+ }
+
+ if (dict_count == 0) {
/*No options would be specified of volume set help */
- if (dict_getn (dict, "help", SLEN ("help"))) {
- ret = 0;
- goto out;
- }
+ if (dict_getn(dict, "help", SLEN("help"))) {
+ ret = 0;
+ goto out;
+ }
- if (dict_getn (dict, "help-xml", SLEN ("help-xml"))) {
+ if (dict_getn(dict, "help-xml", SLEN("help-xml"))) {
#if (HAVE_LIB_XML)
- ret = 0;
- goto out;
+ ret = 0;
+ goto out;
#else
- ret = -1;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MODULE_NOT_INSTALLED,
- "libxml not present in the system");
- *op_errstr = gf_strdup ("Error: xml libraries not "
- "present to produce xml-output");
- goto out;
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MODULE_NOT_INSTALLED,
+ "libxml not present in the system");
+ *op_errstr = gf_strdup(
+ "Error: xml libraries not "
+ "present to produce xml-output");
+ goto out;
#endif
- }
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_NO_OPTIONS_GIVEN, "No options received ");
- *op_errstr = gf_strdup ("Options not specified");
- ret = -1;
- goto out;
}
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_OPTIONS_GIVEN,
+ "No options received ");
+ *op_errstr = gf_strdup("Options not specified");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+
+ if (strcasecmp(volname, "all") != 0) {
+ exists = glusterd_check_volume_exists(volname);
+ if (!exists) {
+ snprintf(errstr, sizeof(errstr), FMTSTR_CHECK_VOL_EXISTS, volname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s",
+ errstr);
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Unable to get volume name");
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ FMTSTR_CHECK_VOL_EXISTS, volname);
+ goto out;
}
- if (strcasecmp (volname, "all") != 0) {
- exists = glusterd_check_volume_exists (volname);
- if (!exists) {
- snprintf (errstr, sizeof (errstr),
- FMTSTR_CHECK_VOL_EXISTS, volname);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, "%s", errstr);
- ret = -1;
- goto out;
- }
+ ret = glusterd_validate_volume_id(dict, volinfo);
+ if (ret)
+ goto out;
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND,
- FMTSTR_CHECK_VOL_EXISTS, volname);
- goto out;
- }
+ local_new_op_version = volinfo->op_version;
+ local_new_client_op_version = volinfo->client_op_version;
- ret = glusterd_validate_volume_id (dict, volinfo);
- if (ret)
- goto out;
+ } else {
+ all_vol = _gf_true;
+ }
- local_new_op_version = volinfo->op_version;
- local_new_client_op_version = volinfo->client_op_version;
+ for (count = 1; ret != 1; count++) {
+ global_opt = _gf_false;
+ keylen = sprintf(keystr, "key%d", count);
+ ret = dict_get_strn(dict, keystr, keylen, &key);
+ if (ret)
+ break;
- } else {
- all_vol = _gf_true;
+ keylen = sprintf(keystr, "value%d", count);
+ ret = dict_get_strn(dict, keystr, keylen, &value);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "invalid key,value pair in 'volume set'");
+ ret = -1;
+ goto out;
+ }
+
+ if (strcmp(key, "config.memory-accounting") == 0) {
+ gf_msg_debug(this->name, 0,
+ "enabling memory accounting for volume %s", volname);
+ ret = 0;
+ }
+
+ if (strcmp(key, "config.transport") == 0) {
+ gf_msg_debug(this->name, 0, "changing transport-type for volume %s",
+ volname);
+ ret = 0;
+ /* if value is none of 'tcp/rdma/tcp,rdma' error out */
+ if (!((strcasecmp(value, "rdma") == 0) ||
+ (strcasecmp(value, "tcp") == 0) ||
+ (strcasecmp(value, "tcp,rdma") == 0) ||
+ (strcasecmp(value, "rdma,tcp") == 0))) {
+ ret = snprintf(errstr, sizeof(errstr),
+ "transport-type %s does "
+ "not exist",
+ value);
+ /* lets not bother about above return value,
+ its a failure anyways */
+ ret = -1;
+ goto out;
+ }
}
- for ( count = 1; ret != 1 ; count++ ) {
- global_opt = _gf_false;
- keylen = sprintf (keystr, "key%d", count);
- ret = dict_get_strn (dict, keystr, keylen, &key);
- if (ret)
- break;
-
- keylen = sprintf (keystr, "value%d", count);
- ret = dict_get_strn (dict, keystr, keylen, &value);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "invalid key,value pair in 'volume set'");
- ret = -1;
- goto out;
- }
-
- if (strcmp (key, "config.memory-accounting") == 0) {
- gf_msg_debug (this->name, 0,
- "enabling memory accounting for volume %s",
- volname);
- ret = 0;
- }
-
- if (strcmp (key, "config.transport") == 0) {
- gf_msg_debug (this->name, 0,
- "changing transport-type for volume %s",
- volname);
- ret = 0;
- /* if value is none of 'tcp/rdma/tcp,rdma' error out */
- if (!((strcasecmp (value, "rdma") == 0) ||
- (strcasecmp (value, "tcp") == 0) ||
- (strcasecmp (value, "tcp,rdma") == 0) ||
- (strcasecmp (value, "rdma,tcp") == 0))) {
- ret = snprintf (errstr, sizeof (errstr),
- "transport-type %s does "
- "not exist", value);
- /* lets not bother about above return value,
- its a failure anyways */
- ret = -1;
- goto out;
- }
- }
-
- ret = glusterd_check_bitrot_cmd (key, value, errstr,
- sizeof (errstr));
- if (ret)
- goto out;
-
- ret = glusterd_check_quota_cmd (key, value, errstr, sizeof (errstr));
- if (ret)
- goto out;
-
- if (is_key_glusterd_hooks_friendly (key))
- continue;
-
- ret = glusterd_volopt_validate (volinfo, dict, key, value,
- op_errstr);
- if (ret)
- goto out;
-
- exists = glusterd_check_option_exists (key, &key_fixed);
- if (exists == -1) {
- ret = -1;
- goto out;
- }
-
- if (!exists) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_INVALID_ENTRY,
- "Option with name: %s does not exist", key);
- ret = snprintf (errstr, sizeof (errstr),
- "option : %s does not exist",
- key);
- if (key_fixed)
- snprintf (errstr + ret, sizeof (errstr) - ret,
- "\nDid you mean %s?", key_fixed);
- ret = -1;
- goto out;
- }
-
- if (key_fixed)
- key = key_fixed;
-
- if (strcmp (key, "cluster.granular-entry-heal") == 0) {
- /* For granular entry-heal, if the set command was
- * invoked through volume-set CLI, then allow the
- * command only if the volume is still in 'Created'
- * state
- */
- if ((dict_getn (dict, "is-special-key",
- SLEN ("is-special-key")) == NULL) &&
- (volinfo->status != GLUSTERD_STATUS_NONE)) {
- snprintf (errstr, sizeof (errstr), " 'gluster "
- "volume set <VOLNAME> %s {enable, "
- "disable}' is not supported. Use "
- "'gluster volume heal <VOLNAME> "
- "granular-entry-heal {enable, "
- "disable}' instead.", key);
- ret = -1;
- goto out;
- }
- }
-
- /* Check if the key is cluster.op-version and set
- * local_new_op_version to the value given if possible.
- */
- if (strcmp (key, GLUSTERD_GLOBAL_OP_VERSION_KEY) == 0) {
- if (!all_vol) {
- ret = -1;
- snprintf (errstr, sizeof (errstr), "Option \""
- "%s\" is not valid for a single "
- "volume", key);
- goto out;
- }
- /* Check if cluster.op-version is the only option being
- * set
- */
- if (count != 1) {
- ret = -1;
- snprintf (errstr, sizeof (errstr), "Option \""
- "%s\" cannot be set along with other "
- "options", key);
- goto out;
- }
- /* Just reusing the variable, but I'm using it for
- * storing the op-version from value
- */
- ret = gf_string2uint (value, &local_key_op_version);
- if (ret) {
- snprintf (errstr, sizeof (errstr), "invalid "
- "number format \"%s\" in option "
- "\"%s\"", value, key);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_INVALID_ENTRY, "%s", errstr);
- goto out;
- }
-
- if (local_key_op_version > GD_OP_VERSION_MAX ||
- local_key_op_version < GD_OP_VERSION_MIN) {
- ret = -1;
- snprintf (errstr, sizeof (errstr),
- "Required op_version (%d) is not "
- "supported. Max supported op version "
- "is %d", local_key_op_version,
- priv->op_version);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VERSION_UNSUPPORTED,
- "%s", errstr);
- goto out;
- }
- if (local_key_op_version > priv->op_version) {
- local_new_op_version = local_key_op_version;
- } else {
- ret = -1;
- snprintf (errstr, sizeof (errstr),
- "Required op-version (%d) should"
- " not be equal or lower than current"
- " cluster op-version (%d).",
- local_key_op_version,
- priv->op_version);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VERSION_UNSUPPORTED,
- "%s", errstr);
- goto out;
- }
-
- goto cont;
- }
-
- ALL_VOLUME_OPTION_CHECK (volname, _gf_false, key, ret,
- op_errstr, out);
- ret = glusterd_validate_quorum_options (this, key, value,
- op_errstr);
- if (ret)
- goto out;
-
- ret = glusterd_validate_brick_mx_options (this, key, value,
- op_errstr);
- if (ret)
- goto out;
-
- local_key_op_version = glusterd_get_op_version_for_key (key);
- if (local_key_op_version > local_new_op_version)
- local_new_op_version = local_key_op_version;
- if (gd_is_client_option (key) &&
- (local_key_op_version > local_new_client_op_version))
- local_new_client_op_version = local_key_op_version;
-
- sprintf (keystr, "op-version%d", count);
- if (origin_glusterd) {
- ret = dict_set_uint32 (dict, keystr, local_key_op_version);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set key-op-version in dict");
- goto out;
- }
- } else if (check_op_version) {
- ret = dict_get_uint32 (dict, keystr, &key_op_version);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Failed to get key-op-version from"
- " dict");
- goto out;
- }
- if (local_key_op_version != key_op_version) {
- ret = -1;
- snprintf (errstr, sizeof (errstr),
- "option: %s op-version mismatch",
- key);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_OP_VERSION_MISMATCH,
- "%s, required op-version = %"PRIu32", "
- "available op-version = %"PRIu32,
- errstr, key_op_version,
- local_key_op_version);
- goto out;
- }
- }
-
- if (glusterd_check_globaloption (key))
- global_opt = _gf_true;
-
- ret = glusterd_validate_shared_storage (key, value, errstr);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SHARED_STRG_VOL_OPT_VALIDATE_FAIL,
- "Failed to validate shared "
- "storage volume options");
- goto out;
- }
-
- ret = glusterd_validate_localtime_logging (key, value, errstr);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_LOCALTIME_LOGGING_VOL_OPT_VALIDATE_FAIL,
- "Failed to validate localtime "
- "logging volume options");
- goto out;
- }
-
- ret = glusterd_validate_daemon_log_level (key, value, errstr);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DAEMON_LOG_LEVEL_VOL_OPT_VALIDATE_FAIL,
- "Failed to validate daemon-log-level volume "
- "options");
- goto out;
- }
-
- if (volinfo) {
- ret = glusterd_volinfo_get (volinfo,
- VKEY_FEATURES_TRASH, &val_dup);
- if (val_dup) {
- ret = gf_string2boolean (val_dup,
- &trash_enabled);
- if (ret)
- goto out;
- }
- }
+ ret = glusterd_check_bitrot_cmd(key, value, errstr, sizeof(errstr));
+ if (ret)
+ goto out;
- if (!strcmp(key, "features.trash-dir") && trash_enabled) {
- if (strchr (value, '/')) {
- snprintf (errstr, sizeof (errstr),
- "Path is not allowed as option");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_SET_FAIL,
- "Unable to set the options in 'volume "
- "set': %s", errstr);
- ret = -1;
- goto out;
- }
+ ret = glusterd_check_quota_cmd(key, value, errstr, sizeof(errstr));
+ if (ret)
+ goto out;
- list_for_each_entry (brickinfo, &volinfo->bricks,
- brick_list) {
- /* Check for local brick */
- if (!gf_uuid_compare (brickinfo->uuid, MY_UUID)) {
- trash_path_len = strlen (value) +
- strlen (brickinfo->path) + 2;
- trash_path = GF_MALLOC (trash_path_len,
- gf_common_mt_char);
- snprintf (trash_path, trash_path_len,
- "%s/%s", brickinfo->path,
- value);
-
- /* Checks whether a directory with
- given option exists or not */
- if (!sys_access (trash_path, R_OK)) {
- snprintf (errstr,
- sizeof (errstr),
- "Path %s exists",
- value);
- gf_msg (this->name,
- GF_LOG_ERROR,
- 0, GD_MSG_VOL_SET_FAIL,
- "Unable to set the "
- "options in "
- "'volume set': %s",
- errstr);
- ret = -1;
- goto out;
- } else {
- gf_msg_debug (this->name, 0,
- "Directory with given "
- "name does not exists,"
- " continuing");
- }
-
- if (volinfo->status == GLUSTERD_STATUS_STARTED
- && brickinfo->status != GF_BRICK_STARTED) {
- /* If volume is in started state , checks
- whether bricks are online */
- snprintf (errstr, sizeof (errstr),
- "One or more bricks are down");
- gf_msg (this->name,
- GF_LOG_ERROR, 0,
- GD_MSG_VOL_SET_FAIL,
- "Unable to set the "
- "options in "
- "'volume set': %s",
- errstr);
- ret = -1;
- goto out;
- }
- }
- if (trash_path) {
- GF_FREE (trash_path);
- trash_path = NULL;
- trash_path_len = 0;
- }
- }
- } else if (!strcmp(key, "features.trash-dir") && !trash_enabled) {
- snprintf (errstr, sizeof (errstr),
- "Trash translator is not enabled. Use "
- "volume set %s trash on", volname);
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_SET_FAIL,
- "Unable to set the options in 'volume "
- "set': %s", errstr);
- ret = -1;
- goto out;
- }
- ret = dict_set_str (val_dict, key, value);
+ if (is_key_glusterd_hooks_friendly(key))
+ continue;
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Unable to set the options in 'volume set'");
- ret = -1;
- goto out;
- }
+ ret = glusterd_volopt_validate(volinfo, dict, key, value, op_errstr);
+ if (ret)
+ goto out;
- *op_errstr = NULL;
- if (!global_opt && !all_vol)
- ret = glusterd_validate_reconfopts (volinfo, val_dict, op_errstr);
- else if (!all_vol) {
- voliter = NULL;
- cds_list_for_each_entry (voliter, &priv->volumes,
- vol_list) {
- ret = glusterd_validate_globalopts (voliter,
- val_dict,
- op_errstr);
- if (ret)
- break;
- }
- }
+ exists = glusterd_check_option_exists(key, &key_fixed);
+ if (exists == -1) {
+ ret = -1;
+ goto out;
+ }
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLFILE_CREATE_FAIL,
- "Could not create "
- "temp volfile, some option failed: %s",
- *op_errstr);
- goto out;
- }
- dict_del (val_dict, key);
+ if (!exists) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
+ "Option with name: %s does not exist", key);
+ ret = snprintf(errstr, sizeof(errstr), "option : %s does not exist",
+ key);
+ if (key_fixed)
+ snprintf(errstr + ret, sizeof(errstr) - ret,
+ "\nDid you mean %s?", key_fixed);
+ ret = -1;
+ goto out;
+ }
- if (key_fixed) {
- GF_FREE (key_fixed);
- key_fixed = NULL;
- }
+ if (key_fixed)
+ key = key_fixed;
+
+ if (strcmp(key, "cluster.granular-entry-heal") == 0) {
+ /* For granular entry-heal, if the set command was
+ * invoked through volume-set CLI, then allow the
+ * command only if the volume is still in 'Created'
+ * state
+ */
+ if ((dict_getn(dict, "is-special-key", SLEN("is-special-key")) ==
+ NULL) &&
+ (volinfo->status != GLUSTERD_STATUS_NONE)) {
+ snprintf(errstr, sizeof(errstr),
+ " 'gluster "
+ "volume set <VOLNAME> %s {enable, "
+ "disable}' is not supported. Use "
+ "'gluster volume heal <VOLNAME> "
+ "granular-entry-heal {enable, "
+ "disable}' instead.",
+ key);
+ ret = -1;
+ goto out;
+ }
}
- /* Check if all the connected clients support the new client-op-version
+ /* Check if the key is cluster.op-version and set
+ * local_new_op_version to the value given if possible.
*/
- ret = glusterd_check_client_op_version_support
- (volname, local_new_client_op_version, op_errstr);
- if (ret)
+ if (strcmp(key, GLUSTERD_GLOBAL_OP_VERSION_KEY) == 0) {
+ if (!all_vol) {
+ ret = -1;
+ snprintf(errstr, sizeof(errstr),
+ "Option \""
+ "%s\" is not valid for a single "
+ "volume",
+ key);
+ goto out;
+ }
+ /* Check if cluster.op-version is the only option being
+ * set
+ */
+ if (count != 1) {
+ ret = -1;
+ snprintf(errstr, sizeof(errstr),
+ "Option \""
+ "%s\" cannot be set along with other "
+ "options",
+ key);
+ goto out;
+ }
+ /* Just reusing the variable, but I'm using it for
+ * storing the op-version from value
+ */
+ ret = gf_string2uint(value, &local_key_op_version);
+ if (ret) {
+ snprintf(errstr, sizeof(errstr),
+ "invalid "
+ "number format \"%s\" in option "
+ "\"%s\"",
+ value, key);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY, "%s",
+ errstr);
+ goto out;
+ }
+
+ if (local_key_op_version > GD_OP_VERSION_MAX ||
+ local_key_op_version < GD_OP_VERSION_MIN) {
+ ret = -1;
+ snprintf(errstr, sizeof(errstr),
+ "Required op_version (%d) is not "
+ "supported. Max supported op version "
+ "is %d",
+ local_key_op_version, priv->op_version);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED,
+ "%s", errstr);
+ goto out;
+ }
+ if (local_key_op_version > priv->op_version) {
+ local_new_op_version = local_key_op_version;
+ } else {
+ ret = -1;
+ snprintf(errstr, sizeof(errstr),
+ "Required op-version (%d) should"
+ " not be equal or lower than current"
+ " cluster op-version (%d).",
+ local_key_op_version, priv->op_version);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED,
+ "%s", errstr);
goto out;
+ }
-cont:
- if (origin_glusterd) {
- ret = dict_set_uint32 (dict, "new-op-version",
- local_new_op_version);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set new-op-version in dict");
- goto out;
- }
- /* Set this value in dict so other peers know to check for
- * op-version. This is a hack for 3.3.x compatibility
- *
- * TODO: Remove this and the other places this is referred once
- * 3.3.x compatibility is not required
- */
- ret = dict_set_int32n (dict, "check-op-version",
- SLEN ("check-op-version"), 1);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set check-op-version in dict");
- goto out;
- }
+ goto cont;
}
- ret = 0;
+ ALL_VOLUME_OPTION_CHECK(volname, _gf_false, key, ret, op_errstr, out);
+ ret = glusterd_validate_quorum_options(this, key, value, op_errstr);
+ if (ret)
+ goto out;
-out:
- if (val_dict)
- dict_unref (val_dict);
+ ret = glusterd_validate_brick_mx_options(this, key, value, op_errstr);
+ if (ret)
+ goto out;
- if (trash_path)
- GF_FREE (trash_path);
+ local_key_op_version = glusterd_get_op_version_for_key(key);
+ if (local_key_op_version > local_new_op_version)
+ local_new_op_version = local_key_op_version;
+ if (gd_is_client_option(key) &&
+ (local_key_op_version > local_new_client_op_version))
+ local_new_client_op_version = local_key_op_version;
- GF_FREE (key_fixed);
- if (errstr[0] != '\0')
- *op_errstr = gf_strdup (errstr);
+ sprintf(keystr, "op-version%d", count);
+ if (origin_glusterd) {
+ ret = dict_set_uint32(dict, keystr, local_key_op_version);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set key-op-version in dict");
+ goto out;
+ }
+ } else if (check_op_version) {
+ ret = dict_get_uint32(dict, keystr, &key_op_version);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Failed to get key-op-version from"
+ " dict");
+ goto out;
+ }
+ if (local_key_op_version != key_op_version) {
+ ret = -1;
+ snprintf(errstr, sizeof(errstr),
+ "option: %s op-version mismatch", key);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERSION_MISMATCH,
+ "%s, required op-version = %" PRIu32
+ ", "
+ "available op-version = %" PRIu32,
+ errstr, key_op_version, local_key_op_version);
+ goto out;
+ }
+ }
+ if (glusterd_check_globaloption(key))
+ global_opt = _gf_true;
+
+ ret = glusterd_validate_shared_storage(key, value, errstr);
if (ret) {
- if (!(*op_errstr)) {
- *op_errstr = gf_strdup ("Error, Validation Failed");
- gf_msg_debug (this->name, 0,
- "Error, Cannot Validate option :%s",
- *op_errstr);
- } else {
- gf_msg_debug (this->name, 0,
- "Error, Cannot Validate option");
- }
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SHARED_STRG_VOL_OPT_VALIDATE_FAIL,
+ "Failed to validate shared "
+ "storage volume options");
+ goto out;
}
- return ret;
-}
-static int
-glusterd_water_limit_check (glusterd_volinfo_t *volinfo, gf_boolean_t is_hi,
- char **op_errstr)
-{
- int ret = -1;
- char *default_value = NULL;
- char *temp = NULL;
- uint64_t wm = 0;
- uint64_t default_wm = 0;
- struct volopt_map_entry *vmap = NULL;
- xlator_t *this = NULL;
- extern struct volopt_map_entry glusterd_volopt_map[];
- char msg[2048] = {0};
-
- this = THIS;
- GF_ASSERT (this);
-
- if (is_hi)
- ret = glusterd_volinfo_get (volinfo,
- "cluster.watermark-low", &temp);
- else
- ret = glusterd_volinfo_get (volinfo,
- "cluster.watermark-hi", &temp);
+ ret = glusterd_validate_localtime_logging(key, value, errstr);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLINFO_GET_FAIL, "failed to get watermark");
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_LOCALTIME_LOGGING_VOL_OPT_VALIDATE_FAIL,
+ "Failed to validate localtime "
+ "logging volume options");
+ goto out;
}
- gf_string2bytesize_uint64 (temp, &wm);
-
- if (is_hi)
- for (vmap = glusterd_volopt_map; vmap->key; vmap++) {
- if (strcmp (vmap->key, "cluster.watermark-hi") == 0)
- default_value = vmap->value;
- }
- else
- for (vmap = glusterd_volopt_map; vmap->key; vmap++) {
- if (strcmp (vmap->key, "cluster.watermark-low") == 0)
- default_value = vmap->value;
- }
-
- gf_string2bytesize_uint64 (default_value, &default_wm);
+ ret = glusterd_validate_daemon_log_level(key, value, errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DAEMON_LOG_LEVEL_VOL_OPT_VALIDATE_FAIL,
+ "Failed to validate daemon-log-level volume "
+ "options");
+ goto out;
+ }
- if (is_hi) {
- if (default_wm <= wm) {
- snprintf (msg, sizeof (msg), "Resetting hi-watermark "
- "to default will make it lower or equal to "
- "the low-watermark, which is an invalid "
- "configuration state. Please lower the "
- "low-watermark first to the desired value "
- "and then reset the hi-watermark.");
+ if (volinfo) {
+ ret = glusterd_volinfo_get(volinfo, VKEY_FEATURES_TRASH, &val_dup);
+ if (val_dup) {
+ ret = gf_string2boolean(val_dup, &trash_enabled);
+ if (ret)
+ goto out;
+ }
+ }
+
+ if (!strcmp(key, "features.trash-dir") && trash_enabled) {
+ if (strchr(value, '/')) {
+ snprintf(errstr, sizeof(errstr),
+ "Path is not allowed as option");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
+ "Unable to set the options in 'volume "
+ "set': %s",
+ errstr);
+ ret = -1;
+ goto out;
+ }
+
+ list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ /* Check for local brick */
+ if (!gf_uuid_compare(brickinfo->uuid, MY_UUID)) {
+ trash_path_len = strlen(value) + strlen(brickinfo->path) +
+ 2;
+ trash_path = GF_MALLOC(trash_path_len, gf_common_mt_char);
+ snprintf(trash_path, trash_path_len, "%s/%s",
+ brickinfo->path, value);
+
+ /* Checks whether a directory with
+ given option exists or not */
+ if (!sys_access(trash_path, R_OK)) {
+ snprintf(errstr, sizeof(errstr), "Path %s exists",
+ value);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
+ "Unable to set the "
+ "options in "
+ "'volume set': %s",
+ errstr);
ret = -1;
goto out;
- }
- } else {
- if (default_wm >= wm) {
- snprintf (msg, sizeof (msg), "Resetting low-watermark "
- "to default will make it higher or equal to "
- "the hi-watermark, which is an invalid "
- "configuration state. Please raise the "
- "hi-watermark first to the desired value "
- "and then reset the low-watermark.");
+ } else {
+ gf_msg_debug(this->name, 0,
+ "Directory with given "
+ "name does not exists,"
+ " continuing");
+ }
+
+ if (volinfo->status == GLUSTERD_STATUS_STARTED &&
+ brickinfo->status != GF_BRICK_STARTED) {
+ /* If volume is in started state , checks
+ whether bricks are online */
+ snprintf(errstr, sizeof(errstr),
+ "One or more bricks are down");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
+ "Unable to set the "
+ "options in "
+ "'volume set': %s",
+ errstr);
ret = -1;
goto out;
+ }
}
+ if (trash_path) {
+ GF_FREE(trash_path);
+ trash_path = NULL;
+ trash_path_len = 0;
+ }
+ }
+ } else if (!strcmp(key, "features.trash-dir") && !trash_enabled) {
+ snprintf(errstr, sizeof(errstr),
+ "Trash translator is not enabled. Use "
+ "volume set %s trash on",
+ volname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
+ "Unable to set the options in 'volume "
+ "set': %s",
+ errstr);
+ ret = -1;
+ goto out;
}
-out:
- if (msg[0] != '\0') {
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_TIER_WATERMARK_RESET_FAIL, "%s", msg);
- *op_errstr = gf_strdup (msg);
- }
- return ret;
-}
-
-static int
-glusterd_op_stage_reset_volume (dict_t *dict, char **op_errstr)
-{
- int ret = 0;
- char *volname = NULL;
- int exists = 0;
- char msg[2048] = {0};
- char *key = NULL;
- char *key_fixed = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- priv = this->private;
- GF_ASSERT (priv);
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
+ ret = dict_set_str(val_dict, key, value);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to get volume name");
- goto out;
- }
-
- if (strcasecmp (volname, "all") != 0) {
- exists = glusterd_check_volume_exists (volname);
- if (!exists) {
- snprintf (msg, sizeof (msg), FMTSTR_CHECK_VOL_EXISTS,
- volname);
- ret = -1;
- goto out;
- }
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- snprintf (msg, sizeof (msg), FMTSTR_CHECK_VOL_EXISTS,
- volname);
- goto out;
- }
-
- ret = glusterd_validate_volume_id (dict, volinfo);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to set the options in 'volume set'");
+ ret = -1;
+ goto out;
+ }
+
+ *op_errstr = NULL;
+ if (!global_opt && !all_vol)
+ ret = glusterd_validate_reconfopts(volinfo, val_dict, op_errstr);
+ else if (!all_vol) {
+ voliter = NULL;
+ cds_list_for_each_entry(voliter, &priv->volumes, vol_list)
+ {
+ ret = glusterd_validate_globalopts(voliter, val_dict,
+ op_errstr);
if (ret)
- goto out;
-
+ break;
+ }
}
- ret = dict_get_strn (dict, "key", SLEN ("key"), &key);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to get option key");
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
+ "Could not create "
+ "temp volfile, some option failed: %s",
+ *op_errstr);
+ goto out;
}
+ dict_del(val_dict, key);
- if (strcmp(key, "all")) {
- exists = glusterd_check_option_exists (key, &key_fixed);
- if (exists == -1) {
- ret = -1;
- goto out;
- } else if (strcmp (key, "cluster.watermark-low") == 0) {
- ret = glusterd_water_limit_check (volinfo, _gf_false,
- op_errstr);
- if (ret)
- return ret;
- } else if (strcmp (key, "cluster.watermark-hi") == 0) {
- ret = glusterd_water_limit_check (volinfo, _gf_true,
- op_errstr);
- if (ret) {
- if (key_fixed)
- GF_FREE (key_fixed);
- return ret;
- }
- }
-
- if (!exists) {
- ret = snprintf (msg, sizeof (msg),
- "Option %s does not exist", key);
- if (key_fixed)
- snprintf (msg + ret, sizeof (msg) - ret,
- "\nDid you mean %s?", key_fixed);
- ret = -1;
- goto out;
- } else if (exists > 0) {
- if (key_fixed)
- key = key_fixed;
-
- /* 'gluster volume set/reset <VOLNAME>
- * features.quota/features.inode-quota' should
- * not be allowed as it is deprecated.
- * Setting and resetting quota/inode-quota features
- * should be allowed only through 'gluster volume quota
- * <VOLNAME> enable/disable'.
- * But, 'gluster volume set features.quota-deem-statfs'
- * can be turned on/off when quota is enabled.
- */
-
- if (strcmp (VKEY_FEATURES_INODE_QUOTA, key) == 0 ||
- strcmp (VKEY_FEATURES_QUOTA, key) == 0) {
- snprintf (msg, sizeof (msg), "'gluster volume "
- "reset <VOLNAME> %s' is deprecated. "
- "Use 'gluster volume quota <VOLNAME> "
- "disable' instead.", key);
- ret = -1;
- goto out;
- }
- ALL_VOLUME_OPTION_CHECK (volname, _gf_false, key, ret,
- op_errstr, out);
- }
+ if (key_fixed) {
+ GF_FREE(key_fixed);
+ key_fixed = NULL;
}
+ }
-out:
- GF_FREE (key_fixed);
+ /* Check if all the connected clients support the new client-op-version
+ */
+ ret = glusterd_check_client_op_version_support(
+ volname, local_new_client_op_version, op_errstr);
+ if (ret)
+ goto out;
- if (msg[0] != '\0') {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_OP_STAGE_RESET_VOL_FAIL, "%s", msg);
- *op_errstr = gf_strdup (msg);
+cont:
+ if (origin_glusterd) {
+ ret = dict_set_uint32(dict, "new-op-version", local_new_op_version);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set new-op-version in dict");
+ goto out;
}
-
- gf_msg_debug (this->name, 0, "Returning %d", ret);
-
- return ret;
-}
-
-
-
-static int
-glusterd_op_stage_sync_volume (dict_t *dict, char **op_errstr)
-{
- int ret = -1;
- char *volname = NULL;
- char *hostname = NULL;
- gf_boolean_t exists = _gf_false;
- glusterd_peerinfo_t *peerinfo = NULL;
- char msg[2048] = {0,};
- glusterd_volinfo_t *volinfo = NULL;
-
- ret = dict_get_strn (dict, "hostname", SLEN ("hostname"), &hostname);
+ /* Set this value in dict so other peers know to check for
+ * op-version. This is a hack for 3.3.x compatibility
+ *
+ * TODO: Remove this and the other places this is referred once
+ * 3.3.x compatibility is not required
+ */
+ ret = dict_set_int32n(dict, "check-op-version",
+ SLEN("check-op-version"), 1);
if (ret) {
- snprintf (msg, sizeof (msg), "hostname couldn't be "
- "retrieved from msg");
- *op_errstr = gf_strdup (msg);
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set check-op-version in dict");
+ goto out;
}
+ }
- if (gf_is_local_addr (hostname)) {
- //volname is not present in case of sync all
- ret = dict_get_strn (dict, "volname", SLEN ("volname"),
- &volname);
- if (!ret) {
- exists = glusterd_check_volume_exists (volname);
- if (!exists) {
- snprintf (msg, sizeof (msg), "Volume %s "
- "does not exist", volname);
- *op_errstr = gf_strdup (msg);
- ret = -1;
- goto out;
- }
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret)
- goto out;
+ ret = 0;
- } else {
- ret = 0;
- }
- } else {
- rcu_read_lock ();
+out:
+ if (val_dict)
+ dict_unref(val_dict);
- peerinfo = glusterd_peerinfo_find (NULL, hostname);
- if (peerinfo == NULL) {
- ret = -1;
- snprintf (msg, sizeof (msg), "%s, is not a friend",
- hostname);
- *op_errstr = gf_strdup (msg);
+ if (trash_path)
+ GF_FREE(trash_path);
- } else if (!peerinfo->connected) {
- snprintf (msg, sizeof (msg), "%s, is not connected at "
- "the moment", hostname);
- *op_errstr = gf_strdup (msg);
- ret = -1;
- }
+ GF_FREE(key_fixed);
+ if (errstr[0] != '\0')
+ *op_errstr = gf_strdup(errstr);
- rcu_read_unlock ();
+ if (ret) {
+ if (!(*op_errstr)) {
+ *op_errstr = gf_strdup("Error, Validation Failed");
+ gf_msg_debug(this->name, 0, "Error, Cannot Validate option :%s",
+ *op_errstr);
+ } else {
+ gf_msg_debug(this->name, 0, "Error, Cannot Validate option");
}
-
+ }
+ return ret;
+}
+static int
+glusterd_water_limit_check(glusterd_volinfo_t *volinfo, gf_boolean_t is_hi,
+ char **op_errstr)
+{
+ int ret = -1;
+ char *default_value = NULL;
+ char *temp = NULL;
+ uint64_t wm = 0;
+ uint64_t default_wm = 0;
+ struct volopt_map_entry *vmap = NULL;
+ xlator_t *this = NULL;
+ extern struct volopt_map_entry glusterd_volopt_map[];
+ char msg[2048] = {0};
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ if (is_hi)
+ ret = glusterd_volinfo_get(volinfo, "cluster.watermark-low", &temp);
+ else
+ ret = glusterd_volinfo_get(volinfo, "cluster.watermark-hi", &temp);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
+ "failed to get watermark");
+ goto out;
+ }
+
+ gf_string2bytesize_uint64(temp, &wm);
+
+ if (is_hi)
+ for (vmap = glusterd_volopt_map; vmap->key; vmap++) {
+ if (strcmp(vmap->key, "cluster.watermark-hi") == 0)
+ default_value = vmap->value;
+ }
+ else
+ for (vmap = glusterd_volopt_map; vmap->key; vmap++) {
+ if (strcmp(vmap->key, "cluster.watermark-low") == 0)
+ default_value = vmap->value;
+ }
+
+ gf_string2bytesize_uint64(default_value, &default_wm);
+
+ if (is_hi) {
+ if (default_wm <= wm) {
+ snprintf(msg, sizeof(msg),
+ "Resetting hi-watermark "
+ "to default will make it lower or equal to "
+ "the low-watermark, which is an invalid "
+ "configuration state. Please lower the "
+ "low-watermark first to the desired value "
+ "and then reset the hi-watermark.");
+ ret = -1;
+ goto out;
+ }
+ } else {
+ if (default_wm >= wm) {
+ snprintf(msg, sizeof(msg),
+ "Resetting low-watermark "
+ "to default will make it higher or equal to "
+ "the hi-watermark, which is an invalid "
+ "configuration state. Please raise the "
+ "hi-watermark first to the desired value "
+ "and then reset the low-watermark.");
+ ret = -1;
+ goto out;
+ }
+ }
out:
- gf_msg_debug ("glusterd", 0, "Returning %d", ret);
-
- return ret;
+ if (msg[0] != '\0') {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TIER_WATERMARK_RESET_FAIL,
+ "%s", msg);
+ *op_errstr = gf_strdup(msg);
+ }
+ return ret;
}
static int
-glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
+glusterd_op_stage_reset_volume(dict_t *dict, char **op_errstr)
{
- int ret = -1;
- uint32_t cmd = 0;
- char msg[2048] = {0,};
- char *volname = NULL;
- char *brick = NULL;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- dict_t *vol_opts = NULL;
- gf_boolean_t nfs_disabled = _gf_false;
- gf_boolean_t shd_enabled = _gf_false;
-
- GF_ASSERT (dict);
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT(priv);
-
- ret = dict_get_uint32 (dict, "cmd", &cmd);
- if (ret)
- goto out;
-
- if (cmd & GF_CLI_STATUS_ALL)
- goto out;
-
- if ((cmd & GF_CLI_STATUS_QUOTAD) &&
- (priv->op_version == GD_OP_VERSION_MIN)) {
- snprintf (msg, sizeof (msg), "The cluster is operating at "
- "version 1. Getting the status of quotad is not "
- "allowed in this state.");
- ret = -1;
- goto out;
- }
-
- if ((cmd & GF_CLI_STATUS_TIERD) &&
- (priv->op_version < GD_OP_VERSION_3_10_0)) {
- snprintf (msg, sizeof (msg), "The cluster is operating at "
- "version less than %d. Getting the "
- "status of tierd is not allowed in this state.",
- GD_OP_VERSION_3_10_0);
- ret = -1;
- goto out;
- }
-
- if ((cmd & GF_CLI_STATUS_SNAPD) &&
- (priv->op_version < GD_OP_VERSION_3_6_0)) {
- snprintf (msg, sizeof (msg), "The cluster is operating at "
- "version less than %d. Getting the "
- "status of snapd is not allowed in this state.",
- GD_OP_VERSION_3_6_0);
- ret = -1;
- goto out;
- }
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
+ int ret = 0;
+ char *volname = NULL;
+ int exists = 0;
+ char msg[2048] = {0};
+ char *key = NULL;
+ char *key_fixed = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+
+ if (strcasecmp(volname, "all") != 0) {
+ exists = glusterd_check_volume_exists(volname);
+ if (!exists) {
+ snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
+ ret = -1;
+ goto out;
+ }
+ ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to get volume name");
- goto out;
+ snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
+ goto out;
}
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- snprintf (msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
- ret = -1;
- goto out;
- }
-
- ret = glusterd_validate_volume_id (dict, volinfo);
+ ret = glusterd_validate_volume_id(dict, volinfo);
if (ret)
- goto out;
+ goto out;
+ }
+
+ ret = dict_get_strn(dict, "key", SLEN("key"), &key);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get option key");
+ goto out;
+ }
+
+ if (strcmp(key, "all")) {
+ exists = glusterd_check_option_exists(key, &key_fixed);
+ if (exists == -1) {
+ ret = -1;
+ goto out;
+ } else if (strcmp(key, "cluster.watermark-low") == 0) {
+ ret = glusterd_water_limit_check(volinfo, _gf_false, op_errstr);
+ if (ret)
+ return ret;
+ } else if (strcmp(key, "cluster.watermark-hi") == 0) {
+ ret = glusterd_water_limit_check(volinfo, _gf_true, op_errstr);
+ if (ret) {
+ if (key_fixed)
+ GF_FREE(key_fixed);
+ return ret;
+ }
+ }
+
+ if (!exists) {
+ ret = snprintf(msg, sizeof(msg), "Option %s does not exist", key);
+ if (key_fixed)
+ snprintf(msg + ret, sizeof(msg) - ret, "\nDid you mean %s?",
+ key_fixed);
+ ret = -1;
+ goto out;
+ } else if (exists > 0) {
+ if (key_fixed)
+ key = key_fixed;
- ret = glusterd_is_volume_started (volinfo);
- if (!ret) {
- snprintf (msg, sizeof (msg), "Volume %s is not started",
- volname);
+ /* 'gluster volume set/reset <VOLNAME>
+ * features.quota/features.inode-quota' should
+ * not be allowed as it is deprecated.
+ * Setting and resetting quota/inode-quota features
+ * should be allowed only through 'gluster volume quota
+ * <VOLNAME> enable/disable'.
+ * But, 'gluster volume set features.quota-deem-statfs'
+ * can be turned on/off when quota is enabled.
+ */
+
+ if (strcmp(VKEY_FEATURES_INODE_QUOTA, key) == 0 ||
+ strcmp(VKEY_FEATURES_QUOTA, key) == 0) {
+ snprintf(msg, sizeof(msg),
+ "'gluster volume "
+ "reset <VOLNAME> %s' is deprecated. "
+ "Use 'gluster volume quota <VOLNAME> "
+ "disable' instead.",
+ key);
ret = -1;
goto out;
+ }
+ ALL_VOLUME_OPTION_CHECK(volname, _gf_false, key, ret, op_errstr,
+ out);
}
+ }
- vol_opts = volinfo->dict;
-
- if ((cmd & GF_CLI_STATUS_NFS) != 0) {
- nfs_disabled = dict_get_str_boolean (vol_opts,
- NFS_DISABLE_MAP_KEY,
- _gf_false);
- if (nfs_disabled) {
- ret = -1;
- snprintf (msg, sizeof (msg),
- "NFS server is disabled for volume %s",
- volname);
- goto out;
- }
- } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
- if (glusterd_is_shd_compatible_volume (volinfo)) {
- shd_enabled = gd_is_self_heal_enabled (volinfo,
- vol_opts);
- } else {
- ret = -1;
- snprintf (msg, sizeof (msg),
- "Volume %s is not Self-heal compatible",
- volname);
- goto out;
- }
- if (!shd_enabled) {
- ret = -1;
- snprintf (msg, sizeof (msg),
- "Self-heal Daemon is disabled for volume %s",
- volname);
- goto out;
- }
- } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
- if (!glusterd_is_volume_quota_enabled (volinfo)) {
- ret = -1;
- snprintf (msg, sizeof (msg), "Volume %s does not have "
- "quota enabled", volname);
- goto out;
- }
- } else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
- if (!glusterd_is_bitrot_enabled (volinfo)) {
- ret = -1;
- snprintf (msg, sizeof (msg), "Volume %s does not have "
- "bitrot enabled", volname);
- goto out;
- }
- } else if ((cmd & GF_CLI_STATUS_TIERD) != 0) {
- if (!glusterd_is_tierd_enabled (volinfo)) {
- ret = -1;
- snprintf (msg, sizeof (msg), "Volume %s does not have "
- "tierd enabled.", volname);
- goto out;
- }
- } else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
- if (!glusterd_is_bitrot_enabled (volinfo)) {
- ret = -1;
- snprintf (msg, sizeof (msg), "Volume %s does not have "
- "bitrot enabled. Scrubber will be enabled "
- "automatically if bitrot is enabled",
- volname);
- goto out;
- }
- } else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
- if (!glusterd_is_snapd_enabled (volinfo)) {
- ret = -1;
- snprintf (msg, sizeof (msg), "Volume %s does not have "
- "uss enabled", volname);
- goto out;
- }
- } else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
- ret = dict_get_strn (dict, "brick", SLEN ("brick"), &brick);
- if (ret)
- goto out;
-
- ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo,
- &brickinfo,
- _gf_false);
- if (ret) {
- snprintf (msg, sizeof(msg), "No brick %s in"
- " volume %s", brick, volname);
- ret = -1;
- goto out;
- }
- }
+out:
+ GF_FREE(key_fixed);
- ret = 0;
+ if (msg[0] != '\0') {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_STAGE_RESET_VOL_FAIL,
+ "%s", msg);
+ *op_errstr = gf_strdup(msg);
+ }
- out:
- if (ret) {
- if (msg[0] != '\0')
- *op_errstr = gf_strdup (msg);
- else
- *op_errstr = gf_strdup ("Validation Failed for Status");
- }
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
- gf_msg_debug (this->name, 0, "Returning: %d", ret);
- return ret;
+ return ret;
}
static int
-glusterd_op_stage_stats_volume (dict_t *dict, char **op_errstr)
+glusterd_op_stage_sync_volume(dict_t *dict, char **op_errstr)
{
- int ret = -1;
- char *volname = NULL;
- gf_boolean_t exists = _gf_false;
- char msg[2048] = {0,};
- int32_t stats_op = GF_CLI_STATS_NONE;
- glusterd_volinfo_t *volinfo = NULL;
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- snprintf (msg, sizeof (msg), "Volume name get failed");
- goto out;
- }
-
- exists = glusterd_check_volume_exists (volname);
- ret = glusterd_volinfo_find (volname, &volinfo);
- if ((!exists) || (ret < 0)) {
- snprintf (msg, sizeof (msg), "Volume %s, "
- "doesn't exist", volname);
+ int ret = -1;
+ char *volname = NULL;
+ char *hostname = NULL;
+ gf_boolean_t exists = _gf_false;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ char msg[2048] = {
+ 0,
+ };
+ glusterd_volinfo_t *volinfo = NULL;
+
+ ret = dict_get_strn(dict, "hostname", SLEN("hostname"), &hostname);
+ if (ret) {
+ snprintf(msg, sizeof(msg),
+ "hostname couldn't be "
+ "retrieved from msg");
+ *op_errstr = gf_strdup(msg);
+ goto out;
+ }
+
+ if (gf_is_local_addr(hostname)) {
+ // volname is not present in case of sync all
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (!ret) {
+ exists = glusterd_check_volume_exists(volname);
+ if (!exists) {
+ snprintf(msg, sizeof(msg),
+ "Volume %s "
+ "does not exist",
+ volname);
+ *op_errstr = gf_strdup(msg);
ret = -1;
goto out;
- }
-
- ret = glusterd_validate_volume_id (dict, volinfo);
- if (ret)
+ }
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret)
goto out;
- ret = dict_get_int32n (dict, "op", SLEN ("op"), &stats_op);
- if (ret) {
- snprintf (msg, sizeof (msg), "Volume profile op get failed");
- goto out;
+ } else {
+ ret = 0;
}
+ } else {
+ rcu_read_lock();
- if (GF_CLI_STATS_START == stats_op) {
- if (_gf_true == glusterd_is_profile_on (volinfo)) {
- snprintf (msg, sizeof (msg), "Profile on Volume %s is"
- " already started", volinfo->volname);
- ret = -1;
- goto out;
- }
+ peerinfo = glusterd_peerinfo_find(NULL, hostname);
+ if (peerinfo == NULL) {
+ ret = -1;
+ snprintf(msg, sizeof(msg), "%s, is not a friend", hostname);
+ *op_errstr = gf_strdup(msg);
+ } else if (!peerinfo->connected) {
+ snprintf(msg, sizeof(msg),
+ "%s, is not connected at "
+ "the moment",
+ hostname);
+ *op_errstr = gf_strdup(msg);
+ ret = -1;
}
- if ((GF_CLI_STATS_STOP == stats_op) ||
- (GF_CLI_STATS_INFO == stats_op)) {
- if (_gf_false == glusterd_is_profile_on (volinfo)) {
- snprintf (msg, sizeof (msg), "Profile on Volume %s is"
- " not started", volinfo->volname);
- ret = -1;
- goto out;
- }
- }
- if ((GF_CLI_STATS_TOP == stats_op) ||
- (GF_CLI_STATS_INFO == stats_op)) {
- if (_gf_false == glusterd_is_volume_started (volinfo)) {
- snprintf (msg, sizeof (msg), "Volume %s is not started.",
- volinfo->volname);
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_STARTED, "%s", msg);
- ret = -1;
- goto out;
- }
- }
- ret = 0;
+ rcu_read_unlock();
+ }
+
out:
- if (msg[0] != '\0') {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_OP_STAGE_STATS_VOL_FAIL, "%s", msg);
- *op_errstr = gf_strdup (msg);
- }
- gf_msg_debug ("glusterd", 0, "Returning %d", ret);
- return ret;
-}
+ gf_msg_debug("glusterd", 0, "Returning %d", ret);
+ return ret;
+}
static int
-_delete_reconfig_opt (dict_t *this, char *key, data_t *value, void *data)
+glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
{
- int32_t *is_force = 0;
+ int ret = -1;
+ uint32_t cmd = 0;
+ char msg[2048] = {
+ 0,
+ };
+ char *volname = NULL;
+ char *brick = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ dict_t *vol_opts = NULL;
+ gf_boolean_t nfs_disabled = _gf_false;
+ gf_boolean_t shd_enabled = _gf_false;
+
+ GF_ASSERT(dict);
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = dict_get_uint32(dict, "cmd", &cmd);
+ if (ret)
+ goto out;
+
+ if (cmd & GF_CLI_STATUS_ALL)
+ goto out;
+
+ if ((cmd & GF_CLI_STATUS_QUOTAD) &&
+ (priv->op_version == GD_OP_VERSION_MIN)) {
+ snprintf(msg, sizeof(msg),
+ "The cluster is operating at "
+ "version 1. Getting the status of quotad is not "
+ "allowed in this state.");
+ ret = -1;
+ goto out;
+ }
+
+ if ((cmd & GF_CLI_STATUS_TIERD) &&
+ (priv->op_version < GD_OP_VERSION_3_10_0)) {
+ snprintf(msg, sizeof(msg),
+ "The cluster is operating at "
+ "version less than %d. Getting the "
+ "status of tierd is not allowed in this state.",
+ GD_OP_VERSION_3_10_0);
+ ret = -1;
+ goto out;
+ }
+
+ if ((cmd & GF_CLI_STATUS_SNAPD) &&
+ (priv->op_version < GD_OP_VERSION_3_6_0)) {
+ snprintf(msg, sizeof(msg),
+ "The cluster is operating at "
+ "version less than %d. Getting the "
+ "status of snapd is not allowed in this state.",
+ GD_OP_VERSION_3_6_0);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
+ ret = -1;
+ goto out;
+ }
- GF_ASSERT (data);
- is_force = (int32_t*)data;
+ ret = glusterd_validate_volume_id(dict, volinfo);
+ if (ret)
+ goto out;
- /* Keys which has the flag VOLOPT_FLAG_NEVER_RESET
- * should not be deleted
- */
+ ret = glusterd_is_volume_started(volinfo);
+ if (!ret) {
+ snprintf(msg, sizeof(msg), "Volume %s is not started", volname);
+ ret = -1;
+ goto out;
+ }
+
+ vol_opts = volinfo->dict;
+
+ if ((cmd & GF_CLI_STATUS_NFS) != 0) {
+ nfs_disabled = dict_get_str_boolean(vol_opts, NFS_DISABLE_MAP_KEY,
+ _gf_false);
+ if (nfs_disabled) {
+ ret = -1;
+ snprintf(msg, sizeof(msg), "NFS server is disabled for volume %s",
+ volname);
+ goto out;
+ }
+ } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
+ if (glusterd_is_shd_compatible_volume(volinfo)) {
+ shd_enabled = gd_is_self_heal_enabled(volinfo, vol_opts);
+ } else {
+ ret = -1;
+ snprintf(msg, sizeof(msg), "Volume %s is not Self-heal compatible",
+ volname);
+ goto out;
+ }
+ if (!shd_enabled) {
+ ret = -1;
+ snprintf(msg, sizeof(msg),
+ "Self-heal Daemon is disabled for volume %s", volname);
+ goto out;
+ }
+ } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
+ if (!glusterd_is_volume_quota_enabled(volinfo)) {
+ ret = -1;
+ snprintf(msg, sizeof(msg),
+ "Volume %s does not have "
+ "quota enabled",
+ volname);
+ goto out;
+ }
+ } else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
+ if (!glusterd_is_bitrot_enabled(volinfo)) {
+ ret = -1;
+ snprintf(msg, sizeof(msg),
+ "Volume %s does not have "
+ "bitrot enabled",
+ volname);
+ goto out;
+ }
+ } else if ((cmd & GF_CLI_STATUS_TIERD) != 0) {
+ if (!glusterd_is_tierd_enabled(volinfo)) {
+ ret = -1;
+ snprintf(msg, sizeof(msg),
+ "Volume %s does not have "
+ "tierd enabled.",
+ volname);
+ goto out;
+ }
+ } else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
+ if (!glusterd_is_bitrot_enabled(volinfo)) {
+ ret = -1;
+ snprintf(msg, sizeof(msg),
+ "Volume %s does not have "
+ "bitrot enabled. Scrubber will be enabled "
+ "automatically if bitrot is enabled",
+ volname);
+ goto out;
+ }
+ } else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
+ if (!glusterd_is_snapd_enabled(volinfo)) {
+ ret = -1;
+ snprintf(msg, sizeof(msg),
+ "Volume %s does not have "
+ "uss enabled",
+ volname);
+ goto out;
+ }
+ } else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
+ ret = dict_get_strn(dict, "brick", SLEN("brick"), &brick);
+ if (ret)
+ goto out;
- if (_gf_true == glusterd_check_voloption_flags (key,
- VOLOPT_FLAG_NEVER_RESET)) {
- if (*is_force != 1)
- *is_force = *is_force | GD_OP_PROTECTED;
- goto out;
+ ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo,
+ _gf_false);
+ if (ret) {
+ snprintf(msg, sizeof(msg),
+ "No brick %s in"
+ " volume %s",
+ brick, volname);
+ ret = -1;
+ goto out;
}
+ }
- if (*is_force != 1) {
- if (_gf_true == glusterd_check_voloption_flags (key,
- VOLOPT_FLAG_FORCE)) {
- /* indicate to caller that we don't set the option
- * due to being protected
- */
- *is_force = *is_force | GD_OP_PROTECTED;
- goto out;
- } else {
- *is_force = *is_force | GD_OP_UNPROTECTED;
- }
- }
+ ret = 0;
- gf_msg_debug ("glusterd", 0, "deleting dict with key=%s,value=%s",
- key, value->data);
- dict_del (this, key);
- /**Delete scrubber (pause/resume) option from the dictionary if bitrot
- * option is going to be reset
- * */
- if (!strncmp (key, VKEY_FEATURES_BITROT,
- strlen (VKEY_FEATURES_BITROT))) {
- dict_deln (this, VKEY_FEATURES_SCRUB, SLEN (VKEY_FEATURES_SCRUB));
- }
out:
- return 0;
+ if (ret) {
+ if (msg[0] != '\0')
+ *op_errstr = gf_strdup(msg);
+ else
+ *op_errstr = gf_strdup("Validation Failed for Status");
+ }
+
+ gf_msg_debug(this->name, 0, "Returning: %d", ret);
+ return ret;
}
static int
-_delete_reconfig_global_opt (dict_t *this, char *key, data_t *value, void *data)
+glusterd_op_stage_stats_volume(dict_t *dict, char **op_errstr)
{
- GF_ASSERT (data);
-
- if (strcmp (GLUSTERD_GLOBAL_OPT_VERSION, key) == 0)
- goto out;
-
- _delete_reconfig_opt (this, key, value, data);
+ int ret = -1;
+ char *volname = NULL;
+ gf_boolean_t exists = _gf_false;
+ char msg[2048] = {
+ 0,
+ };
+ int32_t stats_op = GF_CLI_STATS_NONE;
+ glusterd_volinfo_t *volinfo = NULL;
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ snprintf(msg, sizeof(msg), "Volume name get failed");
+ goto out;
+ }
+
+ exists = glusterd_check_volume_exists(volname);
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if ((!exists) || (ret < 0)) {
+ snprintf(msg, sizeof(msg),
+ "Volume %s, "
+ "doesn't exist",
+ volname);
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_validate_volume_id(dict, volinfo);
+ if (ret)
+ goto out;
+
+ ret = dict_get_int32n(dict, "op", SLEN("op"), &stats_op);
+ if (ret) {
+ snprintf(msg, sizeof(msg), "Volume profile op get failed");
+ goto out;
+ }
+
+ if (GF_CLI_STATS_START == stats_op) {
+ if (_gf_true == glusterd_is_profile_on(volinfo)) {
+ snprintf(msg, sizeof(msg),
+ "Profile on Volume %s is"
+ " already started",
+ volinfo->volname);
+ ret = -1;
+ goto out;
+ }
+ }
+ if ((GF_CLI_STATS_STOP == stats_op) || (GF_CLI_STATS_INFO == stats_op)) {
+ if (_gf_false == glusterd_is_profile_on(volinfo)) {
+ snprintf(msg, sizeof(msg),
+ "Profile on Volume %s is"
+ " not started",
+ volinfo->volname);
+ ret = -1;
+
+ goto out;
+ }
+ }
+ if ((GF_CLI_STATS_TOP == stats_op) || (GF_CLI_STATS_INFO == stats_op)) {
+ if (_gf_false == glusterd_is_volume_started(volinfo)) {
+ snprintf(msg, sizeof(msg), "Volume %s is not started.",
+ volinfo->volname);
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_STARTED, "%s",
+ msg);
+ ret = -1;
+ goto out;
+ }
+ }
+ ret = 0;
out:
- return 0;
+ if (msg[0] != '\0') {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_OP_STAGE_STATS_VOL_FAIL,
+ "%s", msg);
+ *op_errstr = gf_strdup(msg);
+ }
+ gf_msg_debug("glusterd", 0, "Returning %d", ret);
+ return ret;
}
static int
-glusterd_options_reset (glusterd_volinfo_t *volinfo, char *key,
- int32_t *is_force)
+_delete_reconfig_opt(dict_t *this, char *key, data_t *value, void *data)
{
- int ret = 0;
- data_t *value = NULL;
- char *key_fixed = NULL;
- xlator_t *this = NULL;
- glusterd_svc_t *svc = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (volinfo->dict);
- GF_ASSERT (key);
-
- if (!strncmp(key, "all", 3)) {
- dict_foreach (volinfo->dict, _delete_reconfig_opt, is_force);
- ret = glusterd_enable_default_options (volinfo, NULL);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_FAIL_DEFAULT_OPT_SET, "Failed to set "
- "default options on reset for volume %s",
- volinfo->volname);
- goto out;
- }
+ int32_t *is_force = 0;
+
+ GF_ASSERT(data);
+ is_force = (int32_t *)data;
+
+ /* Keys which has the flag VOLOPT_FLAG_NEVER_RESET
+ * should not be deleted
+ */
+
+ if (_gf_true ==
+ glusterd_check_voloption_flags(key, VOLOPT_FLAG_NEVER_RESET)) {
+ if (*is_force != 1)
+ *is_force = *is_force | GD_OP_PROTECTED;
+ goto out;
+ }
+
+ if (*is_force != 1) {
+ if (_gf_true ==
+ glusterd_check_voloption_flags(key, VOLOPT_FLAG_FORCE)) {
+ /* indicate to caller that we don't set the option
+ * due to being protected
+ */
+ *is_force = *is_force | GD_OP_PROTECTED;
+ goto out;
} else {
- value = dict_get (volinfo->dict, key);
- if (!value) {
- gf_msg_debug (this->name, 0,
- "no value set for option %s", key);
- goto out;
- }
- _delete_reconfig_opt (volinfo->dict, key, value, is_force);
- ret = glusterd_enable_default_options (volinfo, key);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_FAIL_DEFAULT_OPT_SET, "Failed to set "
- "default value for option '%s' on reset for "
- "volume %s", key, volinfo->volname);
- goto out;
- }
- }
-
- gd_update_volume_op_versions (volinfo);
- if (!volinfo->is_snap_volume) {
- svc = &(volinfo->snapd.svc);
- ret = svc->manager (svc, volinfo, PROC_START_NO_WAIT);
- if (ret)
- goto out;
- }
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- svc = &(volinfo->tierd.svc);
- ret = svc->reconfigure (volinfo);
- if (ret)
- goto out;
- }
- svc = &(volinfo->gfproxyd.svc);
- ret = svc->reconfigure (volinfo);
- if (ret)
- goto out;
-
- ret = glusterd_create_volfiles_and_notify_services (volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLFILE_CREATE_FAIL,
- "Unable to create volfile for"
- " 'volume reset'");
- ret = -1;
- goto out;
- }
-
- ret = glusterd_store_volinfo (volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret)
- goto out;
+ *is_force = *is_force | GD_OP_UNPROTECTED;
+ }
+ }
+
+ gf_msg_debug("glusterd", 0, "deleting dict with key=%s,value=%s", key,
+ value->data);
+ dict_del(this, key);
+ /**Delete scrubber (pause/resume) option from the dictionary if bitrot
+ * option is going to be reset
+ * */
+ if (!strncmp(key, VKEY_FEATURES_BITROT, strlen(VKEY_FEATURES_BITROT))) {
+ dict_deln(this, VKEY_FEATURES_SCRUB, SLEN(VKEY_FEATURES_SCRUB));
+ }
+out:
+ return 0;
+}
- if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_svcs_reconfigure ();
- if (ret)
- goto out;
- }
+static int
+_delete_reconfig_global_opt(dict_t *this, char *key, data_t *value, void *data)
+{
+ GF_ASSERT(data);
- ret = 0;
+ if (strcmp(GLUSTERD_GLOBAL_OPT_VERSION, key) == 0)
+ goto out;
+ _delete_reconfig_opt(this, key, value, data);
out:
- GF_FREE (key_fixed);
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ return 0;
}
static int
-glusterd_op_reset_all_volume_options (xlator_t *this, dict_t *dict)
+glusterd_options_reset(glusterd_volinfo_t *volinfo, char *key,
+ int32_t *is_force)
{
- char *key = NULL;
- char *key_fixed = NULL;
- int ret = -1;
- int32_t is_force = 0;
- glusterd_conf_t *conf = NULL;
- dict_t *dup_opt = NULL;
- gf_boolean_t all = _gf_false;
- char *next_version = NULL;
- gf_boolean_t quorum_action = _gf_false;
-
- conf = this->private;
- ret = dict_get_strn (dict, "key", SLEN ("key"), &key);
+ int ret = 0;
+ data_t *value = NULL;
+ char *key_fixed = NULL;
+ xlator_t *this = NULL;
+ glusterd_svc_t *svc = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(volinfo->dict);
+ GF_ASSERT(key);
+
+ if (!strncmp(key, "all", 3)) {
+ dict_foreach(volinfo->dict, _delete_reconfig_opt, is_force);
+ ret = glusterd_enable_default_options(volinfo, NULL);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Failed to get key");
- goto out;
- }
-
- ret = dict_get_int32n (dict, "force", SLEN ("force"), &is_force);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FAIL_DEFAULT_OPT_SET,
+ "Failed to set "
+ "default options on reset for volume %s",
+ volinfo->volname);
+ goto out;
+ }
+ } else {
+ value = dict_get(volinfo->dict, key);
+ if (!value) {
+ gf_msg_debug(this->name, 0, "no value set for option %s", key);
+ goto out;
+ }
+ _delete_reconfig_opt(volinfo->dict, key, value, is_force);
+ ret = glusterd_enable_default_options(volinfo, key);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FAIL_DEFAULT_OPT_SET,
+ "Failed to set "
+ "default value for option '%s' on reset for "
+ "volume %s",
+ key, volinfo->volname);
+ goto out;
+ }
+ }
+
+ gd_update_volume_op_versions(volinfo);
+ if (!volinfo->is_snap_volume) {
+ svc = &(volinfo->snapd.svc);
+ ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
if (ret)
- is_force = 0;
-
- if (strcmp (key, "all")) {
- ret = glusterd_check_option_exists (key, &key_fixed);
- if (ret <= 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_INVALID_ENTRY, "Option %s does not "
- "exist", key);
- ret = -1;
- goto out;
- }
- } else {
- all = _gf_true;
- }
-
- if (key_fixed)
- key = key_fixed;
-
- ret = -1;
- dup_opt = dict_new ();
- if (!dup_opt)
- goto out;
- if (!all) {
- dict_copy (conf->opts, dup_opt);
- dict_del (dup_opt, key);
- }
- ret = glusterd_get_next_global_opt_version_str (conf->opts,
- &next_version);
+ goto out;
+ }
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ svc = &(volinfo->tierd.svc);
+ ret = svc->reconfigure(volinfo);
if (ret)
- goto out;
+ goto out;
+ }
+ svc = &(volinfo->gfproxyd.svc);
+ ret = svc->reconfigure(volinfo);
+ if (ret)
+ goto out;
+
+ ret = glusterd_create_volfiles_and_notify_services(volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
+ "Unable to create volfile for"
+ " 'volume reset'");
+ ret = -1;
+ goto out;
+ }
- ret = dict_set_strn (dup_opt, GLUSTERD_GLOBAL_OPT_VERSION,
- SLEN (GLUSTERD_GLOBAL_OPT_VERSION), next_version);
- if (ret)
- goto out;
+ ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret)
+ goto out;
- ret = glusterd_store_options (this, dup_opt);
+ if (GLUSTERD_STATUS_STARTED == volinfo->status) {
+ ret = glusterd_svcs_reconfigure();
if (ret)
- goto out;
+ goto out;
+ }
- if (glusterd_is_quorum_changed (conf->opts, key, NULL))
- quorum_action = _gf_true;
+ ret = 0;
- ret = dict_set_dynstrn (conf->opts, GLUSTERD_GLOBAL_OPT_VERSION,
- SLEN (GLUSTERD_GLOBAL_OPT_VERSION),
- next_version);
- if (ret)
- goto out;
- else
- next_version = NULL;
-
- if (!all) {
- dict_del (conf->opts, key);
- } else {
- dict_foreach (conf->opts, _delete_reconfig_global_opt,
- &is_force);
- }
out:
- GF_FREE (key_fixed);
- if (dup_opt)
- dict_unref (dup_opt);
-
- gf_msg_debug (this->name, 0, "returning %d", ret);
- if (quorum_action)
- glusterd_do_quorum_action ();
- GF_FREE (next_version);
- return ret;
+ GF_FREE(key_fixed);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
static int
-glusterd_op_reset_volume (dict_t *dict, char **op_rspstr)
+glusterd_op_reset_all_volume_options(xlator_t *this, dict_t *dict)
{
- glusterd_volinfo_t *volinfo = NULL;
- int ret = -1;
- char *volname = NULL;
- char *key = NULL;
- char *key_fixed = NULL;
- int32_t is_force = 0;
- gf_boolean_t quorum_action = _gf_false;
- xlator_t *this = NULL;
-
- this = THIS;
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to get volume name");
- goto out;
- }
-
- if (strcasecmp (volname, "all") == 0) {
- ret = glusterd_op_reset_all_volume_options (this, dict);
- goto out;
- }
-
- ret = dict_get_int32n (dict, "force", SLEN ("force"), &is_force);
- if (ret)
- is_force = 0;
-
- ret = dict_get_strn (dict, "key", SLEN ("key"), &key);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to get option key");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, FMTSTR_CHECK_VOL_EXISTS,
- volname);
- goto out;
- }
+ char *key = NULL;
+ char *key_fixed = NULL;
+ int ret = -1;
+ int32_t is_force = 0;
+ glusterd_conf_t *conf = NULL;
+ dict_t *dup_opt = NULL;
+ gf_boolean_t all = _gf_false;
+ char *next_version = NULL;
+ gf_boolean_t quorum_action = _gf_false;
+
+ conf = this->private;
+ ret = dict_get_strn(dict, "key", SLEN("key"), &key);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Failed to get key");
+ goto out;
+ }
+
+ ret = dict_get_int32n(dict, "force", SLEN("force"), &is_force);
+ if (ret)
+ is_force = 0;
+
+ if (strcmp(key, "all")) {
+ ret = glusterd_check_option_exists(key, &key_fixed);
+ if (ret <= 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
+ "Option %s does not "
+ "exist",
+ key);
+ ret = -1;
+ goto out;
+ }
+ } else {
+ all = _gf_true;
+ }
+
+ if (key_fixed)
+ key = key_fixed;
+
+ ret = -1;
+ dup_opt = dict_new();
+ if (!dup_opt)
+ goto out;
+ if (!all) {
+ dict_copy(conf->opts, dup_opt);
+ dict_del(dup_opt, key);
+ }
+ ret = glusterd_get_next_global_opt_version_str(conf->opts, &next_version);
+ if (ret)
+ goto out;
+
+ ret = dict_set_strn(dup_opt, GLUSTERD_GLOBAL_OPT_VERSION,
+ SLEN(GLUSTERD_GLOBAL_OPT_VERSION), next_version);
+ if (ret)
+ goto out;
+
+ ret = glusterd_store_options(this, dup_opt);
+ if (ret)
+ goto out;
+
+ if (glusterd_is_quorum_changed(conf->opts, key, NULL))
+ quorum_action = _gf_true;
+
+ ret = dict_set_dynstrn(conf->opts, GLUSTERD_GLOBAL_OPT_VERSION,
+ SLEN(GLUSTERD_GLOBAL_OPT_VERSION), next_version);
+ if (ret)
+ goto out;
+ else
+ next_version = NULL;
+
+ if (!all) {
+ dict_del(conf->opts, key);
+ } else {
+ dict_foreach(conf->opts, _delete_reconfig_global_opt, &is_force);
+ }
+out:
+ GF_FREE(key_fixed);
+ if (dup_opt)
+ dict_unref(dup_opt);
+
+ gf_msg_debug(this->name, 0, "returning %d", ret);
+ if (quorum_action)
+ glusterd_do_quorum_action();
+ GF_FREE(next_version);
+ return ret;
+}
- if (strcmp (key, "all") &&
- glusterd_check_option_exists (key, &key_fixed) != 1) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_INVALID_ENTRY,
- "volinfo dict inconsistency: option %s not found",
+static int
+glusterd_op_reset_volume(dict_t *dict, char **op_rspstr)
+{
+ glusterd_volinfo_t *volinfo = NULL;
+ int ret = -1;
+ char *volname = NULL;
+ char *key = NULL;
+ char *key_fixed = NULL;
+ int32_t is_force = 0;
+ gf_boolean_t quorum_action = _gf_false;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+
+ if (strcasecmp(volname, "all") == 0) {
+ ret = glusterd_op_reset_all_volume_options(this, dict);
+ goto out;
+ }
+
+ ret = dict_get_int32n(dict, "force", SLEN("force"), &is_force);
+ if (ret)
+ is_force = 0;
+
+ ret = dict_get_strn(dict, "key", SLEN("key"), &key);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get option key");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ FMTSTR_CHECK_VOL_EXISTS, volname);
+ goto out;
+ }
+
+ if (strcmp(key, "all") &&
+ glusterd_check_option_exists(key, &key_fixed) != 1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
+ "volinfo dict inconsistency: option %s not found", key);
+ ret = -1;
+ goto out;
+ }
+ if (key_fixed)
+ key = key_fixed;
+
+ if (glusterd_is_quorum_changed(volinfo->dict, key, NULL))
+ quorum_action = _gf_true;
+
+ ret = glusterd_options_reset(volinfo, key, &is_force);
+ if (ret == -1) {
+ gf_asprintf(op_rspstr, "Volume reset : failed");
+ } else if (is_force & GD_OP_PROTECTED) {
+ if (is_force & GD_OP_UNPROTECTED) {
+ gf_asprintf(op_rspstr,
+ "All unprotected fields were"
+ " reset. To reset the protected fields,"
+ " use 'force'.");
+ } else {
+ ret = -1;
+ gf_asprintf(op_rspstr,
+ "'%s' is protected. To reset"
+ " use 'force'.",
key);
- ret = -1;
- goto out;
- }
- if (key_fixed)
- key = key_fixed;
-
- if (glusterd_is_quorum_changed (volinfo->dict, key, NULL))
- quorum_action = _gf_true;
-
- ret = glusterd_options_reset (volinfo, key, &is_force);
- if (ret == -1) {
- gf_asprintf(op_rspstr, "Volume reset : failed");
- } else if (is_force & GD_OP_PROTECTED) {
- if (is_force & GD_OP_UNPROTECTED) {
- gf_asprintf (op_rspstr, "All unprotected fields were"
- " reset. To reset the protected fields,"
- " use 'force'.");
- } else {
- ret = -1;
- gf_asprintf (op_rspstr, "'%s' is protected. To reset"
- " use 'force'.", key);
- }
}
+ }
out:
- GF_FREE (key_fixed);
- if (quorum_action)
- glusterd_do_quorum_action ();
+ GF_FREE(key_fixed);
+ if (quorum_action)
+ glusterd_do_quorum_action();
- gf_msg_debug (this->name, 0, "'volume reset' returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "'volume reset' returning %d", ret);
+ return ret;
}
int
-glusterd_stop_bricks (glusterd_volinfo_t *volinfo)
+glusterd_stop_bricks(glusterd_volinfo_t *volinfo)
{
- glusterd_brickinfo_t *brickinfo = NULL;
-
- cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- /*TODO: Need to change @del_brick in brick_stop to _gf_true
- * once we enable synctask in peer rpc prog */
- if (glusterd_brick_stop (volinfo, brickinfo, _gf_false)) {
- gf_event (EVENT_BRICK_STOP_FAILED,
- "peer=%s;volume=%s;brick=%s",
- brickinfo->hostname, volinfo->volname,
- brickinfo->path);
- return -1;
- }
+ glusterd_brickinfo_t *brickinfo = NULL;
+
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ /*TODO: Need to change @del_brick in brick_stop to _gf_true
+ * once we enable synctask in peer rpc prog */
+ if (glusterd_brick_stop(volinfo, brickinfo, _gf_false)) {
+ gf_event(EVENT_BRICK_STOP_FAILED, "peer=%s;volume=%s;brick=%s",
+ brickinfo->hostname, volinfo->volname, brickinfo->path);
+ return -1;
}
+ }
- return 0;
+ return 0;
}
int
-glusterd_start_bricks (glusterd_volinfo_t *volinfo)
+glusterd_start_bricks(glusterd_volinfo_t *volinfo)
{
- int ret = -1;
- glusterd_brickinfo_t *brickinfo = NULL;
-
- GF_ASSERT (volinfo);
-
- cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- if (!brickinfo->start_triggered) {
- pthread_mutex_lock (&brickinfo->restart_mutex);
- {
- ret = glusterd_brick_start (volinfo, brickinfo,
- _gf_false,
- _gf_false);
- }
- pthread_mutex_unlock (&brickinfo->restart_mutex);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_DISCONNECTED,
- "Failed to start %s:%s for %s",
- brickinfo->hostname, brickinfo->path,
- volinfo->volname);
- gf_event (EVENT_BRICK_START_FAILED,
- "peer=%s;volume=%s;brick=%s",
- brickinfo->hostname, volinfo->volname,
- brickinfo->path);
- goto out;
- }
- }
- }
- ret = 0;
+ int ret = -1;
+ glusterd_brickinfo_t *brickinfo = NULL;
+
+ GF_ASSERT(volinfo);
+
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ if (!brickinfo->start_triggered) {
+ pthread_mutex_lock(&brickinfo->restart_mutex);
+ {
+ ret = glusterd_brick_start(volinfo, brickinfo, _gf_false,
+ _gf_false);
+ }
+ pthread_mutex_unlock(&brickinfo->restart_mutex);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_DISCONNECTED,
+ "Failed to start %s:%s for %s", brickinfo->hostname,
+ brickinfo->path, volinfo->volname);
+ gf_event(EVENT_BRICK_START_FAILED, "peer=%s;volume=%s;brick=%s",
+ brickinfo->hostname, volinfo->volname,
+ brickinfo->path);
+ goto out;
+ }
+ }
+ }
+ ret = 0;
out:
- return ret;
+ return ret;
}
static int
-glusterd_update_volumes_dict (glusterd_volinfo_t *volinfo)
+glusterd_update_volumes_dict(glusterd_volinfo_t *volinfo)
{
- int ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- char *address_family_str = NULL;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
-
- conf = this->private;
- GF_VALIDATE_OR_GOTO (this->name, conf, out);
-
- /* 3.9.0 onwards gNFS will be disabled by default. In case of an upgrade
- * from anything below than 3.9.0 to 3.9.x the volume's dictionary will
- * not have 'nfs.disable' key set which means the same will not be set
- * to on until explicitly done. setnfs.disable to 'on' at op-version
- * bump up flow is the ideal way here. The same is also applicable for
- * transport.address-family where if the transport type is set to tcp
- * then transport.address-family is defaulted to 'inet'.
- */
- if (conf->op_version >= GD_OP_VERSION_3_9_0) {
- if (dict_get_str_boolean (volinfo->dict, NFS_DISABLE_MAP_KEY,
- 1)) {
- ret = dict_set_dynstr_with_alloc (volinfo->dict,
- NFS_DISABLE_MAP_KEY,
- "on");
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED, "Failed to set "
- "option ' NFS_DISABLE_MAP_KEY ' on "
- "volume %s", volinfo->volname);
- goto out;
- }
- }
- ret = dict_get_strn (volinfo->dict,
- "transport.address-family",
- SLEN ("transport.address-family"),
- &address_family_str);
+ int ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ char *address_family_str = NULL;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+
+ /* 3.9.0 onwards gNFS will be disabled by default. In case of an upgrade
+ * from anything below than 3.9.0 to 3.9.x the volume's dictionary will
+ * not have 'nfs.disable' key set which means the same will not be set
+ * to on until explicitly done. setnfs.disable to 'on' at op-version
+ * bump up flow is the ideal way here. The same is also applicable for
+ * transport.address-family where if the transport type is set to tcp
+ * then transport.address-family is defaulted to 'inet'.
+ */
+ if (conf->op_version >= GD_OP_VERSION_3_9_0) {
+ if (dict_get_str_boolean(volinfo->dict, NFS_DISABLE_MAP_KEY, 1)) {
+ ret = dict_set_dynstr_with_alloc(volinfo->dict, NFS_DISABLE_MAP_KEY,
+ "on");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Failed to set "
+ "option ' NFS_DISABLE_MAP_KEY ' on "
+ "volume %s",
+ volinfo->volname);
+ goto out;
+ }
+ }
+ ret = dict_get_strn(volinfo->dict, "transport.address-family",
+ SLEN("transport.address-family"),
+ &address_family_str);
+ if (ret) {
+ if (volinfo->transport_type == GF_TRANSPORT_TCP) {
+ ret = dict_set_dynstr_with_alloc(
+ volinfo->dict, "transport.address-family", "inet");
if (ret) {
- if (volinfo->transport_type == GF_TRANSPORT_TCP) {
- ret = dict_set_dynstr_with_alloc
- (volinfo->dict,
- "transport.address-family",
- "inet");
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR,
- errno, GD_MSG_DICT_SET_FAILED,
- "failed to set transport."
- "address-family on %s",
- volinfo->volname);
- goto out;
- }
- }
+ gf_msg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_SET_FAILED,
+ "failed to set transport."
+ "address-family on %s",
+ volinfo->volname);
+ goto out;
}
+ }
}
- ret = glusterd_store_volinfo (volinfo,
- GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ }
+ ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
out:
- return ret;
+ return ret;
}
static int
-glusterd_set_brick_mx_opts (dict_t *dict, char *key, char *value,
- char **op_errstr)
+glusterd_set_brick_mx_opts(dict_t *dict, char *key, char *value,
+ char **op_errstr)
{
- int32_t ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
- GF_VALIDATE_OR_GOTO (this->name, dict, out);
- GF_VALIDATE_OR_GOTO (this->name, key, out);
- GF_VALIDATE_OR_GOTO (this->name, value, out);
- GF_VALIDATE_OR_GOTO (this->name, op_errstr, out);
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, dict, out);
+ GF_VALIDATE_OR_GOTO(this->name, key, out);
+ GF_VALIDATE_OR_GOTO(this->name, value, out);
+ GF_VALIDATE_OR_GOTO(this->name, op_errstr, out);
- ret = 0;
+ ret = 0;
- priv = this->private;
+ priv = this->private;
- if (!strcmp (key, GLUSTERD_BRICK_MULTIPLEX_KEY)) {
- ret = dict_set_dynstrn (priv->opts,
- GLUSTERD_BRICK_MULTIPLEX_KEY,
- SLEN (GLUSTERD_BRICK_MULTIPLEX_KEY),
- gf_strdup (value));
- }
+ if (!strcmp(key, GLUSTERD_BRICK_MULTIPLEX_KEY)) {
+ ret = dict_set_dynstrn(priv->opts, GLUSTERD_BRICK_MULTIPLEX_KEY,
+ SLEN(GLUSTERD_BRICK_MULTIPLEX_KEY),
+ gf_strdup(value));
+ }
out:
- return ret;
+ return ret;
}
/* This is a hack to prevent client-io-threads from being loaded in the graph
* when the cluster-op-version is bumped up from 3.8.x to 3.13.x. The key is
* deleted subsequently in glusterd_create_volfiles(). */
static int
-glusterd_dict_set_skip_cliot_key (glusterd_volinfo_t *volinfo)
+glusterd_dict_set_skip_cliot_key(glusterd_volinfo_t *volinfo)
{
- return dict_set_int32n (volinfo->dict, "skip-CLIOT",
- SLEN ("skip-CLIOT"), 1);
+ return dict_set_int32n(volinfo->dict, "skip-CLIOT", SLEN("skip-CLIOT"), 1);
}
static int
-glusterd_op_set_all_volume_options (xlator_t *this, dict_t *dict,
- char **op_errstr)
+glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
+ char **op_errstr)
{
- char *key = NULL;
- char *key_fixed = NULL;
- char *value = NULL;
- char *dup_value = NULL;
- int ret = -1;
- glusterd_conf_t *conf = NULL;
- dict_t *dup_opt = NULL;
- char *next_version = NULL;
- gf_boolean_t quorum_action = _gf_false;
- uint32_t op_version = 0;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_svc_t *svc = NULL;
-
- conf = this->private;
- ret = dict_get_strn (dict, "key1", SLEN ("key1"), &key);
- if (ret)
- goto out;
-
- ret = dict_get_strn (dict, "value1", SLEN ("value1"), &value);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "invalid key,value pair in 'volume set'");
- goto out;
- }
-
- ret = glusterd_check_option_exists (key, &key_fixed);
- if (ret <= 0) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_UNKNOWN_KEY, "Invalid key %s", key);
- ret = -1;
- goto out;
- }
-
- if (key_fixed)
- key = key_fixed;
-
- ret = glusterd_set_shared_storage (dict, key, value, op_errstr);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SHARED_STRG_SET_FAIL,
- "Failed to set shared storage option");
- goto out;
- }
-
- ret = glusterd_set_brick_mx_opts (dict, key, value, op_errstr);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_MX_SET_FAIL,
- "Failed to set brick multiplexing option");
- goto out;
- }
-
- /* If the key is cluster.op-version, set conf->op_version to the value
- * if needed and save it.
- */
- if (strcmp(key, GLUSTERD_GLOBAL_OP_VERSION_KEY) == 0) {
- ret = 0;
-
- ret = gf_string2uint (value, &op_version);
- if (ret)
- goto out;
-
- if (op_version >= conf->op_version) {
- conf->op_version = op_version;
-
- /* When a bump up happens, update the quota.conf file
- * as well. This is because, till 3.7 we had a quota
- * conf version v1.1 in quota.conf. When inode-quota
- * feature is introduced, this needs to be changed to
- * v1.2 in quota.conf and 16 bytes uuid in quota.conf
- * needs to be changed to 17 bytes. Look
- * glusterd_store_quota_config for more details.
- */
- cds_list_for_each_entry (volinfo, &conf->volumes,
- vol_list) {
- ret = glusterd_store_quota_config
- (volinfo, NULL, NULL,
- GF_QUOTA_OPTION_TYPE_UPGRADE,
- NULL);
- if (ret)
- goto out;
- ret = glusterd_update_volumes_dict (volinfo);
- if (ret)
- goto out;
-
- if (glusterd_dict_set_skip_cliot_key (volinfo))
- goto out;
-
- if (!volinfo->is_snap_volume) {
- svc = &(volinfo->snapd.svc);
- ret = svc->manager (svc, volinfo,
- PROC_START_NO_WAIT);
- if (ret)
- goto out;
- }
-
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- svc = &(volinfo->tierd.svc);
- ret = svc->reconfigure (volinfo);
- if (ret)
- goto out;
- }
-
- svc = &(volinfo->gfproxyd.svc);
- ret = svc->reconfigure (volinfo);
- if (ret)
- goto out;
-
- ret = glusterd_create_volfiles_and_notify_services (volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLFILE_CREATE_FAIL,
- "Unable to create volfile for"
- " 'volume set'");
- goto out;
- }
- if (GLUSTERD_STATUS_STARTED
- == volinfo->status) {
- ret = glusterd_svcs_reconfigure ();
- if (ret) {
- gf_msg (this->name,
- GF_LOG_ERROR, 0,
- GD_MSG_SVC_RESTART_FAIL,
- "Unable to restart "
- "services");
- goto out;
- }
- }
- }
- ret = glusterd_store_global_info (this);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_OP_VERS_STORE_FAIL,
- "Failed to store op-version.");
- }
- }
- /* No need to save cluster.op-version in conf->opts
- */
- goto out;
- }
+ char *key = NULL;
+ char *key_fixed = NULL;
+ char *value = NULL;
+ char *dup_value = NULL;
+ int ret = -1;
+ glusterd_conf_t *conf = NULL;
+ dict_t *dup_opt = NULL;
+ char *next_version = NULL;
+ gf_boolean_t quorum_action = _gf_false;
+ uint32_t op_version = 0;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_svc_t *svc = NULL;
+
+ conf = this->private;
+ ret = dict_get_strn(dict, "key1", SLEN("key1"), &key);
+ if (ret)
+ goto out;
+
+ ret = dict_get_strn(dict, "value1", SLEN("value1"), &value);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "invalid key,value pair in 'volume set'");
+ goto out;
+ }
+
+ ret = glusterd_check_option_exists(key, &key_fixed);
+ if (ret <= 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNKNOWN_KEY,
+ "Invalid key %s", key);
ret = -1;
- dup_opt = dict_new ();
- if (!dup_opt)
- goto out;
- dict_copy (conf->opts, dup_opt);
- ret = dict_set_str (dup_opt, key, value);
- if (ret)
- goto out;
-
- ret = glusterd_get_next_global_opt_version_str (conf->opts,
- &next_version);
- if (ret)
- goto out;
+ goto out;
+ }
+
+ if (key_fixed)
+ key = key_fixed;
+
+ ret = glusterd_set_shared_storage(dict, key, value, op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SHARED_STRG_SET_FAIL,
+ "Failed to set shared storage option");
+ goto out;
+ }
+
+ ret = glusterd_set_brick_mx_opts(dict, key, value, op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_MX_SET_FAIL,
+ "Failed to set brick multiplexing option");
+ goto out;
+ }
+
+ /* If the key is cluster.op-version, set conf->op_version to the value
+ * if needed and save it.
+ */
+ if (strcmp(key, GLUSTERD_GLOBAL_OP_VERSION_KEY) == 0) {
+ ret = 0;
- ret = dict_set_strn (dup_opt, GLUSTERD_GLOBAL_OPT_VERSION,
- SLEN (GLUSTERD_GLOBAL_OPT_VERSION),
- next_version);
+ ret = gf_string2uint(value, &op_version);
if (ret)
- goto out;
+ goto out;
+
+ if (op_version >= conf->op_version) {
+ conf->op_version = op_version;
+
+ /* When a bump up happens, update the quota.conf file
+ * as well. This is because, till 3.7 we had a quota
+ * conf version v1.1 in quota.conf. When inode-quota
+ * feature is introduced, this needs to be changed to
+ * v1.2 in quota.conf and 16 bytes uuid in quota.conf
+ * needs to be changed to 17 bytes. Look
+ * glusterd_store_quota_config for more details.
+ */
+ cds_list_for_each_entry(volinfo, &conf->volumes, vol_list)
+ {
+ ret = glusterd_store_quota_config(
+ volinfo, NULL, NULL, GF_QUOTA_OPTION_TYPE_UPGRADE, NULL);
+ if (ret)
+ goto out;
+ ret = glusterd_update_volumes_dict(volinfo);
+ if (ret)
+ goto out;
- ret = glusterd_store_options (this, dup_opt);
- if (ret)
- goto out;
+ if (glusterd_dict_set_skip_cliot_key(volinfo))
+ goto out;
- if (glusterd_is_quorum_changed (conf->opts, key, value))
- quorum_action = _gf_true;
+ if (!volinfo->is_snap_volume) {
+ svc = &(volinfo->snapd.svc);
+ ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
+ if (ret)
+ goto out;
+ }
- ret = dict_set_dynstrn (conf->opts, GLUSTERD_GLOBAL_OPT_VERSION,
- SLEN (GLUSTERD_GLOBAL_OPT_VERSION),
- next_version);
- if (ret)
- goto out;
- else
- next_version = NULL;
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ svc = &(volinfo->tierd.svc);
+ ret = svc->reconfigure(volinfo);
+ if (ret)
+ goto out;
+ }
- dup_value = gf_strdup (value);
- if (!dup_value)
- goto out;
+ svc = &(volinfo->gfproxyd.svc);
+ ret = svc->reconfigure(volinfo);
+ if (ret)
+ goto out;
- ret = dict_set_dynstr (conf->opts, key, dup_value);
- if (ret)
- goto out;
- else
- dup_value = NULL; /* Protect the allocation from GF_FREE */
+ ret = glusterd_create_volfiles_and_notify_services(volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOLFILE_CREATE_FAIL,
+ "Unable to create volfile for"
+ " 'volume set'");
+ goto out;
+ }
+ if (GLUSTERD_STATUS_STARTED == volinfo->status) {
+ ret = glusterd_svcs_reconfigure();
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SVC_RESTART_FAIL,
+ "Unable to restart "
+ "services");
+ goto out;
+ }
+ }
+ }
+ ret = glusterd_store_global_info(this);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERS_STORE_FAIL,
+ "Failed to store op-version.");
+ }
+ }
+ /* No need to save cluster.op-version in conf->opts
+ */
+ goto out;
+ }
+ ret = -1;
+ dup_opt = dict_new();
+ if (!dup_opt)
+ goto out;
+ dict_copy(conf->opts, dup_opt);
+ ret = dict_set_str(dup_opt, key, value);
+ if (ret)
+ goto out;
+
+ ret = glusterd_get_next_global_opt_version_str(conf->opts, &next_version);
+ if (ret)
+ goto out;
+
+ ret = dict_set_strn(dup_opt, GLUSTERD_GLOBAL_OPT_VERSION,
+ SLEN(GLUSTERD_GLOBAL_OPT_VERSION), next_version);
+ if (ret)
+ goto out;
+
+ ret = glusterd_store_options(this, dup_opt);
+ if (ret)
+ goto out;
+
+ if (glusterd_is_quorum_changed(conf->opts, key, value))
+ quorum_action = _gf_true;
+
+ ret = dict_set_dynstrn(conf->opts, GLUSTERD_GLOBAL_OPT_VERSION,
+ SLEN(GLUSTERD_GLOBAL_OPT_VERSION), next_version);
+ if (ret)
+ goto out;
+ else
+ next_version = NULL;
+
+ dup_value = gf_strdup(value);
+ if (!dup_value)
+ goto out;
+
+ ret = dict_set_dynstr(conf->opts, key, dup_value);
+ if (ret)
+ goto out;
+ else
+ dup_value = NULL; /* Protect the allocation from GF_FREE */
out:
- GF_FREE (dup_value);
- GF_FREE (key_fixed);
- if (dup_opt)
- dict_unref (dup_opt);
-
- gf_msg_debug (this->name, 0, "returning %d", ret);
- if (quorum_action)
- glusterd_do_quorum_action ();
- GF_FREE (next_version);
- return ret;
+ GF_FREE(dup_value);
+ GF_FREE(key_fixed);
+ if (dup_opt)
+ dict_unref(dup_opt);
+
+ gf_msg_debug(this->name, 0, "returning %d", ret);
+ if (quorum_action)
+ glusterd_do_quorum_action();
+ GF_FREE(next_version);
+ return ret;
}
-
int
-glusterd_op_get_max_opversion (char **op_errstr, dict_t *rsp_dict)
+glusterd_op_get_max_opversion(char **op_errstr, dict_t *rsp_dict)
{
- int ret = -1;
+ int ret = -1;
- GF_VALIDATE_OR_GOTO (THIS->name, rsp_dict, out);
+ GF_VALIDATE_OR_GOTO(THIS->name, rsp_dict, out);
- ret = dict_set_int32n (rsp_dict, "max-opversion",
- SLEN ("max-opversion"), GD_OP_VERSION_MAX);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
- "Setting value for max-opversion to dict failed");
- goto out;
- }
+ ret = dict_set_int32n(rsp_dict, "max-opversion", SLEN("max-opversion"),
+ GD_OP_VERSION_MAX);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Setting value for max-opversion to dict failed");
+ goto out;
+ }
out:
- gf_msg_debug (THIS->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(THIS->name, 0, "Returning %d", ret);
+ return ret;
}
-
static int
-glusterd_set_shared_storage (dict_t *dict, char *key, char *value,
- char **op_errstr)
+glusterd_set_shared_storage(dict_t *dict, char *key, char *value,
+ char **op_errstr)
{
- int32_t ret = -1;
- char hooks_args[PATH_MAX] = {0, };
- char errstr[PATH_MAX] = {0, };
- xlator_t *this = NULL;
- int32_t len = 0;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
- GF_VALIDATE_OR_GOTO (this->name, dict, out);
- GF_VALIDATE_OR_GOTO (this->name, key, out);
- GF_VALIDATE_OR_GOTO (this->name, value, out);
- GF_VALIDATE_OR_GOTO (this->name, op_errstr, out);
+ int32_t ret = -1;
+ char hooks_args[PATH_MAX] = {
+ 0,
+ };
+ char errstr[PATH_MAX] = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ int32_t len = 0;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, dict, out);
+ GF_VALIDATE_OR_GOTO(this->name, key, out);
+ GF_VALIDATE_OR_GOTO(this->name, value, out);
+ GF_VALIDATE_OR_GOTO(this->name, op_errstr, out);
+
+ ret = 0;
+
+ if (strcmp(key, GLUSTERD_SHARED_STORAGE_KEY)) {
+ goto out;
+ }
+
+ /* Re-create the brick path so as to be *
+ * able to re-use it *
+ */
+ ret = recursive_rmdir(GLUSTER_SHARED_STORAGE_BRICK_DIR);
+ if (ret) {
+ snprintf(errstr, PATH_MAX,
+ "Failed to remove shared "
+ "storage brick(%s). "
+ "Reason: %s",
+ GLUSTER_SHARED_STORAGE_BRICK_DIR, strerror(errno));
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DIR_OP_FAILED, "%s",
+ errstr);
+ ret = -1;
+ goto out;
+ }
+
+ ret = mkdir_p(GLUSTER_SHARED_STORAGE_BRICK_DIR, 0777, _gf_true);
+ if (-1 == ret) {
+ snprintf(errstr, PATH_MAX,
+ "Failed to create shared "
+ "storage brick(%s). "
+ "Reason: %s",
+ GLUSTER_SHARED_STORAGE_BRICK_DIR, strerror(errno));
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED, "%s",
+ errstr);
+ goto out;
+ }
+
+ if (is_origin_glusterd(dict)) {
+ len = snprintf(hooks_args, sizeof(hooks_args),
+ "is_originator=1,local_node_hostname=%s",
+ local_node_hostname);
+ } else {
+ len = snprintf(hooks_args, sizeof(hooks_args),
+ "is_originator=0,local_node_hostname=%s",
+ local_node_hostname);
+ }
+ if ((len < 0) || (len >= sizeof(hooks_args))) {
+ ret = -1;
+ goto out;
+ }
- ret = 0;
+ ret = dict_set_dynstr_with_alloc(dict, "hooks_args", hooks_args);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Failed to set"
+ " hooks_args in dict.");
+ goto out;
+ }
- if (strcmp (key, GLUSTERD_SHARED_STORAGE_KEY)) {
- goto out;
+out:
+ if (ret && strlen(errstr)) {
+ *op_errstr = gf_strdup(errstr);
+ }
+
+ return ret;
+}
+
+static int
+glusterd_op_set_volume(dict_t *dict, char **errstr)
+{
+ int ret = 0;
+ glusterd_volinfo_t *volinfo = NULL;
+ char *volname = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ int count = 1;
+ char *key = NULL;
+ char *key_fixed = NULL;
+ char *value = NULL;
+ char keystr[50] = {
+ 0,
+ };
+ int keylen;
+ gf_boolean_t global_opt = _gf_false;
+ gf_boolean_t global_opts_set = _gf_false;
+ glusterd_volinfo_t *voliter = NULL;
+ int32_t dict_count = 0;
+ gf_boolean_t check_op_version = _gf_false;
+ uint32_t new_op_version = 0;
+ gf_boolean_t quorum_action = _gf_false;
+ glusterd_svc_t *svc = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = dict_get_int32n(dict, "count", SLEN("count"), &dict_count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+               "Count (dict) not set in Volume-Set");
+ goto out;
+ }
+
+ if (dict_count == 0) {
+ ret = glusterd_volset_help(NULL, errstr);
+ goto out;
+ }
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+
+ if (strcasecmp(volname, "all") == 0) {
+ ret = glusterd_op_set_all_volume_options(this, dict, errstr);
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ FMTSTR_CHECK_VOL_EXISTS, volname);
+ goto out;
+ }
+
+ /* TODO: Remove this once v3.3 compatibility is not required */
+ check_op_version = dict_get_str_boolean(dict, "check-op-version",
+ _gf_false);
+
+ if (check_op_version) {
+ ret = dict_get_uint32(dict, "new-op-version", &new_op_version);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get new op-version from dict");
+ goto out;
}
+ }
- /* Re-create the brick path so as to be *
- * able to re-use it *
- */
- ret = recursive_rmdir (GLUSTER_SHARED_STORAGE_BRICK_DIR);
+ for (count = 1; ret != -1; count++) {
+ keylen = snprintf(keystr, sizeof(keystr), "key%d", count);
+ ret = dict_get_strn(dict, keystr, keylen, &key);
+ if (ret)
+ break;
+
+ keylen = snprintf(keystr, sizeof(keystr), "value%d", count);
+ ret = dict_get_strn(dict, keystr, keylen, &value);
if (ret) {
- snprintf (errstr, PATH_MAX,
- "Failed to remove shared "
- "storage brick(%s). "
- "Reason: %s", GLUSTER_SHARED_STORAGE_BRICK_DIR,
- strerror (errno));
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DIR_OP_FAILED, "%s", errstr);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+                   "invalid key-value pair in 'volume set'");
+ ret = -1;
+ goto out;
+ }
+
+ if (strcmp(key, "config.memory-accounting") == 0) {
+ ret = gf_string2boolean(value, &volinfo->memory_accounting);
+ }
+
+ if (strcmp(key, "config.transport") == 0) {
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_VOL_TRANSPORT_TYPE_CHANGE,
+ "changing transport-type for volume %s to %s", volname,
+ value);
+ ret = 0;
+ if (strcasecmp(value, "rdma") == 0) {
+ volinfo->transport_type = GF_TRANSPORT_RDMA;
+ } else if (strcasecmp(value, "tcp") == 0) {
+ volinfo->transport_type = GF_TRANSPORT_TCP;
+ } else if ((strcasecmp(value, "tcp,rdma") == 0) ||
+ (strcasecmp(value, "rdma,tcp") == 0)) {
+ volinfo->transport_type = GF_TRANSPORT_BOTH_TCP_RDMA;
+ } else {
ret = -1;
goto out;
+ }
}
- ret = mkdir_p (GLUSTER_SHARED_STORAGE_BRICK_DIR, 0777, _gf_true);
- if (-1 == ret) {
- snprintf (errstr, PATH_MAX,
- "Failed to create shared "
- "storage brick(%s). "
- "Reason: %s", GLUSTER_SHARED_STORAGE_BRICK_DIR,
- strerror (errno));
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_CREATE_DIR_FAILED, "%s", errstr);
+ if (!is_key_glusterd_hooks_friendly(key)) {
+ ret = glusterd_check_option_exists(key, &key_fixed);
+ GF_ASSERT(ret);
+ if (ret <= 0) {
+ key_fixed = NULL;
goto out;
+ }
}
- if (is_origin_glusterd (dict)) {
- len = snprintf(hooks_args, sizeof(hooks_args),
- "is_originator=1,local_node_hostname=%s",
- local_node_hostname);
- } else {
- len = snprintf(hooks_args, sizeof(hooks_args),
- "is_originator=0,local_node_hostname=%s",
- local_node_hostname);
- }
- if ((len < 0) || (len >= sizeof(hooks_args))) {
- ret = -1;
- goto out;
+ global_opt = _gf_false;
+ if (glusterd_check_globaloption(key)) {
+ global_opt = _gf_true;
+ global_opts_set = _gf_true;
}
- ret = dict_set_dynstr_with_alloc (dict, "hooks_args", hooks_args);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_SET_FAILED, "Failed to set"
- " hooks_args in dict.");
- goto out;
- }
+ if (!global_opt)
+ value = gf_strdup(value);
-out:
- if (ret && strlen(errstr)) {
- *op_errstr = gf_strdup (errstr);
+ if (!value) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
+ "Unable to set the options in 'volume set'");
+ ret = -1;
+ goto out;
}
- return ret;
-}
+ if (key_fixed)
+ key = key_fixed;
+ if (glusterd_is_quorum_changed(volinfo->dict, key, value))
+ quorum_action = _gf_true;
-static int
-glusterd_op_set_volume (dict_t *dict, char **errstr)
-{
- int ret = 0;
- glusterd_volinfo_t *volinfo = NULL;
- char *volname = NULL;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- int count = 1;
- char *key = NULL;
- char *key_fixed = NULL;
- char *value = NULL;
- char keystr[50] = {0, };
- int keylen;
- gf_boolean_t global_opt = _gf_false;
- gf_boolean_t global_opts_set = _gf_false;
- glusterd_volinfo_t *voliter = NULL;
- int32_t dict_count = 0;
- gf_boolean_t check_op_version = _gf_false;
- uint32_t new_op_version = 0;
- gf_boolean_t quorum_action = _gf_false;
- glusterd_svc_t *svc = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- priv = this->private;
- GF_ASSERT (priv);
-
- ret = dict_get_int32n (dict, "count", SLEN ("count"), &dict_count);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Count(dict),not set in Volume-Set");
+ if (global_opt) {
+ cds_list_for_each_entry(voliter, &priv->volumes, vol_list)
+ {
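+            /* dup the value for every volume: dict_set_dynstr() takes
+             * ownership of its argument */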
+ value = gf_strdup(value);
+ ret = dict_set_dynstr(voliter->dict, key, value);
+ if (ret)
+ goto out;
+ }
+ } else {
+ ret = dict_set_dynstr(volinfo->dict, key, value);
+ if (ret)
goto out;
}
- if (dict_count == 0) {
- ret = glusterd_volset_help (NULL, errstr);
- goto out;
- }
+ if (key_fixed) {
+ GF_FREE(key_fixed);
+ key_fixed = NULL;
+ }
+ }
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
+ if (count == 1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_OPTIONS_GIVEN,
+               "No options received");
+ ret = -1;
+ goto out;
+ }
+
+ /* Update the cluster op-version before regenerating volfiles so that
+ * correct volfiles are generated
+ */
+ if (new_op_version > priv->op_version) {
+ priv->op_version = new_op_version;
+ ret = glusterd_store_global_info(this);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to get volume name");
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERS_STORE_FAIL,
+ "Failed to store op-version");
+ goto out;
}
+ }
+ if (!global_opts_set) {
+ gd_update_volume_op_versions(volinfo);
- if (strcasecmp (volname, "all") == 0) {
- ret = glusterd_op_set_all_volume_options (this, dict,
- errstr);
+ if (!volinfo->is_snap_volume) {
+ svc = &(volinfo->snapd.svc);
+ ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
+ if (ret)
goto out;
}
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, FMTSTR_CHECK_VOL_EXISTS,
- volname);
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ svc = &(volinfo->tierd.svc);
+ ret = svc->reconfigure(volinfo);
+ if (ret)
goto out;
}
+ svc = &(volinfo->gfproxyd.svc);
+ ret = svc->reconfigure(volinfo);
+ if (ret)
+ goto out;
- /* TODO: Remove this once v3.3 compatibility is not required */
- check_op_version = dict_get_str_boolean (dict, "check-op-version",
- _gf_false);
-
- if (check_op_version) {
- ret = dict_get_uint32 (dict, "new-op-version", &new_op_version);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Unable to get new op-version from dict");
- goto out;
- }
+ ret = glusterd_create_volfiles_and_notify_services(volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
+ "Unable to create volfile for"
+ " 'volume set'");
+ ret = -1;
+ goto out;
}
- for (count = 1; ret != -1 ; count++) {
-
- keylen = snprintf (keystr, sizeof (keystr), "key%d", count);
- ret = dict_get_strn (dict, keystr, keylen, &key);
- if (ret)
- break;
-
- keylen = snprintf (keystr, sizeof (keystr), "value%d", count);
- ret = dict_get_strn (dict, keystr, keylen, &value);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "invalid key,value pair in 'volume set'");
- ret = -1;
- goto out;
- }
-
- if (strcmp (key, "config.memory-accounting") == 0) {
- ret = gf_string2boolean (value,
- &volinfo->memory_accounting);
- }
-
- if (strcmp (key, "config.transport") == 0) {
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_VOL_TRANSPORT_TYPE_CHANGE,
- "changing transport-type for volume %s to %s",
- volname, value);
- ret = 0;
- if (strcasecmp (value, "rdma") == 0) {
- volinfo->transport_type = GF_TRANSPORT_RDMA;
- } else if (strcasecmp (value, "tcp") == 0) {
- volinfo->transport_type = GF_TRANSPORT_TCP;
- } else if ((strcasecmp (value, "tcp,rdma") == 0) ||
- (strcasecmp (value, "rdma,tcp") == 0)) {
- volinfo->transport_type =
- GF_TRANSPORT_BOTH_TCP_RDMA;
- } else {
- ret = -1;
- goto out;
- }
- }
-
- if (!is_key_glusterd_hooks_friendly (key)) {
- ret = glusterd_check_option_exists (key, &key_fixed);
- GF_ASSERT (ret);
- if (ret <= 0) {
- key_fixed = NULL;
- goto out;
- }
- }
-
- global_opt = _gf_false;
- if (glusterd_check_globaloption (key)) {
- global_opt = _gf_true;
- global_opts_set = _gf_true;
- }
-
- if (!global_opt)
- value = gf_strdup (value);
-
- if (!value) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_SET_FAIL,
- "Unable to set the options in 'volume set'");
- ret = -1;
- goto out;
- }
-
- if (key_fixed)
- key = key_fixed;
-
- if (glusterd_is_quorum_changed (volinfo->dict, key, value))
- quorum_action = _gf_true;
-
- if (global_opt) {
- cds_list_for_each_entry (voliter, &priv->volumes,
- vol_list) {
- value = gf_strdup (value);
- ret = dict_set_dynstr (voliter->dict, key,
- value);
- if (ret)
- goto out;
- }
- } else {
- ret = dict_set_dynstr (volinfo->dict, key, value);
- if (ret)
- goto out;
- }
-
- if (key_fixed) {
- GF_FREE (key_fixed);
- key_fixed = NULL;
- }
- }
+ ret = glusterd_store_volinfo(volinfo,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret)
+ goto out;
- if (count == 1) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_NO_OPTIONS_GIVEN, "No options received ");
- ret = -1;
+ if (GLUSTERD_STATUS_STARTED == volinfo->status) {
+ ret = glusterd_svcs_reconfigure();
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL,
+ "Unable to restart services");
goto out;
+ }
}
- /* Update the cluster op-version before regenerating volfiles so that
- * correct volfiles are generated
- */
- if (new_op_version > priv->op_version) {
- priv->op_version = new_op_version;
- ret = glusterd_store_global_info (this);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_OP_VERS_STORE_FAIL,
- "Failed to store op-version");
- goto out;
- }
- }
- if (!global_opts_set) {
- gd_update_volume_op_versions (volinfo);
+ } else {
+ cds_list_for_each_entry(voliter, &priv->volumes, vol_list)
+ {
+ volinfo = voliter;
+ gd_update_volume_op_versions(volinfo);
- if (!volinfo->is_snap_volume) {
- svc = &(volinfo->snapd.svc);
- ret = svc->manager (svc, volinfo, PROC_START_NO_WAIT);
- if (ret)
- goto out;
- }
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- svc = &(volinfo->tierd.svc);
- ret = svc->reconfigure (volinfo);
- if (ret)
- goto out;
- }
- svc = &(volinfo->gfproxyd.svc);
- ret = svc->reconfigure (volinfo);
+ if (!volinfo->is_snap_volume) {
+ svc = &(volinfo->snapd.svc);
+ ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
if (ret)
- goto out;
+ goto out;
+ }
- ret = glusterd_create_volfiles_and_notify_services (volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLFILE_CREATE_FAIL,
- "Unable to create volfile for"
- " 'volume set'");
- ret = -1;
- goto out;
- }
-
- ret = glusterd_store_volinfo (volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ svc = &(volinfo->tierd.svc);
+ ret = svc->reconfigure(volinfo);
if (ret)
- goto out;
+ goto out;
+ }
- if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_svcs_reconfigure ();
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SVC_RESTART_FAIL,
- "Unable to restart services");
- goto out;
- }
- }
-
- } else {
- cds_list_for_each_entry (voliter, &priv->volumes, vol_list) {
- volinfo = voliter;
- gd_update_volume_op_versions (volinfo);
-
- if (!volinfo->is_snap_volume) {
- svc = &(volinfo->snapd.svc);
- ret = svc->manager (svc, volinfo,
- PROC_START_NO_WAIT);
- if (ret)
- goto out;
- }
+ svc = &(volinfo->gfproxyd.svc);
+ ret = svc->reconfigure(volinfo);
+ if (ret)
+ goto out;
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- svc = &(volinfo->tierd.svc);
- ret = svc->reconfigure (volinfo);
- if (ret)
- goto out;
- }
+ ret = glusterd_create_volfiles_and_notify_services(volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
+ "Unable to create volfile for"
+ " 'volume set'");
+ ret = -1;
+ goto out;
+ }
- svc = &(volinfo->gfproxyd.svc);
- ret = svc->reconfigure (volinfo);
- if (ret)
- goto out;
-
- ret = glusterd_create_volfiles_and_notify_services (volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLFILE_CREATE_FAIL,
- "Unable to create volfile for"
- " 'volume set'");
- ret = -1;
- goto out;
- }
+ ret = glusterd_store_volinfo(volinfo,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret)
+ goto out;
- ret = glusterd_store_volinfo (volinfo,
- GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret)
- goto out;
-
- if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_svcs_reconfigure ();
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SVC_RESTART_FAIL,
- "Unable to restart services");
- goto out;
- }
- }
+ if (GLUSTERD_STATUS_STARTED == volinfo->status) {
+ ret = glusterd_svcs_reconfigure();
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL,
+ "Unable to restart services");
+ goto out;
}
+ }
}
+ }
- out:
- GF_FREE (key_fixed);
- gf_msg_debug (this->name, 0, "returning %d", ret);
- if (quorum_action)
- glusterd_do_quorum_action ();
- return ret;
+out:
+ GF_FREE(key_fixed);
+ gf_msg_debug(this->name, 0, "returning %d", ret);
+ if (quorum_action)
+ glusterd_do_quorum_action();
+ return ret;
}
-
static int
-glusterd_op_sync_volume (dict_t *dict, char **op_errstr,
- dict_t *rsp_dict)
+glusterd_op_sync_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
- int ret = -1;
- char *volname = NULL;
- char *hostname = NULL;
- char msg[2048] = {0,};
- int count = 1;
- int vol_count = 0;
- glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- ret = dict_get_strn (dict, "hostname", SLEN ("hostname"), &hostname);
- if (ret) {
- snprintf (msg, sizeof (msg), "hostname couldn't be "
- "retrieved from msg");
- *op_errstr = gf_strdup (msg);
- goto out;
- }
+ int ret = -1;
+ char *volname = NULL;
+ char *hostname = NULL;
+ char msg[2048] = {
+ 0,
+ };
+ int count = 1;
+ int vol_count = 0;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = dict_get_strn(dict, "hostname", SLEN("hostname"), &hostname);
+ if (ret) {
+ snprintf(msg, sizeof(msg),
+ "hostname couldn't be "
+ "retrieved from msg");
+ *op_errstr = gf_strdup(msg);
+ goto out;
+ }
+
+ if (!gf_is_local_addr(hostname)) {
+ ret = 0;
+ goto out;
+ }
- if (!gf_is_local_addr (hostname)) {
- ret = 0;
- goto out;
+ // volname is not present in case of sync all
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (!ret) {
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ "Volume with name: %s "
+                   "does not exist",
+ volname);
+ goto out;
}
+ }
- //volname is not present in case of sync all
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (!ret) {
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, "Volume with name: %s "
- "not exists", volname);
- goto out;
- }
- }
+ if (!rsp_dict) {
+ // this should happen only on source
+ ret = 0;
+ goto out;
+ }
- if (!rsp_dict) {
- //this should happen only on source
- ret = 0;
+ if (volname) {
+ ret = glusterd_add_volume_to_dict(volinfo, rsp_dict, 1, "volume");
+ if (ret)
+ goto out;
+ vol_count = 1;
+ } else {
+ cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
+ {
+ ret = glusterd_add_volume_to_dict(volinfo, rsp_dict, count,
+ "volume");
+ if (ret)
goto out;
- }
- if (volname) {
- ret = glusterd_add_volume_to_dict (volinfo, rsp_dict,
- 1, "volume");
- if (ret)
- goto out;
- vol_count = 1;
- } else {
- cds_list_for_each_entry (volinfo, &priv->volumes, vol_list) {
- ret = glusterd_add_volume_to_dict (volinfo, rsp_dict,
- count, "volume");
- if (ret)
- goto out;
-
- vol_count = count++;
- }
+ vol_count = count++;
}
- ret = dict_set_int32n (rsp_dict, "count", SLEN ("count"), vol_count);
+ }
+ ret = dict_set_int32n(rsp_dict, "count", SLEN("count"), vol_count);
out:
- gf_msg_debug ("glusterd", 0, "Returning %d", ret);
+ gf_msg_debug("glusterd", 0, "Returning %d", ret);
- return ret;
+ return ret;
}
static int
-glusterd_add_profile_volume_options (glusterd_volinfo_t *volinfo)
+glusterd_add_profile_volume_options(glusterd_volinfo_t *volinfo)
{
- int ret = -1;
-
- GF_ASSERT (volinfo);
-
- ret = dict_set_nstrn (volinfo->dict, VKEY_DIAG_LAT_MEASUREMENT,
- SLEN (VKEY_DIAG_LAT_MEASUREMENT),
- "on", SLEN ("on"));
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "failed to set the volume %s "
- "option %s value %s",
- volinfo->volname, VKEY_DIAG_LAT_MEASUREMENT, "on");
- goto out;
- }
-
- ret = dict_set_nstrn (volinfo->dict, VKEY_DIAG_CNT_FOP_HITS,
- SLEN (VKEY_DIAG_CNT_FOP_HITS),
- "on", SLEN ("on"));
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "failed to set the volume %s "
- "option %s value %s",
- volinfo->volname, VKEY_DIAG_CNT_FOP_HITS, "on");
- goto out;
- }
+ int ret = -1;
+
+ GF_ASSERT(volinfo);
+
+ ret = dict_set_nstrn(volinfo->dict, VKEY_DIAG_LAT_MEASUREMENT,
+ SLEN(VKEY_DIAG_LAT_MEASUREMENT), "on", SLEN("on"));
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "failed to set the volume %s "
+ "option %s value %s",
+ volinfo->volname, VKEY_DIAG_LAT_MEASUREMENT, "on");
+ goto out;
+ }
+
+ ret = dict_set_nstrn(volinfo->dict, VKEY_DIAG_CNT_FOP_HITS,
+ SLEN(VKEY_DIAG_CNT_FOP_HITS), "on", SLEN("on"));
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "failed to set the volume %s "
+ "option %s value %s",
+ volinfo->volname, VKEY_DIAG_CNT_FOP_HITS, "on");
+ goto out;
+ }
out:
- gf_msg_debug ("glusterd", 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug("glusterd", 0, "Returning %d", ret);
+ return ret;
}
static void
-glusterd_remove_profile_volume_options (glusterd_volinfo_t *volinfo)
+glusterd_remove_profile_volume_options(glusterd_volinfo_t *volinfo)
{
- GF_ASSERT (volinfo);
+ GF_ASSERT(volinfo);
- dict_deln (volinfo->dict, VKEY_DIAG_LAT_MEASUREMENT,
- SLEN (VKEY_DIAG_LAT_MEASUREMENT));
- dict_deln (volinfo->dict, VKEY_DIAG_CNT_FOP_HITS,
- SLEN (VKEY_DIAG_CNT_FOP_HITS));
+ dict_deln(volinfo->dict, VKEY_DIAG_LAT_MEASUREMENT,
+ SLEN(VKEY_DIAG_LAT_MEASUREMENT));
+ dict_deln(volinfo->dict, VKEY_DIAG_CNT_FOP_HITS,
+ SLEN(VKEY_DIAG_CNT_FOP_HITS));
}
static int
-glusterd_op_stats_volume (dict_t *dict, char **op_errstr,
- dict_t *rsp_dict)
+glusterd_op_stats_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
- int ret = -1;
- char *volname = NULL;
- char msg[2048] = {0,};
- glusterd_volinfo_t *volinfo = NULL;
- int32_t stats_op = GF_CLI_STATS_NONE;
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "volume name get failed");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- snprintf (msg, sizeof (msg), "Volume %s does not exists",
- volname);
-
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, "%s", msg);
- goto out;
- }
-
- ret = dict_get_int32n (dict, "op", SLEN ("op"), &stats_op);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "volume profile op get failed");
- goto out;
- }
-
- switch (stats_op) {
+ int ret = -1;
+ char *volname = NULL;
+ char msg[2048] = {
+ 0,
+ };
+ glusterd_volinfo_t *volinfo = NULL;
+ int32_t stats_op = GF_CLI_STATS_NONE;
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "volume name get failed");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+        snprintf(msg, sizeof(msg), "Volume %s does not exist", volname);
+
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
+ goto out;
+ }
+
+ ret = dict_get_int32n(dict, "op", SLEN("op"), &stats_op);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "volume profile op get failed");
+ goto out;
+ }
+
+ switch (stats_op) {
case GF_CLI_STATS_START:
- ret = glusterd_add_profile_volume_options (volinfo);
- if (ret)
- goto out;
- break;
+ ret = glusterd_add_profile_volume_options(volinfo);
+ if (ret)
+ goto out;
+ break;
case GF_CLI_STATS_STOP:
- glusterd_remove_profile_volume_options (volinfo);
- break;
+ glusterd_remove_profile_volume_options(volinfo);
+ break;
case GF_CLI_STATS_INFO:
case GF_CLI_STATS_TOP:
- //info is already collected in brick op.
- //just goto out;
- ret = 0;
- goto out;
- break;
+ // info is already collected in brick op.
+ // just goto out;
+ ret = 0;
+ goto out;
+ break;
default:
- GF_ASSERT (0);
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_INVALID_ENTRY, "Invalid profile op: %d",
- stats_op);
- ret = -1;
- goto out;
- break;
- }
- ret = glusterd_create_volfiles_and_notify_services (volinfo);
+ GF_ASSERT(0);
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
+ "Invalid profile op: %d", stats_op);
+ ret = -1;
+ goto out;
+ break;
+ }
+ ret = glusterd_create_volfiles_and_notify_services(volinfo);
+
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
+ "Unable to create volfile for"
+ " 'volume set'");
+ ret = -1;
+ goto out;
+ }
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_VOLFILE_CREATE_FAIL,
- "Unable to create volfile for"
- " 'volume set'");
- ret = -1;
- goto out;
- }
+ ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret)
+ goto out;
- ret = glusterd_store_volinfo (volinfo,
- GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (GLUSTERD_STATUS_STARTED == volinfo->status) {
+ ret = glusterd_svcs_reconfigure();
if (ret)
- goto out;
+ goto out;
+ }
- if (GLUSTERD_STATUS_STARTED == volinfo->status) {
- ret = glusterd_svcs_reconfigure ();
- if (ret)
- goto out;
- }
-
- ret = 0;
+ ret = 0;
out:
- gf_msg_debug ("glusterd", 0, "Returning %d", ret);
+ gf_msg_debug("glusterd", 0, "Returning %d", ret);
- return ret;
+ return ret;
}
static int
-_add_remove_bricks_to_dict (dict_t *dict, glusterd_volinfo_t *volinfo,
- char *prefix)
+_add_remove_bricks_to_dict(dict_t *dict, glusterd_volinfo_t *volinfo,
+ char *prefix)
{
- int ret = -1;
- int count = 0;
- int i = 0;
- char brick_key[1024] = {0,};
- char dict_key[1024] ={0,};
- int keylen;
- char *brick = NULL;
- xlator_t *this = NULL;
-
- GF_ASSERT (dict);
- GF_ASSERT (volinfo);
- GF_ASSERT (prefix);
-
- this = THIS;
- GF_ASSERT (this);
-
- ret = dict_get_int32n (volinfo->rebal.dict, "count",
- SLEN ("count"), &count);
+ int ret = -1;
+ int count = 0;
+ int i = 0;
+ char brick_key[1024] = {
+ 0,
+ };
+ char dict_key[1024] = {
+ 0,
+ };
+ int keylen;
+ char *brick = NULL;
+ xlator_t *this = NULL;
+
+ GF_ASSERT(dict);
+ GF_ASSERT(volinfo);
+ GF_ASSERT(prefix);
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = dict_get_int32n(volinfo->rebal.dict, "count", SLEN("count"), &count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Failed to get brick count");
+ goto out;
+ }
+
+ keylen = snprintf(dict_key, sizeof(dict_key), "%s.count", prefix);
+ ret = dict_set_int32n(dict, dict_key, keylen, count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set brick count in dict");
+ goto out;
+ }
+
+ for (i = 1; i <= count; i++) {
+ keylen = snprintf(brick_key, sizeof(brick_key), "brick%d", i);
+
+ ret = dict_get_strn(volinfo->rebal.dict, brick_key, keylen, &brick);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Failed to get brick count");
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get %s", brick_key);
+ goto out;
}
- keylen = snprintf (dict_key, sizeof (dict_key), "%s.count", prefix);
- ret = dict_set_int32n (dict, dict_key, keylen, count);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set brick count in dict");
- goto out;
+ keylen = snprintf(dict_key, sizeof(dict_key), "%s.%s", prefix,
+ brick_key);
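+        /* snprintf() signals truncation through its return value; treat
+         * that as an error */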
+ if ((keylen < 0) || (keylen >= sizeof(dict_key))) {
+ ret = -1;
+ goto out;
}
-
- for (i = 1; i <= count; i++) {
- keylen = snprintf (brick_key, sizeof (brick_key),
- "brick%d", i);
-
- ret = dict_get_strn (volinfo->rebal.dict, brick_key,
- keylen, &brick);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Unable to get %s", brick_key);
- goto out;
- }
-
- keylen = snprintf (dict_key, sizeof (dict_key),
- "%s.%s", prefix, brick_key);
- if ((keylen < 0) || (keylen >= sizeof(dict_key))) {
- ret = -1;
- goto out;
- }
- ret = dict_set_strn (dict, dict_key, keylen, brick);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to add brick to dict");
- goto out;
- }
- brick = NULL;
+ ret = dict_set_strn(dict, dict_key, keylen, brick);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to add brick to dict");
+ goto out;
}
+ brick = NULL;
+ }
out:
- return ret;
+ return ret;
}
/* This adds the respective task-id and all available parameters of a task into
* a dictionary
*/
static int
-_add_task_to_dict (dict_t *dict, glusterd_volinfo_t *volinfo, int op, int index)
+_add_task_to_dict(dict_t *dict, glusterd_volinfo_t *volinfo, int op, int index)
{
-
- int ret = -1;
- char key[64] = {0,};
- int keylen;
- char *uuid_str = NULL;
- int status = 0;
- xlator_t *this = NULL;
-
- GF_ASSERT (dict);
- GF_ASSERT (volinfo);
-
- this = THIS;
- GF_ASSERT (this);
-
- switch (op) {
+ int ret = -1;
+ char key[64] = {
+ 0,
+ };
+ int keylen;
+ char *uuid_str = NULL;
+ int status = 0;
+ xlator_t *this = NULL;
+
+ GF_ASSERT(dict);
+ GF_ASSERT(volinfo);
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ switch (op) {
case GD_OP_REMOVE_TIER_BRICK:
case GD_OP_REMOVE_BRICK:
- snprintf (key, sizeof (key), "task%d", index);
- ret = _add_remove_bricks_to_dict (dict, volinfo, key);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_ADD_REMOVE_BRICK_FAIL,
- "Failed to add remove bricks to dict");
- goto out;
- }
+ snprintf(key, sizeof(key), "task%d", index);
+ ret = _add_remove_bricks_to_dict(dict, volinfo, key);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_ADD_REMOVE_BRICK_FAIL,
+ "Failed to add remove bricks to dict");
+ goto out;
+ }
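+            /* no break: execution falls through so remove-brick tasks pick
+             * up the rebalance id and status below */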
case GD_OP_TIER_MIGRATE:
case GD_OP_REBALANCE:
- uuid_str = gf_strdup (uuid_utoa (volinfo->rebal.rebalance_id));
- status = volinfo->rebal.defrag_status;
- break;
+ uuid_str = gf_strdup(uuid_utoa(volinfo->rebal.rebalance_id));
+ status = volinfo->rebal.defrag_status;
+ break;
default:
- ret = -1;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_NO_TASK_ID, "%s operation doesn't have a"
- " task_id", gd_op_list[op]);
- goto out;
- }
-
- keylen = snprintf (key, sizeof (key), "task%d.type", index);
- ret = dict_set_strn (dict, key, keylen, (char *)gd_op_list[op]);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Error setting task type in dict");
- goto out;
- }
-
- keylen = snprintf (key, sizeof (key), "task%d.id", index);
-
- if (!uuid_str)
- goto out;
- ret = dict_set_dynstrn (dict, key, keylen, uuid_str);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Error setting task id in dict");
- goto out;
- }
- uuid_str = NULL;
-
- keylen = snprintf (key, sizeof (key), "task%d.status", index);
- ret = dict_set_int32n (dict, key, keylen, status);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Error setting task status in dict");
- goto out;
- }
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_TASK_ID,
+ "%s operation doesn't have a"
+ " task_id",
+ gd_op_list[op]);
+ goto out;
+ }
+
+ keylen = snprintf(key, sizeof(key), "task%d.type", index);
+ ret = dict_set_strn(dict, key, keylen, (char *)gd_op_list[op]);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Error setting task type in dict");
+ goto out;
+ }
+
+ keylen = snprintf(key, sizeof(key), "task%d.id", index);
+
+ if (!uuid_str)
+ goto out;
+ ret = dict_set_dynstrn(dict, key, keylen, uuid_str);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Error setting task id in dict");
+ goto out;
+ }
+ uuid_str = NULL;
+
+ keylen = snprintf(key, sizeof(key), "task%d.status", index);
+ ret = dict_set_int32n(dict, key, keylen, status);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Error setting task status in dict");
+ goto out;
+ }
out:
- if (uuid_str)
- GF_FREE (uuid_str);
- return ret;
+ if (uuid_str)
+ GF_FREE(uuid_str);
+ return ret;
}
static int
-glusterd_aggregate_task_status (dict_t *rsp_dict, glusterd_volinfo_t *volinfo)
+glusterd_aggregate_task_status(dict_t *rsp_dict, glusterd_volinfo_t *volinfo)
{
- int ret = -1;
- int tasks = 0;
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
+ int ret = -1;
+ int tasks = 0;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
- if (!gf_uuid_is_null (volinfo->rebal.rebalance_id)) {
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- if (conf->op_version > GD_OP_VERSION_3_10_0)
- goto done;
- if (volinfo->rebal.op == GD_OP_REMOVE_BRICK)
- ret = _add_task_to_dict (rsp_dict,
- volinfo,
- GD_OP_REMOVE_TIER_BRICK,
- tasks);
- else if (volinfo->rebal.op == GD_OP_REBALANCE)
- ret = _add_task_to_dict (rsp_dict,
- volinfo,
- GD_OP_TIER_MIGRATE,
- tasks);
- } else
- ret = _add_task_to_dict (rsp_dict, volinfo,
- volinfo->rebal.op, tasks);
+ if (!gf_uuid_is_null(volinfo->rebal.rebalance_id)) {
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ if (conf->op_version > GD_OP_VERSION_3_10_0)
+ goto done;
+ if (volinfo->rebal.op == GD_OP_REMOVE_BRICK)
+ ret = _add_task_to_dict(rsp_dict, volinfo,
+ GD_OP_REMOVE_TIER_BRICK, tasks);
+ else if (volinfo->rebal.op == GD_OP_REBALANCE)
+ ret = _add_task_to_dict(rsp_dict, volinfo, GD_OP_TIER_MIGRATE,
+ tasks);
+ } else
+ ret = _add_task_to_dict(rsp_dict, volinfo, volinfo->rebal.op,
+ tasks);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to add task details to dict");
- goto out;
- }
- tasks++;
- }
-done:
- ret = dict_set_int32n (rsp_dict, "tasks", SLEN ("tasks"), tasks);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Error setting tasks count in dict");
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to add task details to dict");
+ goto out;
}
- ret = 0;
+ tasks++;
+ }
+done:
+ ret = dict_set_int32n(rsp_dict, "tasks", SLEN("tasks"), tasks);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Error setting tasks count in dict");
+ goto out;
+ }
+ ret = 0;
out:
- return ret;
+ return ret;
}
static int
-glusterd_op_status_volume (dict_t *dict, char **op_errstr,
- dict_t *rsp_dict)
+glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
- int ret = -1;
- int node_count = 0;
- int brick_index = -1;
- int other_count = 0;
- int hot_brick_count = -1;
- int other_index = 0;
- uint32_t cmd = 0;
- char *volname = NULL;
- char *brick = NULL;
- xlator_t *this = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_conf_t *priv = NULL;
- dict_t *vol_opts = NULL;
- gf_boolean_t nfs_disabled = _gf_false;
- gf_boolean_t shd_enabled = _gf_false;
- gf_boolean_t origin_glusterd = _gf_false;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
-
- GF_ASSERT (priv);
-
- GF_ASSERT (dict);
-
- origin_glusterd = is_origin_glusterd (dict);
-
- ret = dict_get_uint32 (dict, "cmd", &cmd);
+ int ret = -1;
+ int node_count = 0;
+ int brick_index = -1;
+ int other_count = 0;
+ int hot_brick_count = -1;
+ int other_index = 0;
+ uint32_t cmd = 0;
+ char *volname = NULL;
+ char *brick = NULL;
+ xlator_t *this = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ dict_t *vol_opts = NULL;
+ gf_boolean_t nfs_disabled = _gf_false;
+ gf_boolean_t shd_enabled = _gf_false;
+ gf_boolean_t origin_glusterd = _gf_false;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+
+ GF_ASSERT(priv);
+
+ GF_ASSERT(dict);
+
+ origin_glusterd = is_origin_glusterd(dict);
+
+ ret = dict_get_uint32(dict, "cmd", &cmd);
+ if (ret)
+ goto out;
+
+ if (origin_glusterd) {
+ ret = 0;
+ if ((cmd & GF_CLI_STATUS_ALL)) {
+ ret = glusterd_get_all_volnames(rsp_dict);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLNAMES_GET_FAIL,
+ "failed to get all volume "
+ "names for status");
+ }
+ }
+
+ ret = dict_set_uint32(rsp_dict, "cmd", cmd);
+ if (ret)
+ goto out;
+
+ if (cmd & GF_CLI_STATUS_ALL)
+ goto out;
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret)
+ goto out;
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ "Volume with name: %s "
+ "does not exist",
+ volname);
+ goto out;
+ }
+ vol_opts = volinfo->dict;
+
+ if ((cmd & GF_CLI_STATUS_NFS) != 0) {
+ ret = glusterd_add_node_to_dict(priv->nfs_svc.name, rsp_dict, 0,
+ vol_opts);
if (ret)
- goto out;
-
- if (origin_glusterd) {
- ret = 0;
- if ((cmd & GF_CLI_STATUS_ALL)) {
- ret = glusterd_get_all_volnames (rsp_dict);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOLNAMES_GET_FAIL,
- "failed to get all volume "
- "names for status");
- }
- }
+ goto out;
+ other_count++;
+ node_count++;
- ret = dict_set_uint32 (rsp_dict, "cmd", cmd);
+ } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
+ ret = glusterd_add_node_to_dict(priv->shd_svc.name, rsp_dict, 0,
+ vol_opts);
if (ret)
- goto out;
+ goto out;
+ other_count++;
+ node_count++;
- if (cmd & GF_CLI_STATUS_ALL)
- goto out;
+ } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
+ ret = glusterd_add_node_to_dict(priv->quotad_svc.name, rsp_dict, 0,
+ vol_opts);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
+ } else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
+ ret = glusterd_add_node_to_dict(priv->bitd_svc.name, rsp_dict, 0,
+ vol_opts);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
+ } else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
+ ret = glusterd_add_node_to_dict(priv->scrub_svc.name, rsp_dict, 0,
+ vol_opts);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
+ } else if ((cmd & GF_CLI_STATUS_TIERD) != 0) {
+ ret = glusterd_add_tierd_to_dict(volinfo, rsp_dict, other_index);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
+ } else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
+ ret = glusterd_add_snapd_to_dict(volinfo, rsp_dict, other_index);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
+ } else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
+ ret = dict_get_strn(dict, "brick", SLEN("brick"), &brick);
+ if (ret)
+ goto out;
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
+ ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo,
+ _gf_false);
if (ret)
- goto out;
+ goto out;
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, "Volume with name: %s "
- "does not exist", volname);
- goto out;
+ if (gf_uuid_compare(brickinfo->uuid, MY_UUID))
+ goto out;
+
+ glusterd_add_brick_to_dict(volinfo, brickinfo, rsp_dict, ++brick_index);
+ if (cmd & GF_CLI_STATUS_DETAIL)
+ glusterd_add_brick_detail_to_dict(volinfo, brickinfo, rsp_dict,
+ brick_index);
+ node_count++;
+
+ } else if ((cmd & GF_CLI_STATUS_TASKS) != 0) {
+ ret = glusterd_aggregate_task_status(rsp_dict, volinfo);
+ goto out;
+
+ } else {
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ brick_index++;
+ if (gf_uuid_compare(brickinfo->uuid, MY_UUID))
+ continue;
+
+ glusterd_add_brick_to_dict(volinfo, brickinfo, rsp_dict,
+ brick_index);
+
+ if (cmd & GF_CLI_STATUS_DETAIL) {
+ glusterd_add_brick_detail_to_dict(volinfo, brickinfo, rsp_dict,
+ brick_index);
+ }
+ node_count++;
}
- vol_opts = volinfo->dict;
- if ((cmd & GF_CLI_STATUS_NFS) != 0) {
- ret = glusterd_add_node_to_dict (priv->nfs_svc.name, rsp_dict,
- 0, vol_opts);
+ if ((cmd & GF_CLI_STATUS_MASK) == GF_CLI_STATUS_NONE) {
+ other_index = brick_index + 1;
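+            /* auxiliary daemons (snapd, tierd, nfs, shd, quotad, bitd,
+             * scrub) are indexed after the last brick */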
+ if (glusterd_is_snapd_enabled(volinfo)) {
+ ret = glusterd_add_snapd_to_dict(volinfo, rsp_dict,
+ other_index);
if (ret)
- goto out;
+ goto out;
other_count++;
+ other_index++;
node_count++;
+ }
- } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
- ret = glusterd_add_node_to_dict (priv->shd_svc.name, rsp_dict,
- 0, vol_opts);
+ if (glusterd_is_tierd_enabled(volinfo)) {
+ ret = glusterd_add_tierd_to_dict(volinfo, rsp_dict,
+ other_index);
if (ret)
- goto out;
+ goto out;
other_count++;
+ other_index++;
node_count++;
+ }
- } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
- ret = glusterd_add_node_to_dict (priv->quotad_svc.name,
- rsp_dict, 0, vol_opts);
+ nfs_disabled = dict_get_str_boolean(vol_opts, NFS_DISABLE_MAP_KEY,
+ _gf_false);
+ if (!nfs_disabled) {
+ ret = glusterd_add_node_to_dict(priv->nfs_svc.name, rsp_dict,
+ other_index, vol_opts);
if (ret)
- goto out;
+ goto out;
+ other_index++;
other_count++;
node_count++;
- } else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
- ret = glusterd_add_node_to_dict (priv->bitd_svc.name,
- rsp_dict, 0, vol_opts);
+ }
+
+ if (glusterd_is_shd_compatible_volume(volinfo))
+ shd_enabled = gd_is_self_heal_enabled(volinfo, vol_opts);
+ if (shd_enabled) {
+ ret = glusterd_add_node_to_dict(priv->shd_svc.name, rsp_dict,
+ other_index, vol_opts);
if (ret)
- goto out;
+ goto out;
other_count++;
node_count++;
- } else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
- ret = glusterd_add_node_to_dict (priv->scrub_svc.name,
- rsp_dict, 0, vol_opts);
+ other_index++;
+ }
+
+ if (glusterd_is_volume_quota_enabled(volinfo)) {
+ ret = glusterd_add_node_to_dict(priv->quotad_svc.name, rsp_dict,
+ other_index, vol_opts);
if (ret)
- goto out;
+ goto out;
other_count++;
node_count++;
- } else if ((cmd & GF_CLI_STATUS_TIERD) != 0) {
- ret = glusterd_add_tierd_to_dict (volinfo, rsp_dict,
- other_index);
+ other_index++;
+ }
+
+ if (glusterd_is_bitrot_enabled(volinfo)) {
+ ret = glusterd_add_node_to_dict(priv->bitd_svc.name, rsp_dict,
+ other_index, vol_opts);
if (ret)
- goto out;
+ goto out;
other_count++;
node_count++;
- } else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
- ret = glusterd_add_snapd_to_dict (volinfo, rsp_dict,
- other_index);
+ other_index++;
+ }
+
+ /* For handling scrub status. Scrub daemon will be
+         * running automatically when bitrot is enabled. */
+ if (glusterd_is_bitrot_enabled(volinfo)) {
+ ret = glusterd_add_node_to_dict(priv->scrub_svc.name, rsp_dict,
+ other_index, vol_opts);
if (ret)
- goto out;
+ goto out;
other_count++;
node_count++;
- } else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
- ret = dict_get_strn (dict, "brick", SLEN ("brick"), &brick);
- if (ret)
- goto out;
-
- ret = glusterd_volume_brickinfo_get_by_brick (brick,
- volinfo,
- &brickinfo,
- _gf_false);
- if (ret)
- goto out;
-
- if (gf_uuid_compare (brickinfo->uuid, MY_UUID))
- goto out;
-
- glusterd_add_brick_to_dict (volinfo, brickinfo, rsp_dict,
- ++brick_index);
- if (cmd & GF_CLI_STATUS_DETAIL)
- glusterd_add_brick_detail_to_dict (volinfo, brickinfo,
- rsp_dict,
- brick_index);
- node_count++;
-
- } else if ((cmd & GF_CLI_STATUS_TASKS) != 0) {
- ret = glusterd_aggregate_task_status (rsp_dict, volinfo);
- goto out;
-
- } else {
- cds_list_for_each_entry (brickinfo, &volinfo->bricks,
- brick_list) {
- brick_index++;
- if (gf_uuid_compare (brickinfo->uuid, MY_UUID))
- continue;
-
- glusterd_add_brick_to_dict (volinfo, brickinfo,
- rsp_dict, brick_index);
-
- if (cmd & GF_CLI_STATUS_DETAIL) {
- glusterd_add_brick_detail_to_dict (volinfo,
- brickinfo,
- rsp_dict,
- brick_index);
- }
- node_count++;
- }
-
- if ((cmd & GF_CLI_STATUS_MASK) == GF_CLI_STATUS_NONE) {
- other_index = brick_index + 1;
- if (glusterd_is_snapd_enabled (volinfo)) {
- ret = glusterd_add_snapd_to_dict (volinfo,
- rsp_dict,
- other_index);
- if (ret)
- goto out;
- other_count++;
- other_index++;
- node_count++;
- }
-
- if (glusterd_is_tierd_enabled (volinfo)) {
- ret = glusterd_add_tierd_to_dict (volinfo,
- rsp_dict,
- other_index);
- if (ret)
- goto out;
- other_count++;
- other_index++;
- node_count++;
- }
-
- nfs_disabled = dict_get_str_boolean (vol_opts,
- NFS_DISABLE_MAP_KEY,
- _gf_false);
- if (!nfs_disabled) {
- ret = glusterd_add_node_to_dict
- (priv->nfs_svc.name,
- rsp_dict,
- other_index,
- vol_opts);
- if (ret)
- goto out;
- other_index++;
- other_count++;
- node_count++;
- }
-
- if (glusterd_is_shd_compatible_volume (volinfo))
- shd_enabled = gd_is_self_heal_enabled
- (volinfo, vol_opts);
- if (shd_enabled) {
- ret = glusterd_add_node_to_dict
- (priv->shd_svc.name, rsp_dict,
- other_index, vol_opts);
- if (ret)
- goto out;
- other_count++;
- node_count++;
- other_index++;
- }
-
- if (glusterd_is_volume_quota_enabled (volinfo)) {
- ret = glusterd_add_node_to_dict
- (priv->quotad_svc.name,
- rsp_dict,
- other_index,
- vol_opts);
- if (ret)
- goto out;
- other_count++;
- node_count++;
- other_index++;
- }
-
- if (glusterd_is_bitrot_enabled (volinfo)) {
- ret = glusterd_add_node_to_dict
- (priv->bitd_svc.name,
- rsp_dict,
- other_index,
- vol_opts);
- if (ret)
- goto out;
- other_count++;
- node_count++;
- other_index++;
- }
-
- /* For handling scrub status. Scrub daemon will be
- * running automatically when bitrot is enable*/
- if (glusterd_is_bitrot_enabled (volinfo)) {
- ret = glusterd_add_node_to_dict
- (priv->scrub_svc.name,
- rsp_dict,
- other_index,
- vol_opts);
- if (ret)
- goto out;
- other_count++;
- node_count++;
- }
- }
- }
-
- if (volinfo->type == GF_CLUSTER_TYPE_TIER)
- hot_brick_count = volinfo->tier_info.hot_brick_count;
- ret = dict_set_int32n (rsp_dict, "hot_brick_count",
- SLEN ("hot_brick_count"), hot_brick_count);
- if (ret)
- goto out;
-
- ret = dict_set_int32n (rsp_dict, "type", SLEN ("type"),
- volinfo->type);
- if (ret)
- goto out;
-
- ret = dict_set_int32n (rsp_dict, "brick-index-max",
- SLEN ("brick-index-max"), brick_index);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Error setting brick-index-max to dict");
- goto out;
- }
- ret = dict_set_int32n (rsp_dict, "other-count",
- SLEN ("other-count"), other_count);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Error setting other-count to dict");
- goto out;
- }
- ret = dict_set_int32n (rsp_dict, "count", SLEN ("count"), node_count);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Error setting node count to dict");
- goto out;
- }
-
- /* Active tasks */
- /* Tasks are added only for normal volume status request for either a
- * single volume or all volumes
- */
- if (!glusterd_status_has_tasks (cmd))
- goto out;
-
- ret = glusterd_aggregate_task_status (rsp_dict, volinfo);
- if (ret)
- goto out;
- ret = 0;
+ }
+ }
+ }
+
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER)
+ hot_brick_count = volinfo->tier_info.hot_brick_count;
+ ret = dict_set_int32n(rsp_dict, "hot_brick_count", SLEN("hot_brick_count"),
+ hot_brick_count);
+ if (ret)
+ goto out;
+
+ ret = dict_set_int32n(rsp_dict, "type", SLEN("type"), volinfo->type);
+ if (ret)
+ goto out;
+
+ ret = dict_set_int32n(rsp_dict, "brick-index-max", SLEN("brick-index-max"),
+ brick_index);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Error setting brick-index-max to dict");
+ goto out;
+ }
+ ret = dict_set_int32n(rsp_dict, "other-count", SLEN("other-count"),
+ other_count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Error setting other-count to dict");
+ goto out;
+ }
+ ret = dict_set_int32n(rsp_dict, "count", SLEN("count"), node_count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Error setting node count to dict");
+ goto out;
+ }
+
+ /* Active tasks */
+ /* Tasks are added only for normal volume status request for either a
+ * single volume or all volumes
+ */
+ if (!glusterd_status_has_tasks(cmd))
+ goto out;
+
+ ret = glusterd_aggregate_task_status(rsp_dict, volinfo);
+ if (ret)
+ goto out;
+ ret = 0;
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
- return ret;
+ return ret;
}
static int
-glusterd_op_ac_none (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_none(glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = 0;
+ int ret = 0;
- gf_msg_debug (THIS->name, 0, "Returning with %d", ret);
+ gf_msg_debug(THIS->name, 0, "Returning with %d", ret);
- return ret;
+ return ret;
}
static int
-glusterd_op_sm_locking_failed (uuid_t *txn_id)
+glusterd_op_sm_locking_failed(uuid_t *txn_id)
{
- int ret = -1;
+ int ret = -1;
- opinfo.op_ret = -1;
- opinfo.op_errstr = gf_strdup ("locking failed for one of the peer.");
+ opinfo.op_ret = -1;
+    opinfo.op_errstr = gf_strdup("locking failed for one of the peers.");
- ret = glusterd_set_txn_opinfo (txn_id, &opinfo);
- if (ret)
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set "
- "transaction's opinfo");
- /* Inject a reject event such that unlocking gets triggered right away*/
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT, txn_id, NULL);
+ ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set "
+ "transaction's opinfo");
+ /* Inject a reject event such that unlocking gets triggered right away*/
+ ret = glusterd_op_sm_inject_event(GD_OP_EVENT_RCVD_RJT, txn_id, NULL);
- return ret;
+ return ret;
}
static int
-glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_send_lock(glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = 0;
- rpc_clnt_procedure_t *proc = NULL;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- uint32_t pending_count = 0;
- dict_t *dict = NULL;
-
- this = THIS;
- priv = this->private;
- GF_ASSERT (priv);
-
- rcu_read_lock ();
- cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
- /* Only send requests to peers who were available before the
- * transaction started
- */
- if (peerinfo->generation > opinfo.txn_generation)
- continue;
-
- if (!peerinfo->connected || !peerinfo->mgmt)
- continue;
- if ((peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED) &&
- (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
- continue;
-
- /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
- if (priv->op_version < GD_OP_VERSION_3_6_0) {
- proc = &peerinfo->mgmt->proctable
- [GLUSTERD_MGMT_CLUSTER_LOCK];
- if (proc->fn) {
- ret = proc->fn (NULL, this, peerinfo);
- if (ret) {
- rcu_read_unlock ();
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_LOCK_REQ_SEND_FAIL,
- "Failed to send lock request "
- "for operation 'Volume %s' to "
- "peer %s",
- gd_op_list[opinfo.op],
- peerinfo->hostname);
- goto out;
- }
- /* Mark the peer as locked*/
- peerinfo->locked = _gf_true;
- pending_count++;
- }
- } else {
- dict = glusterd_op_get_ctx ();
- dict_ref (dict);
-
- proc = &peerinfo->mgmt_v3->proctable
- [GLUSTERD_MGMT_V3_LOCK];
- if (proc->fn) {
- ret = dict_set_static_ptr (dict, "peerinfo",
- peerinfo);
- if (ret) {
- rcu_read_unlock ();
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "failed to set peerinfo");
- dict_unref (dict);
- goto out;
- }
-
- ret = proc->fn (NULL, this, dict);
- if (ret) {
- rcu_read_unlock ();
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_MGMTV3_LOCK_REQ_SEND_FAIL,
- "Failed to send mgmt_v3 lock "
- "request for operation "
- "'Volume %s' to peer %s",
- gd_op_list[opinfo.op],
- peerinfo->hostname);
- dict_unref (dict);
- goto out;
- }
- /* Mark the peer as locked*/
- peerinfo->locked = _gf_true;
- pending_count++;
- }
- }
- }
- rcu_read_unlock ();
+ int ret = 0;
+ rpc_clnt_procedure_t *proc = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ uint32_t pending_count = 0;
+ dict_t *dict = NULL;
+
+ this = THIS;
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ rcu_read_lock();
+ cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
+ {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > opinfo.txn_generation)
+ continue;
- opinfo.pending_count = pending_count;
+ if (!peerinfo->connected || !peerinfo->mgmt)
+ continue;
+ if ((peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED) &&
+ (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
+ continue;
- ret = glusterd_set_txn_opinfo (&event->txn_id, &opinfo);
- if (ret)
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set "
- "transaction's opinfo");
+ /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
+ if (priv->op_version < GD_OP_VERSION_3_6_0) {
+ proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_CLUSTER_LOCK];
+ if (proc->fn) {
+ ret = proc->fn(NULL, this, peerinfo);
+ if (ret) {
+ rcu_read_unlock();
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_LOCK_REQ_SEND_FAIL,
+ "Failed to send lock request "
+ "for operation 'Volume %s' to "
+ "peer %s",
+ gd_op_list[opinfo.op], peerinfo->hostname);
+ goto out;
+ }
+ /* Mark the peer as locked*/
+ peerinfo->locked = _gf_true;
+ pending_count++;
+ }
+ } else {
+ dict = glusterd_op_get_ctx();
+ dict_ref(dict);
+ proc = &peerinfo->mgmt_v3->proctable[GLUSTERD_MGMT_V3_LOCK];
+ if (proc->fn) {
+ ret = dict_set_static_ptr(dict, "peerinfo", peerinfo);
+ if (ret) {
+ rcu_read_unlock();
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "failed to set peerinfo");
+ dict_unref(dict);
+ goto out;
+ }
- if (!opinfo.pending_count)
- ret = glusterd_op_sm_inject_all_acc (&event->txn_id);
+ ret = proc->fn(NULL, this, dict);
+ if (ret) {
+ rcu_read_unlock();
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_MGMTV3_LOCK_REQ_SEND_FAIL,
+ "Failed to send mgmt_v3 lock "
+ "request for operation "
+ "'Volume %s' to peer %s",
+ gd_op_list[opinfo.op], peerinfo->hostname);
+ dict_unref(dict);
+ goto out;
+ }
+ /* Mark the peer as locked*/
+ peerinfo->locked = _gf_true;
+ pending_count++;
+ }
+ }
+ }
+ rcu_read_unlock();
+
+ opinfo.pending_count = pending_count;
+
+ ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set "
+ "transaction's opinfo");
+
+ if (!opinfo.pending_count)
+ ret = glusterd_op_sm_inject_all_acc(&event->txn_id);
out:
- if (ret)
- ret = glusterd_op_sm_locking_failed (&event->txn_id);
+ if (ret)
+ ret = glusterd_op_sm_locking_failed(&event->txn_id);
- gf_msg_debug (this->name, 0, "Returning with %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning with %d", ret);
+ return ret;
}
static int
-glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_send_unlock(glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = 0;
- rpc_clnt_procedure_t *proc = NULL;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- uint32_t pending_count = 0;
- dict_t *dict = NULL;
-
- this = THIS;
- priv = this->private;
- GF_ASSERT (priv);
-
- rcu_read_lock ();
- cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
- /* Only send requests to peers who were available before the
- * transaction started
- */
- if (peerinfo->generation > opinfo.txn_generation)
- continue;
-
- if (!peerinfo->connected || !peerinfo->mgmt ||
- !peerinfo->locked)
- continue;
- if ((peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED) &&
- (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
- continue;
- /* Based on the op_version,
- * release the cluster or mgmt_v3 lock */
- if (priv->op_version < GD_OP_VERSION_3_6_0) {
- proc = &peerinfo->mgmt->proctable
- [GLUSTERD_MGMT_CLUSTER_UNLOCK];
- if (proc->fn) {
- ret = proc->fn (NULL, this, peerinfo);
- if (ret) {
- opinfo.op_errstr = gf_strdup
- ("Unlocking failed for one of "
- "the peer.");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_CLUSTER_UNLOCK_FAILED,
- "Unlocking failed for operation"
- " volume %s on peer %s",
- gd_op_list[opinfo.op],
- peerinfo->hostname);
- continue;
- }
- pending_count++;
- peerinfo->locked = _gf_false;
- }
- } else {
- dict = glusterd_op_get_ctx ();
- dict_ref (dict);
-
- proc = &peerinfo->mgmt_v3->proctable
- [GLUSTERD_MGMT_V3_UNLOCK];
- if (proc->fn) {
- ret = dict_set_static_ptr (dict, "peerinfo",
- peerinfo);
- if (ret) {
- opinfo.op_errstr = gf_strdup
- ("Unlocking failed for one of the "
- "peer.");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_CLUSTER_UNLOCK_FAILED,
- "Unlocking failed for operation"
- " volume %s on peer %s",
- gd_op_list[opinfo.op],
- peerinfo->hostname);
- dict_unref (dict);
- continue;
- }
-
- ret = proc->fn (NULL, this, dict);
- if (ret) {
- opinfo.op_errstr = gf_strdup
- ("Unlocking failed for one of the "
- "peer.");
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_CLUSTER_UNLOCK_FAILED,
- "Unlocking failed for operation"
- " volume %s on peer %s",
- gd_op_list[opinfo.op],
- peerinfo->hostname);
- dict_unref (dict);
- continue;
- }
- pending_count++;
- peerinfo->locked = _gf_false;
- }
- }
- }
- rcu_read_unlock ();
-
- opinfo.pending_count = pending_count;
-
- ret = glusterd_set_txn_opinfo (&event->txn_id, &opinfo);
- if (ret)
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set "
- "transaction's opinfo");
-
- if (!opinfo.pending_count)
- ret = glusterd_op_sm_inject_all_acc (&event->txn_id);
+ int ret = 0;
+ rpc_clnt_procedure_t *proc = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ uint32_t pending_count = 0;
+ dict_t *dict = NULL;
+
+ this = THIS;
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ rcu_read_lock();
+ cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
+ {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > opinfo.txn_generation)
+ continue;
+
+ if (!peerinfo->connected || !peerinfo->mgmt || !peerinfo->locked)
+ continue;
+ if ((peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED) &&
+ (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
+ continue;
+ /* Based on the op_version,
+ * release the cluster or mgmt_v3 lock */
+ if (priv->op_version < GD_OP_VERSION_3_6_0) {
+ proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_CLUSTER_UNLOCK];
+ if (proc->fn) {
+ ret = proc->fn(NULL, this, peerinfo);
+ if (ret) {
+ opinfo.op_errstr = gf_strdup(
+ "Unlocking failed for one of "
+ "the peer.");
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_CLUSTER_UNLOCK_FAILED,
+ "Unlocking failed for operation"
+ " volume %s on peer %s",
+ gd_op_list[opinfo.op], peerinfo->hostname);
+ continue;
+ }
+ pending_count++;
+ peerinfo->locked = _gf_false;
+ }
+ } else {
+ dict = glusterd_op_get_ctx();
+ dict_ref(dict);
- gf_msg_debug (this->name, 0, "Returning with %d", ret);
- return ret;
+ proc = &peerinfo->mgmt_v3->proctable[GLUSTERD_MGMT_V3_UNLOCK];
+ if (proc->fn) {
+ ret = dict_set_static_ptr(dict, "peerinfo", peerinfo);
+ if (ret) {
+ opinfo.op_errstr = gf_strdup(
+ "Unlocking failed for one of the "
+ "peer.");
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_CLUSTER_UNLOCK_FAILED,
+ "Unlocking failed for operation"
+ " volume %s on peer %s",
+ gd_op_list[opinfo.op], peerinfo->hostname);
+ dict_unref(dict);
+ continue;
+ }
+
+ ret = proc->fn(NULL, this, dict);
+ if (ret) {
+ opinfo.op_errstr = gf_strdup(
+ "Unlocking failed for one of the "
+ "peer.");
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_CLUSTER_UNLOCK_FAILED,
+ "Unlocking failed for operation"
+ " volume %s on peer %s",
+ gd_op_list[opinfo.op], peerinfo->hostname);
+ dict_unref(dict);
+ continue;
+ }
+ pending_count++;
+ peerinfo->locked = _gf_false;
+ }
+ }
+ }
+ rcu_read_unlock();
+
+ opinfo.pending_count = pending_count;
+
+ ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set "
+ "transaction's opinfo");
+
+ if (!opinfo.pending_count)
+ ret = glusterd_op_sm_inject_all_acc(&event->txn_id);
+
+ gf_msg_debug(this->name, 0, "Returning with %d", ret);
+ return ret;
}
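
The reformatted hunk above keeps the op-version gate from the original code: peers below GD_OP_VERSION_3_6_0 only understand the coarse cluster lock, while newer peers release the per-entity mgmt_v3 lock. A minimal standalone C sketch of that dispatch, with illustrative names in place of the real glusterd API:

/* Sketch only: release_cluster_lock/release_mgmt_v3_lock and the
 * version constant are illustrative stand-ins, not glusterd code. */
#include <stdio.h>

#define OP_VERSION_3_6_0 30600 /* matches the GD_OP_VERSION_3_6_0 scheme */

static int release_cluster_lock(const char *peer) {
    printf("cluster unlock -> %s\n", peer);
    return 0;
}

static int release_mgmt_v3_lock(const char *peer) {
    printf("mgmt_v3 unlock -> %s\n", peer);
    return 0;
}

static int send_unlock(int cluster_op_version, const char *peer) {
    /* Same gate as the hunk above: old clusters get the coarse lock. */
    if (cluster_op_version < OP_VERSION_3_6_0)
        return release_cluster_lock(peer);
    return release_mgmt_v3_lock(peer);
}

int main(void) {
    send_unlock(30000, "peer-a"); /* old peer: cluster unlock */
    send_unlock(40100, "peer-b"); /* new peer: mgmt_v3 unlock */
    return 0;
}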
static int
-glusterd_op_ac_ack_drain (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_ack_drain(glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = 0;
+ int ret = 0;
- if (opinfo.pending_count > 0)
- opinfo.pending_count--;
+ if (opinfo.pending_count > 0)
+ opinfo.pending_count--;
+ ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set "
+ "transaction's opinfo");
- ret = glusterd_set_txn_opinfo (&event->txn_id, &opinfo);
- if (ret)
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set "
- "transaction's opinfo");
+ if (!opinfo.pending_count)
+ ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACK, &event->txn_id,
+ NULL);
+ gf_msg_debug(THIS->name, 0, "Returning with %d", ret);
- if (!opinfo.pending_count)
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK,
- &event->txn_id, NULL);
-
- gf_msg_debug (THIS->name, 0, "Returning with %d", ret);
-
- return ret;
+ return ret;
}
static int
-glusterd_op_ac_send_unlock_drain (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_send_unlock_drain(glusterd_op_sm_event_t *event, void *ctx)
{
- return glusterd_op_ac_ack_drain (event, ctx);
+ return glusterd_op_ac_ack_drain(event, ctx);
}
static int
-glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_lock(glusterd_op_sm_event_t *event, void *ctx)
{
- int32_t ret = 0;
- char *volname = NULL;
- char *globalname = NULL;
- glusterd_op_lock_ctx_t *lock_ctx = NULL;
- xlator_t *this = NULL;
- uint32_t op_errno = 0;
- glusterd_conf_t *conf = NULL;
- uint32_t timeout = 0;
-
- GF_ASSERT (event);
- GF_ASSERT (ctx);
-
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
- GF_ASSERT (conf);
-
- lock_ctx = (glusterd_op_lock_ctx_t *)ctx;
-
- /* If the req came from a node running on older op_version
- * the dict won't be present. Based on it acquiring a cluster
- * or mgmt_v3 lock */
- if (lock_ctx->dict == NULL) {
- ret = glusterd_lock (lock_ctx->uuid);
- glusterd_op_lock_send_resp (lock_ctx->req, ret);
- } else {
- /* Cli will add timeout key to dict if the default timeout is
- * other than 2 minutes. Here we use this value to check whether
- * mgmt_v3_lock_timeout should be set to default value or we
- * need to change the value according to timeout value
- * i.e, timeout + 120 seconds. */
- ret = dict_get_uint32 (lock_ctx->dict, "timeout", &timeout);
- if (!ret)
- conf->mgmt_v3_lock_timeout = timeout + 120;
-
- ret = dict_get_strn (lock_ctx->dict, "volname",
- SLEN ("volname"), &volname);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Unable to acquire volname");
- else {
- ret = glusterd_mgmt_v3_lock (volname, lock_ctx->uuid,
- &op_errno, "vol");
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_LOCK_GET_FAIL,
- "Unable to acquire lock for %s",
- volname);
- goto out;
- }
- ret = dict_get_strn (lock_ctx->dict, "globalname",
- SLEN ("globalname"), &globalname);
- if (!ret) {
- ret = glusterd_mgmt_v3_lock (globalname, lock_ctx->uuid,
- &op_errno, "global");
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_LOCK_GET_FAIL,
- "Unable to acquire lock for %s",
- globalname);
-
- }
-out:
- glusterd_op_mgmt_v3_lock_send_resp (lock_ctx->req,
- &event->txn_id, ret);
-
- dict_unref (lock_ctx->dict);
+ int32_t ret = 0;
+ char *volname = NULL;
+ char *globalname = NULL;
+ glusterd_op_lock_ctx_t *lock_ctx = NULL;
+ xlator_t *this = NULL;
+ uint32_t op_errno = 0;
+ glusterd_conf_t *conf = NULL;
+ uint32_t timeout = 0;
+
+ GF_ASSERT(event);
+ GF_ASSERT(ctx);
+
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
+ GF_ASSERT(conf);
+
+ lock_ctx = (glusterd_op_lock_ctx_t *)ctx;
+
+ /* If the req came from a node running on older op_version
+ * the dict won't be present. Based on it acquiring a cluster
+ * or mgmt_v3 lock */
+ if (lock_ctx->dict == NULL) {
+ ret = glusterd_lock(lock_ctx->uuid);
+ glusterd_op_lock_send_resp(lock_ctx->req, ret);
+ } else {
+ /* Cli will add timeout key to dict if the default timeout is
+ * other than 2 minutes. Here we use this value to check whether
+ * mgmt_v3_lock_timeout should be set to default value or we
+ * need to change the value according to timeout value
+ * i.e, timeout + 120 seconds. */
+ ret = dict_get_uint32(lock_ctx->dict, "timeout", &timeout);
+ if (!ret)
+ conf->mgmt_v3_lock_timeout = timeout + 120;
+
+ ret = dict_get_strn(lock_ctx->dict, "volname", SLEN("volname"),
+ &volname);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to acquire volname");
+ else {
+ ret = glusterd_mgmt_v3_lock(volname, lock_ctx->uuid, &op_errno,
+ "vol");
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
+ "Unable to acquire lock for %s", volname);
+ goto out;
+ }
+ ret = dict_get_strn(lock_ctx->dict, "globalname", SLEN("globalname"),
+ &globalname);
+ if (!ret) {
+ ret = glusterd_mgmt_v3_lock(globalname, lock_ctx->uuid, &op_errno,
+ "global");
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
+ "Unable to acquire lock for %s", globalname);
}
+ out:
+ glusterd_op_mgmt_v3_lock_send_resp(lock_ctx->req, &event->txn_id, ret);
- gf_msg_debug (THIS->name, 0, "Lock Returned %d", ret);
- return ret;
+ dict_unref(lock_ctx->dict);
+ }
+
+ gf_msg_debug(THIS->name, 0, "Lock Returned %d", ret);
+ return ret;
}
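
The timeout comment in this hunk describes a small piece of arithmetic: when the CLI ships a non-default "timeout" key, the mgmt_v3 lock timeout becomes that value plus the 120-second default. A hedged sketch of just that calculation, using a hypothetical helper name:

/* effective_lock_timeout() is an invented name; only the arithmetic
 * (cli timeout + 120s default) comes from the diff above. */
#include <stdint.h>
#include <stdio.h>

#define DEFAULT_LOCK_TIMEOUT 120 /* seconds, i.e. the 2-minute default */

static uint32_t effective_lock_timeout(int have_cli_timeout, uint32_t cli_timeout) {
    if (have_cli_timeout)
        return cli_timeout + DEFAULT_LOCK_TIMEOUT;
    return DEFAULT_LOCK_TIMEOUT;
}

int main(void) {
    printf("%u\n", (unsigned)effective_lock_timeout(0, 0));   /* 120 */
    printf("%u\n", (unsigned)effective_lock_timeout(1, 300)); /* 420 */
    return 0;
}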
static int
-glusterd_op_ac_unlock (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_unlock(glusterd_op_sm_event_t *event, void *ctx)
{
- int32_t ret = 0;
- char *volname = NULL;
- char *globalname = NULL;
- glusterd_op_lock_ctx_t *lock_ctx = NULL;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
-
-
- GF_ASSERT (event);
- GF_ASSERT (ctx);
-
- this = THIS;
- priv = this->private;
-
- lock_ctx = (glusterd_op_lock_ctx_t *)ctx;
-
- /* If the req came from a node running on older op_version
- * the dict won't be present. Based on it releasing the cluster
- * or mgmt_v3 lock */
- if (lock_ctx->dict == NULL) {
- ret = glusterd_unlock (lock_ctx->uuid);
- glusterd_op_unlock_send_resp (lock_ctx->req, ret);
- } else {
- ret = dict_get_strn (lock_ctx->dict, "volname",
- SLEN ("volname"), &volname);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Unable to acquire volname");
- else {
- ret = glusterd_mgmt_v3_unlock (volname, lock_ctx->uuid,
- "vol");
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_UNLOCK_FAIL,
- "Unable to release lock for %s",
- volname);
- goto out;
- }
-
- ret = dict_get_strn (lock_ctx->dict, "globalname",
- SLEN ("globalname"), &globalname);
- if (!ret) {
- ret = glusterd_mgmt_v3_unlock (globalname, lock_ctx->uuid,
- "global");
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_UNLOCK_FAIL,
- "Unable to release lock for %s",
- globalname);
-
- }
-out:
- glusterd_op_mgmt_v3_unlock_send_resp (lock_ctx->req,
- &event->txn_id, ret);
+ int32_t ret = 0;
+ char *volname = NULL;
+ char *globalname = NULL;
+ glusterd_op_lock_ctx_t *lock_ctx = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ GF_ASSERT(event);
+ GF_ASSERT(ctx);
+
+ this = THIS;
+ priv = this->private;
+
+ lock_ctx = (glusterd_op_lock_ctx_t *)ctx;
+
+ /* If the req came from a node running on older op_version
+ * the dict won't be present. Based on it releasing the cluster
+ * or mgmt_v3 lock */
+ if (lock_ctx->dict == NULL) {
+ ret = glusterd_unlock(lock_ctx->uuid);
+ glusterd_op_unlock_send_resp(lock_ctx->req, ret);
+ } else {
+ ret = dict_get_strn(lock_ctx->dict, "volname", SLEN("volname"),
+ &volname);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to acquire volname");
+ else {
+ ret = glusterd_mgmt_v3_unlock(volname, lock_ctx->uuid, "vol");
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
+ "Unable to release lock for %s", volname);
+ goto out;
+ }
- dict_unref (lock_ctx->dict);
+ ret = dict_get_strn(lock_ctx->dict, "globalname", SLEN("globalname"),
+ &globalname);
+ if (!ret) {
+ ret = glusterd_mgmt_v3_unlock(globalname, lock_ctx->uuid, "global");
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
+ "Unable to release lock for %s", globalname);
}
+ out:
+ glusterd_op_mgmt_v3_unlock_send_resp(lock_ctx->req, &event->txn_id,
+ ret);
- gf_msg_debug (this->name, 0, "Unlock Returned %d", ret);
+ dict_unref(lock_ctx->dict);
+ }
- if (priv->pending_quorum_action)
- glusterd_do_quorum_action ();
- return ret;
+ gf_msg_debug(this->name, 0, "Unlock Returned %d", ret);
+
+ if (priv->pending_quorum_action)
+ glusterd_do_quorum_action();
+ return ret;
}
static int
-glusterd_op_ac_local_unlock (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_local_unlock(glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = 0;
- uuid_t *originator = NULL;
+ int ret = 0;
+ uuid_t *originator = NULL;
- GF_ASSERT (event);
- GF_ASSERT (ctx);
+ GF_ASSERT(event);
+ GF_ASSERT(ctx);
- originator = (uuid_t *) ctx;
+ originator = (uuid_t *)ctx;
- ret = glusterd_unlock (*originator);
+ ret = glusterd_unlock(*originator);
- gf_msg_debug (THIS->name, 0, "Unlock Returned %d", ret);
+ gf_msg_debug(THIS->name, 0, "Unlock Returned %d", ret);
- return ret;
+ return ret;
}
static int
-glusterd_op_ac_rcvd_lock_acc (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_rcvd_lock_acc(glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = 0;
-
- GF_ASSERT (event);
+ int ret = 0;
- if (opinfo.pending_count > 0)
- opinfo.pending_count--;
+ GF_ASSERT(event);
- ret = glusterd_set_txn_opinfo (&event->txn_id, &opinfo);
- if (ret)
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set "
- "transaction's opinfo");
+ if (opinfo.pending_count > 0)
+ opinfo.pending_count--;
+ ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set "
+ "transaction's opinfo");
- if (opinfo.pending_count > 0)
- goto out;
+ if (opinfo.pending_count > 0)
+ goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACC,
- &event->txn_id, NULL);
+ ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACC, &event->txn_id,
+ NULL);
- gf_msg_debug (THIS->name, 0, "Returning %d", ret);
+ gf_msg_debug(THIS->name, 0, "Returning %d", ret);
out:
- return ret;
+ return ret;
}
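
Several of the handlers in this file share one accounting pattern, visible again in the hunk above: each acknowledgement decrements opinfo.pending_count, and only the ack that drives it to zero injects the aggregate event into the op state machine. A compact sketch of that pattern, with stand-in event names and the per-txn persistence step elided:

#include <stdio.h>

enum sm_event { EV_NONE, EV_ALL_ACC };

struct txn_opinfo {
    unsigned pending_count; /* outstanding peer replies */
};

static enum sm_event on_ack(struct txn_opinfo *opinfo) {
    if (opinfo->pending_count > 0)
        opinfo->pending_count--;
    /* glusterd_set_txn_opinfo() persistence is elided in this sketch. */
    return opinfo->pending_count == 0 ? EV_ALL_ACC : EV_NONE;
}

int main(void) {
    struct txn_opinfo opinfo = { .pending_count = 3 };
    for (int i = 0; i < 3; i++)
        printf("ack %d -> %s\n", i + 1,
               on_ack(&opinfo) == EV_ALL_ACC ? "inject ALL_ACC" : "wait");
    return 0;
}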
int
-glusterd_dict_set_volid (dict_t *dict, char *volname, char **op_errstr)
+glusterd_dict_set_volid(dict_t *dict, char *volname, char **op_errstr)
{
- int ret = -1;
- glusterd_volinfo_t *volinfo = NULL;
- char *volid = NULL;
- char msg[1024] = {0,};
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- if (!dict || !volname)
- goto out;
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- snprintf (msg, sizeof (msg), FMTSTR_CHECK_VOL_EXISTS, volname);
- goto out;
- }
- volid = gf_strdup (uuid_utoa (volinfo->volume_id));
- if (!volid) {
- ret = -1;
- goto out;
- }
- ret = dict_set_dynstrn (dict, "vol-id", SLEN ("vol-id"), volid);
- if (ret) {
- snprintf (msg, sizeof (msg), "Failed to set volume id of volume"
- " %s", volname);
- GF_FREE (volid);
- goto out;
- }
+ int ret = -1;
+ glusterd_volinfo_t *volinfo = NULL;
+ char *volid = NULL;
+ char msg[1024] = {
+ 0,
+ };
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ if (!dict || !volname)
+ goto out;
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
+ goto out;
+ }
+ volid = gf_strdup(uuid_utoa(volinfo->volume_id));
+ if (!volid) {
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_dynstrn(dict, "vol-id", SLEN("vol-id"), volid);
+ if (ret) {
+ snprintf(msg, sizeof(msg),
+ "Failed to set volume id of volume"
+ " %s",
+ volname);
+ GF_FREE(volid);
+ goto out;
+ }
out:
- if (msg[0] != '\0') {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_ID_SET_FAIL, "%s", msg);
- *op_errstr = gf_strdup (msg);
- }
- return ret;
+ if (msg[0] != '\0') {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_ID_SET_FAIL, "%s", msg);
+ *op_errstr = gf_strdup(msg);
+ }
+ return ret;
}
int
-gd_set_commit_hash (dict_t *dict)
+gd_set_commit_hash(dict_t *dict)
{
- struct timeval tv;
- uint32_t hash;
-
- /*
- * We need a commit hash that won't conflict with others we might have
- * set, or zero which is the implicit value if we never have. Using
- * seconds<<3 like this ensures that we'll only get a collision if two
- * consecutive rebalances are separated by exactly 2^29 seconds - about
- * 17 years - and even then there's only a 1/8 chance of a collision in
- * the low order bits. It's far more likely that this code will have
- * changed completely by then. If not, call me in 2031.
- *
- * P.S. Time zone changes? Yeah, right.
- */
- gettimeofday (&tv, NULL);
- hash = tv.tv_sec << 3;
-
- /*
- * Make sure at least one of those low-order bits is set. The extra
- * shifting is because not all machines have sub-millisecond time
- * resolution.
- */
- hash |= 1 << ((tv.tv_usec >> 10) % 3);
-
- return dict_set_uint32 (dict, "commit-hash", hash);
+ struct timeval tv;
+ uint32_t hash;
+
+ /*
+ * We need a commit hash that won't conflict with others we might have
+ * set, or zero which is the implicit value if we never have. Using
+ * seconds<<3 like this ensures that we'll only get a collision if two
+ * consecutive rebalances are separated by exactly 2^29 seconds - about
+ * 17 years - and even then there's only a 1/8 chance of a collision in
+ * the low order bits. It's far more likely that this code will have
+ * changed completely by then. If not, call me in 2031.
+ *
+ * P.S. Time zone changes? Yeah, right.
+ */
+ gettimeofday(&tv, NULL);
+ hash = tv.tv_sec << 3;
+
+ /*
+ * Make sure at least one of those low-order bits is set. The extra
+ * shifting is because not all machines have sub-millisecond time
+ * resolution.
+ */
+ hash |= 1 << ((tv.tv_usec >> 10) % 3);
+
+ return dict_set_uint32(dict, "commit-hash", hash);
}
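
The commit-hash comment above can be checked with a few lines of standalone code. With a 32-bit hash, tv_sec << 3 wraps every 2^32 / 2^3 = 2^29 seconds (about 17 years), which is where the collision window in the comment comes from, and OR-ing in 1 << ((tv_usec >> 10) % 3) guarantees one of the three low-order bits is set so the hash is never zero:

/* Standalone reproduction of gd_set_commit_hash()'s arithmetic,
 * minus the dict; behaviour matches the function above. */
#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>

static uint32_t commit_hash_now(void) {
    struct timeval tv;
    uint32_t hash;

    gettimeofday(&tv, NULL);
    hash = (uint32_t)tv.tv_sec << 3;        /* wraps every 2^29 seconds */
    hash |= 1u << ((tv.tv_usec >> 10) % 3); /* force on a bit in 0..2 */
    return hash;
}

int main(void) {
    printf("commit-hash: %u\n", (unsigned)commit_hash_now());
    return 0;
}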
int
-glusterd_op_build_payload (dict_t **req, char **op_errstr, dict_t *op_ctx)
+glusterd_op_build_payload(dict_t **req, char **op_errstr, dict_t *op_ctx)
{
- int ret = -1;
- void *ctx = NULL;
- dict_t *dict = NULL;
- dict_t *req_dict = NULL;
- glusterd_op_t op = GD_OP_NONE;
- char *volname = NULL;
- uint32_t status_cmd = GF_CLI_STATUS_NONE;
- xlator_t *this = NULL;
- gf_boolean_t do_common = _gf_false;
-
- GF_ASSERT (req);
-
- this = THIS;
- GF_ASSERT (this);
-
- req_dict = dict_new ();
- if (!req_dict)
- goto out;
-
- if (!op_ctx) {
- op = glusterd_op_get_op ();
- ctx = (void*)glusterd_op_get_ctx ();
- if (!ctx) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_NO_OPTIONS_GIVEN, "Null Context for "
- "op %d", op);
- ret = -1;
- goto out;
- }
-
- } else {
+ int ret = -1;
+ void *ctx = NULL;
+ dict_t *dict = NULL;
+ dict_t *req_dict = NULL;
+ glusterd_op_t op = GD_OP_NONE;
+ char *volname = NULL;
+ uint32_t status_cmd = GF_CLI_STATUS_NONE;
+ xlator_t *this = NULL;
+ gf_boolean_t do_common = _gf_false;
+
+ GF_ASSERT(req);
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ req_dict = dict_new();
+ if (!req_dict)
+ goto out;
+
+ if (!op_ctx) {
+ op = glusterd_op_get_op();
+ ctx = (void *)glusterd_op_get_ctx();
+ if (!ctx) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_OPTIONS_GIVEN,
+ "Null Context for "
+ "op %d",
+ op);
+ ret = -1;
+ goto out;
+ }
+
+ } else {
#define GD_SYNC_OPCODE_KEY "sync-mgmt-operation"
- ret = dict_get_int32 (op_ctx, GD_SYNC_OPCODE_KEY, (int32_t*)&op);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Failed to get volume"
- " operation");
- goto out;
- }
- ctx = op_ctx;
-#undef GD_SYNC_OPCODE_KEY
+ ret = dict_get_int32(op_ctx, GD_SYNC_OPCODE_KEY, (int32_t *)&op);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Failed to get volume"
+ " operation");
+ goto out;
}
+ ctx = op_ctx;
+#undef GD_SYNC_OPCODE_KEY
+ }
+
+ dict = ctx;
+ switch (op) {
+ case GD_OP_CREATE_VOLUME: {
+ ++glusterfs_port;
+ ret = dict_set_int32n(dict, "port", SLEN("port"), glusterfs_port);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set port in "
+ "dictionary");
+ goto out;
+ }
+ dict_copy(dict, req_dict);
+ } break;
+
+ case GD_OP_GSYNC_CREATE:
+ case GD_OP_GSYNC_SET: {
+ ret = glusterd_op_gsync_args_get(dict, op_errstr, &volname, NULL,
+ NULL);
+ if (ret == 0) {
+ ret = glusterd_dict_set_volid(dict, volname, op_errstr);
+ if (ret)
+ goto out;
+ }
+ dict_copy(dict, req_dict);
+ } break;
+
+ case GD_OP_SET_VOLUME: {
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_DICT_GET_FAILED,
+ "volname is not present in "
+ "operation ctx");
+ goto out;
+ }
+ if (strcmp(volname, "help") && strcmp(volname, "help-xml") &&
+ strcasecmp(volname, "all")) {
+ ret = glusterd_dict_set_volid(dict, volname, op_errstr);
+ if (ret)
+ goto out;
+ }
+ dict_unref(req_dict);
+ req_dict = dict_ref(dict);
+ } break;
- dict = ctx;
- switch (op) {
- case GD_OP_CREATE_VOLUME:
- {
- ++glusterfs_port;
- ret = dict_set_int32n (dict, "port",
- SLEN ("port"),
- glusterfs_port);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set port in "
- "dictionary");
- goto out;
- }
- dict_copy (dict, req_dict);
- }
- break;
-
- case GD_OP_GSYNC_CREATE:
- case GD_OP_GSYNC_SET:
- {
- ret = glusterd_op_gsync_args_get (dict,
- op_errstr,
- &volname,
- NULL, NULL);
- if (ret == 0) {
- ret = glusterd_dict_set_volid
- (dict, volname, op_errstr);
- if (ret)
- goto out;
- }
- dict_copy (dict, req_dict);
- }
- break;
-
- case GD_OP_SET_VOLUME:
- {
- ret = dict_get_strn (dict, "volname",
- SLEN ("volname"), &volname);
- if (ret) {
- gf_msg (this->name, GF_LOG_CRITICAL, 0,
- GD_MSG_DICT_GET_FAILED,
- "volname is not present in "
- "operation ctx");
- goto out;
- }
- if (strcmp (volname, "help") &&
- strcmp (volname, "help-xml") &&
- strcasecmp (volname, "all")) {
- ret = glusterd_dict_set_volid
- (dict, volname, op_errstr);
- if (ret)
- goto out;
- }
- dict_unref (req_dict);
- req_dict = dict_ref (dict);
- }
- break;
-
- case GD_OP_REMOVE_BRICK:
- case GD_OP_DETACH_TIER_STATUS:
- case GD_OP_REMOVE_TIER_BRICK:
- {
- dict_t *dict = ctx;
- ret = dict_get_strn (dict, "volname",
- SLEN ("volname"), &volname);
- if (ret) {
- gf_msg (this->name, GF_LOG_CRITICAL, 0,
- GD_MSG_DICT_GET_FAILED,
- "volname is not present in "
- "operation ctx");
- goto out;
- }
-
- ret = glusterd_dict_set_volid (dict, volname,
- op_errstr);
- if (ret)
- goto out;
-
- if (gd_set_commit_hash(dict) != 0) {
- goto out;
- }
-
- dict_unref (req_dict);
- req_dict = dict_ref (dict);
- }
- break;
-
- case GD_OP_STATUS_VOLUME:
- {
- ret = dict_get_uint32 (dict, "cmd",
- &status_cmd);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Status command not present "
- "in op ctx");
- goto out;
- }
- if (GF_CLI_STATUS_ALL & status_cmd) {
- dict_copy (dict, req_dict);
- break;
- }
- do_common = _gf_true;
- }
- break;
-
- case GD_OP_DELETE_VOLUME:
- case GD_OP_START_VOLUME:
- case GD_OP_STOP_VOLUME:
- case GD_OP_ADD_BRICK:
- case GD_OP_REPLACE_BRICK:
- case GD_OP_RESET_VOLUME:
- case GD_OP_LOG_ROTATE:
- case GD_OP_QUOTA:
- case GD_OP_PROFILE_VOLUME:
- case GD_OP_HEAL_VOLUME:
- case GD_OP_STATEDUMP_VOLUME:
- case GD_OP_CLEARLOCKS_VOLUME:
- case GD_OP_DEFRAG_BRICK_VOLUME:
- case GD_OP_BARRIER:
- case GD_OP_BITROT:
- case GD_OP_TIER_START_STOP:
- case GD_OP_TIER_STATUS:
- case GD_OP_SCRUB_STATUS:
- case GD_OP_SCRUB_ONDEMAND:
- case GD_OP_RESET_BRICK:
- {
- do_common = _gf_true;
- }
- break;
-
- case GD_OP_REBALANCE:
- {
- if (gd_set_commit_hash(dict) != 0) {
- goto out;
- }
- do_common = _gf_true;
- }
- break;
-
- case GD_OP_SYNC_VOLUME:
- case GD_OP_COPY_FILE:
- case GD_OP_SYS_EXEC:
- {
- dict_copy (dict, req_dict);
- }
- break;
-
- default:
- break;
- }
+ case GD_OP_REMOVE_BRICK:
+ case GD_OP_DETACH_TIER_STATUS:
+ case GD_OP_REMOVE_TIER_BRICK: {
+ dict_t *dict = ctx;
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_DICT_GET_FAILED,
+ "volname is not present in "
+ "operation ctx");
+ goto out;
+ }
- /*
- * This has been moved out of the switch so that multiple ops with
- * other special needs can all "fall through" to it.
- */
- if (do_common) {
- ret = dict_get_strn (dict, "volname", SLEN ("volname"),
- &volname);
- if (ret) {
- gf_msg (this->name, GF_LOG_CRITICAL, -ret,
- GD_MSG_DICT_GET_FAILED,
- "volname is not present in "
- "operation ctx");
- goto out;
- }
+ ret = glusterd_dict_set_volid(dict, volname, op_errstr);
+ if (ret)
+ goto out;
- if (strcasecmp (volname, "all")) {
- ret = glusterd_dict_set_volid (dict,
- volname,
- op_errstr);
- if (ret)
- goto out;
- }
- dict_copy (dict, req_dict);
- }
+ if (gd_set_commit_hash(dict) != 0) {
+ goto out;
+ }
- *req = req_dict;
- ret = 0;
+ dict_unref(req_dict);
+ req_dict = dict_ref(dict);
+ } break;
-out:
- return ret;
-}
+ case GD_OP_STATUS_VOLUME: {
+ ret = dict_get_uint32(dict, "cmd", &status_cmd);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Status command not present "
+ "in op ctx");
+ goto out;
+ }
+ if (GF_CLI_STATUS_ALL & status_cmd) {
+ dict_copy(dict, req_dict);
+ break;
+ }
+ do_common = _gf_true;
+ } break;
-static int
-glusterd_op_ac_send_stage_op (glusterd_op_sm_event_t *event, void *ctx)
-{
- int ret = 0;
- int ret1 = 0;
- rpc_clnt_procedure_t *proc = NULL;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- dict_t *dict = NULL;
- dict_t *rsp_dict = NULL;
- char *op_errstr = NULL;
- glusterd_op_t op = GD_OP_NONE;
- uint32_t pending_count = 0;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- op = glusterd_op_get_op ();
+ case GD_OP_DELETE_VOLUME:
+ case GD_OP_START_VOLUME:
+ case GD_OP_STOP_VOLUME:
+ case GD_OP_ADD_BRICK:
+ case GD_OP_REPLACE_BRICK:
+ case GD_OP_RESET_VOLUME:
+ case GD_OP_LOG_ROTATE:
+ case GD_OP_QUOTA:
+ case GD_OP_PROFILE_VOLUME:
+ case GD_OP_HEAL_VOLUME:
+ case GD_OP_STATEDUMP_VOLUME:
+ case GD_OP_CLEARLOCKS_VOLUME:
+ case GD_OP_DEFRAG_BRICK_VOLUME:
+ case GD_OP_BARRIER:
+ case GD_OP_BITROT:
+ case GD_OP_TIER_START_STOP:
+ case GD_OP_TIER_STATUS:
+ case GD_OP_SCRUB_STATUS:
+ case GD_OP_SCRUB_ONDEMAND:
+ case GD_OP_RESET_BRICK: {
+ do_common = _gf_true;
+ } break;
- rsp_dict = dict_new();
- if (!rsp_dict) {
- gf_msg (this->name, GF_LOG_ERROR, ENOMEM,
- GD_MSG_DICT_CREATE_FAIL,
- "Failed to create rsp_dict");
- ret = -1;
+ case GD_OP_REBALANCE: {
+ if (gd_set_commit_hash(dict) != 0) {
goto out;
- }
+ }
+ do_common = _gf_true;
+ } break;
- ret = glusterd_op_build_payload (&dict, &op_errstr, NULL);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_OP_PAYLOAD_BUILD_FAIL,
- LOGSTR_BUILD_PAYLOAD,
- gd_op_list[op]);
- if (op_errstr == NULL)
- gf_asprintf (&op_errstr, OPERRSTR_BUILD_PAYLOAD);
- opinfo.op_errstr = op_errstr;
- goto out;
- }
+ case GD_OP_SYNC_VOLUME:
+ case GD_OP_COPY_FILE:
+ case GD_OP_SYS_EXEC: {
+ dict_copy(dict, req_dict);
+ } break;
- ret = glusterd_validate_quorum (this, op, dict, &op_errstr);
+ default:
+ break;
+ }
+
+ /*
+ * This has been moved out of the switch so that multiple ops with
+ * other special needs can all "fall through" to it.
+ */
+ if (do_common) {
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
if (ret) {
- gf_msg (this->name, GF_LOG_CRITICAL, 0,
- GD_MSG_SERVER_QUORUM_NOT_MET,
- "Server quorum not met. Rejecting operation.");
- opinfo.op_errstr = op_errstr;
- goto out;
+ gf_msg(this->name, GF_LOG_CRITICAL, -ret, GD_MSG_DICT_GET_FAILED,
+ "volname is not present in "
+ "operation ctx");
+ goto out;
}
- ret = glusterd_op_stage_validate (op, dict, &op_errstr, rsp_dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VALIDATE_FAILED, LOGSTR_STAGE_FAIL,
- gd_op_list[op], "localhost",
- (op_errstr) ? ":" : " ", (op_errstr) ? op_errstr : " ");
- if (op_errstr == NULL)
- gf_asprintf (&op_errstr, OPERRSTR_STAGE_FAIL,
- "localhost");
- opinfo.op_errstr = op_errstr;
+ if (strcasecmp(volname, "all")) {
+ ret = glusterd_dict_set_volid(dict, volname, op_errstr);
+ if (ret)
goto out;
}
+ dict_copy(dict, req_dict);
+ }
- rcu_read_lock ();
- cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
- /* Only send requests to peers who were available before the
- * transaction started
- */
- if (peerinfo->generation > opinfo.txn_generation)
- continue;
+ *req = req_dict;
+ ret = 0;
- if (!peerinfo->connected || !peerinfo->mgmt)
- continue;
- if ((peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED) &&
- (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
- continue;
+out:
+ return ret;
+}
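
The "fall through" comment in glusterd_op_build_payload describes a deliberate shape: ops with extra needs do their specific work inside the switch and set do_common, rather than duplicating the shared volname/vol-id step in every case. An illustrative reduction of that structure, with invented op names:

#include <stdbool.h>
#include <stdio.h>

enum op { OP_REBALANCE, OP_START_VOLUME, OP_SYNC_VOLUME };

static int build_payload(enum op op) {
    bool do_common = false;

    switch (op) {
    case OP_REBALANCE:
        printf("set commit-hash\n"); /* op-specific step first */
        do_common = true;            /* then share the rest */
        break;
    case OP_START_VOLUME:
        do_common = true;            /* nothing special needed */
        break;
    case OP_SYNC_VOLUME:
        printf("plain dict copy\n"); /* bypasses the common step */
        break;
    }

    if (do_common)
        printf("common: resolve volname, set vol-id, copy dict\n");
    return 0;
}

int main(void) {
    build_payload(OP_REBALANCE);
    build_payload(OP_SYNC_VOLUME);
    return 0;
}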
- proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_STAGE_OP];
- GF_ASSERT (proc);
- if (proc->fn) {
- ret = dict_set_static_ptr (dict, "peerinfo", peerinfo);
- if (ret) {
- rcu_read_unlock ();
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "failed to "
- "set peerinfo");
- goto out;
- }
+static int
+glusterd_op_ac_send_stage_op(glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ int ret1 = 0;
+ rpc_clnt_procedure_t *proc = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+ char *op_errstr = NULL;
+ glusterd_op_t op = GD_OP_NONE;
+ uint32_t pending_count = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ op = glusterd_op_get_op();
+
+ rsp_dict = dict_new();
+ if (!rsp_dict) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
+ "Failed to create rsp_dict");
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_op_build_payload(&dict, &op_errstr, NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_OP_PAYLOAD_BUILD_FAIL,
+ LOGSTR_BUILD_PAYLOAD, gd_op_list[op]);
+ if (op_errstr == NULL)
+ gf_asprintf(&op_errstr, OPERRSTR_BUILD_PAYLOAD);
+ opinfo.op_errstr = op_errstr;
+ goto out;
+ }
+
+ ret = glusterd_validate_quorum(this, op, dict, &op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_SERVER_QUORUM_NOT_MET,
+ "Server quorum not met. Rejecting operation.");
+ opinfo.op_errstr = op_errstr;
+ goto out;
+ }
+
+ ret = glusterd_op_stage_validate(op, dict, &op_errstr, rsp_dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VALIDATE_FAILED,
+ LOGSTR_STAGE_FAIL, gd_op_list[op], "localhost",
+ (op_errstr) ? ":" : " ", (op_errstr) ? op_errstr : " ");
+ if (op_errstr == NULL)
+ gf_asprintf(&op_errstr, OPERRSTR_STAGE_FAIL, "localhost");
+ opinfo.op_errstr = op_errstr;
+ goto out;
+ }
+
+ rcu_read_lock();
+ cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
+ {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > opinfo.txn_generation)
+ continue;
- ret = proc->fn (NULL, this, dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_STAGE_REQ_SEND_FAIL, "Failed to "
- "send stage request for operation "
- "'Volume %s' to peer %s",
- gd_op_list[op], peerinfo->hostname);
- continue;
- }
- pending_count++;
- }
- }
- rcu_read_unlock ();
+ if (!peerinfo->connected || !peerinfo->mgmt)
+ continue;
+ if ((peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED) &&
+ (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
+ continue;
- opinfo.pending_count = pending_count;
+ proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_STAGE_OP];
+ GF_ASSERT(proc);
+ if (proc->fn) {
+ ret = dict_set_static_ptr(dict, "peerinfo", peerinfo);
+ if (ret) {
+ rcu_read_unlock();
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "failed to "
+ "set peerinfo");
+ goto out;
+ }
+
+ ret = proc->fn(NULL, this, dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_STAGE_REQ_SEND_FAIL,
+ "Failed to "
+ "send stage request for operation "
+ "'Volume %s' to peer %s",
+ gd_op_list[op], peerinfo->hostname);
+ continue;
+ }
+ pending_count++;
+ }
+ }
+ rcu_read_unlock();
+
+ opinfo.pending_count = pending_count;
out:
- if (ret)
- opinfo.op_ret = ret;
+ if (ret)
+ opinfo.op_ret = ret;
- ret1 = glusterd_set_txn_opinfo (&event->txn_id, &opinfo);
- if (ret1)
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set "
- "transaction's opinfo");
+ ret1 = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
+ if (ret1)
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set "
+ "transaction's opinfo");
+ if (rsp_dict)
+ dict_unref(rsp_dict);
- if (rsp_dict)
- dict_unref (rsp_dict);
+ if (dict)
+ dict_unref(dict);
+ if (ret) {
+ glusterd_op_sm_inject_event(GD_OP_EVENT_RCVD_RJT, &event->txn_id, NULL);
+ opinfo.op_ret = ret;
+ }
- if (dict)
- dict_unref (dict);
- if (ret) {
- glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT,
- &event->txn_id, NULL);
- opinfo.op_ret = ret;
- }
+ gf_msg_debug(this->name, 0,
+ "Sent stage op request for "
+ "'Volume %s' to %d peers",
+ gd_op_list[op], opinfo.pending_count);
- gf_msg_debug (this->name, 0, "Sent stage op request for "
- "'Volume %s' to %d peers", gd_op_list[op],
- opinfo.pending_count);
+ if (!opinfo.pending_count)
+ ret = glusterd_op_sm_inject_all_acc(&event->txn_id);
- if (!opinfo.pending_count)
- ret = glusterd_op_sm_inject_all_acc (&event->txn_id);
-
- gf_msg_debug (this->name, 0, "Returning with %d", ret);
-
- return ret;
+ gf_msg_debug(this->name, 0, "Returning with %d", ret);
+ return ret;
}
/* This function takes a dict and converts the uuid values of key specified
* into hostnames
*/
static int
-glusterd_op_volume_dict_uuid_to_hostname (dict_t *dict, const char *key_fmt,
- int idx_min, int idx_max)
+glusterd_op_volume_dict_uuid_to_hostname(dict_t *dict, const char *key_fmt,
+ int idx_min, int idx_max)
{
- int ret = -1;
- int i = 0;
- char key[1024];
- int keylen;
- char *uuid_str = NULL;
- uuid_t uuid = {0,};
- char *hostname = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (dict);
- GF_ASSERT (key_fmt);
-
- for (i = idx_min; i < idx_max; i++) {
- keylen = snprintf (key, sizeof (key), key_fmt, i);
- ret = dict_get_strn (dict, key, keylen, &uuid_str);
- if (ret) {
- ret = 0;
- continue;
- }
+ int ret = -1;
+ int i = 0;
+ char key[1024];
+ int keylen;
+ char *uuid_str = NULL;
+ uuid_t uuid = {
+ 0,
+ };
+ char *hostname = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(dict);
+ GF_ASSERT(key_fmt);
+
+ for (i = idx_min; i < idx_max; i++) {
+ keylen = snprintf(key, sizeof(key), key_fmt, i);
+ ret = dict_get_strn(dict, key, keylen, &uuid_str);
+ if (ret) {
+ ret = 0;
+ continue;
+ }
- gf_msg_debug (this->name, 0, "Got uuid %s",
- uuid_str);
+ gf_msg_debug(this->name, 0, "Got uuid %s", uuid_str);
- ret = gf_uuid_parse (uuid_str, uuid);
- /* if parsing fails don't error out
- * let the original value be retained
- */
- if (ret) {
- ret = 0;
- continue;
- }
+ ret = gf_uuid_parse(uuid_str, uuid);
+ /* if parsing fails don't error out
+ * let the original value be retained
+ */
+ if (ret) {
+ ret = 0;
+ continue;
+ }
- hostname = glusterd_uuid_to_hostname (uuid);
- if (hostname) {
- gf_msg_debug (this->name, 0, "%s -> %s",
- uuid_str, hostname);
- ret = dict_set_dynstrn (dict, key, keylen, hostname);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Error setting hostname %s to dict",
- hostname);
- GF_FREE (hostname);
- goto out;
- }
- }
+ hostname = glusterd_uuid_to_hostname(uuid);
+ if (hostname) {
+ gf_msg_debug(this->name, 0, "%s -> %s", uuid_str, hostname);
+ ret = dict_set_dynstrn(dict, key, keylen, hostname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Error setting hostname %s to dict", hostname);
+ GF_FREE(hostname);
+ goto out;
+ }
}
+ }
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
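
As its header comment says, glusterd_op_volume_dict_uuid_to_hostname walks keys generated from a printf-style format plus an index, and treats a missing key or an unparsable uuid as skip-and-continue rather than an error. A small sketch of that iteration contract, with a lookup table standing in for the dict and purely hypothetical fixture data:

#include <stdio.h>
#include <string.h>

static const char *lookup(const char *key) {
    /* Hypothetical fixture: only index 1 holds a value. */
    if (strcmp(key, "brick1.path") == 0)
        return "a3f0-fake-uuid";
    return NULL;
}

int main(void) {
    char key[64];

    for (int i = 0; i < 3; i++) {
        snprintf(key, sizeof(key), "brick%d.path", i);
        const char *val = lookup(key);
        if (!val)
            continue; /* absent key: skip, not an error */
        printf("%s: would map uuid %s to a hostname\n", key, val);
    }
    return 0;
}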
static int
-reassign_defrag_status (dict_t *dict, char *key, int keylen, gf_defrag_status_t *status)
+reassign_defrag_status(dict_t *dict, char *key, int keylen,
+ gf_defrag_status_t *status)
{
- int ret = 0;
+ int ret = 0;
- if (!*status)
- return ret;
+ if (!*status)
+ return ret;
- switch (*status) {
+ switch (*status) {
case GF_DEFRAG_STATUS_STARTED:
- *status = GF_DEFRAG_STATUS_LAYOUT_FIX_STARTED;
- break;
+ *status = GF_DEFRAG_STATUS_LAYOUT_FIX_STARTED;
+ break;
case GF_DEFRAG_STATUS_STOPPED:
- *status = GF_DEFRAG_STATUS_LAYOUT_FIX_STOPPED;
- break;
+ *status = GF_DEFRAG_STATUS_LAYOUT_FIX_STOPPED;
+ break;
case GF_DEFRAG_STATUS_COMPLETE:
- *status = GF_DEFRAG_STATUS_LAYOUT_FIX_COMPLETE;
- break;
+ *status = GF_DEFRAG_STATUS_LAYOUT_FIX_COMPLETE;
+ break;
case GF_DEFRAG_STATUS_FAILED:
- *status = GF_DEFRAG_STATUS_LAYOUT_FIX_FAILED;
- break;
+ *status = GF_DEFRAG_STATUS_LAYOUT_FIX_FAILED;
+ break;
default:
- break;
- }
+ break;
+ }
- ret = dict_set_int32n (dict, key, keylen, *status);
- if (ret)
- gf_msg (THIS->name, GF_LOG_WARNING, 0,
- GD_MSG_DICT_SET_FAILED,
- "failed to reset defrag %s in dict", key);
+ ret = dict_set_int32n(dict, key, keylen, *status);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_DICT_SET_FAILED,
+ "failed to reset defrag %s in dict", key);
- return ret;
+ return ret;
}
/* Check and reassign the defrag_status enum got from the rebalance process
@@ -4961,57 +4771,56 @@ reassign_defrag_status (dict_t *dict, char *key, int keylen, gf_defrag_status_t
* full-rebalance or just a fix-layout was carried out.
*/
static int
-glusterd_op_check_peer_defrag_status (dict_t *dict, int count)
+glusterd_op_check_peer_defrag_status(dict_t *dict, int count)
{
- glusterd_volinfo_t *volinfo = NULL;
- gf_defrag_status_t status = GF_DEFRAG_STATUS_NOT_STARTED;
- char key[64] = {0,};
- int keylen;
- char *volname = NULL;
- int ret = -1;
- int i = 1;
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_WARNING, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to get volume name");
- goto out;
- }
+ glusterd_volinfo_t *volinfo = NULL;
+ gf_defrag_status_t status = GF_DEFRAG_STATUS_NOT_STARTED;
+ char key[64] = {
+ 0,
+ };
+ int keylen;
+ char *volname = NULL;
+ int ret = -1;
+ int i = 1;
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_FOUND,
+ FMTSTR_CHECK_VOL_EXISTS, volname);
+ goto out;
+ }
+
+ if (volinfo->rebal.defrag_cmd != GF_DEFRAG_CMD_START_LAYOUT_FIX) {
+ /* Fix layout was not issued; we don't need to reassign
+ the status */
+ ret = 0;
+ goto out;
+ }
- ret = glusterd_volinfo_find (volname, &volinfo);
+ do {
+ keylen = snprintf(key, sizeof(key), "status-%d", i);
+ ret = dict_get_int32n(dict, key, keylen, (int32_t *)&status);
if (ret) {
- gf_msg (THIS->name, GF_LOG_WARNING, 0,
- GD_MSG_VOL_NOT_FOUND, FMTSTR_CHECK_VOL_EXISTS,
- volname);
- goto out;
+ gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
+ "failed to get defrag %s", key);
+ goto out;
}
+ ret = reassign_defrag_status(dict, key, keylen, &status);
+ if (ret)
+ goto out;
+ i++;
+ } while (i <= count);
- if (volinfo->rebal.defrag_cmd != GF_DEFRAG_CMD_START_LAYOUT_FIX) {
- /* Fix layout was not issued; we don't need to reassign
- the status */
- ret = 0;
- goto out;
- }
-
- do {
- keylen = snprintf (key, sizeof (key), "status-%d", i);
- ret = dict_get_int32n (dict, key, keylen, (int32_t *)&status);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_WARNING, 0,
- GD_MSG_DICT_GET_FAILED,
- "failed to get defrag %s", key);
- goto out;
- }
- ret = reassign_defrag_status (dict, key, keylen, &status);
- if (ret)
- goto out;
- i++;
- } while (i <= count);
-
- ret = 0;
+ ret = 0;
out:
- return ret;
-
+ return ret;
}
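
The reassignment these two functions perform is a plain enum remap, applied only when the issued command was GF_DEFRAG_CMD_START_LAYOUT_FIX: generic rebalance states become their layout-fix counterparts and everything else passes through. A reduced sketch with abbreviated enumerators:

#include <stdio.h>

enum defrag_status {
    ST_NOT_STARTED,
    ST_STARTED,
    ST_COMPLETE,
    ST_LAYOUT_FIX_STARTED,
    ST_LAYOUT_FIX_COMPLETE,
};

static enum defrag_status remap_for_fix_layout(enum defrag_status s) {
    switch (s) {
    case ST_STARTED:  return ST_LAYOUT_FIX_STARTED;
    case ST_COMPLETE: return ST_LAYOUT_FIX_COMPLETE;
    default:          return s; /* including ST_NOT_STARTED */
    }
}

int main(void) {
    printf("%d\n", remap_for_fix_layout(ST_STARTED));     /* layout-fix started */
    printf("%d\n", remap_for_fix_layout(ST_NOT_STARTED)); /* unchanged */
    return 0;
}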
/* This function is used to verify if op_ctx indeed
@@ -5035,54 +4844,49 @@ out:
*/
static gf_boolean_t
-glusterd_is_volume_status_modify_op_ctx (uint32_t cmd)
+glusterd_is_volume_status_modify_op_ctx(uint32_t cmd)
{
- if ((cmd & GF_CLI_STATUS_MASK) == GF_CLI_STATUS_NONE) {
- if (cmd & GF_CLI_STATUS_BRICK)
- return _gf_false;
- if (cmd & GF_CLI_STATUS_ALL)
- return _gf_false;
- return _gf_true;
- }
- return _gf_false;
+ if ((cmd & GF_CLI_STATUS_MASK) == GF_CLI_STATUS_NONE) {
+ if (cmd & GF_CLI_STATUS_BRICK)
+ return _gf_false;
+ if (cmd & GF_CLI_STATUS_ALL)
+ return _gf_false;
+ return _gf_true;
+ }
+ return _gf_false;
}
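
The predicate above is a pure bitmask check: modify op_ctx only when no detail sub-command bits are set and neither the BRICK nor the ALL flag is requested. The sketch below mirrors that logic; the flag values are invented for illustration and are not the real GF_CLI_STATUS_* constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STATUS_MASK  0x00FFu /* detail sub-commands live here (assumed layout) */
#define STATUS_BRICK 0x0100u
#define STATUS_ALL   0x0200u

static bool should_modify_op_ctx(uint32_t cmd) {
    if ((cmd & STATUS_MASK) != 0)
        return false; /* a detail sub-command was requested */
    return !(cmd & (STATUS_BRICK | STATUS_ALL));
}

int main(void) {
    printf("%d\n", should_modify_op_ctx(0));            /* 1: plain status */
    printf("%d\n", should_modify_op_ctx(STATUS_BRICK)); /* 0 */
    printf("%d\n", should_modify_op_ctx(0x0004));       /* 0: detail bit set */
    return 0;
}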
int
-glusterd_op_modify_port_key (dict_t *op_ctx, int brick_index_max)
+glusterd_op_modify_port_key(dict_t *op_ctx, int brick_index_max)
{
- char *port = NULL;
- int i = 0;
- int ret = -1;
- char key[64] = {0};
- int keylen;
- char old_key[64] = {0};
- int old_keylen;
-
- for (i = 0; i <= brick_index_max; i++) {
-
- keylen = snprintf (key, sizeof (key), "brick%d.rdma_port", i);
- ret = dict_get_strn (op_ctx, key, keylen, &port);
-
- if (ret) {
-
- old_keylen = snprintf (old_key, sizeof (old_key),
- "brick%d.port", i);
- ret = dict_get_strn (op_ctx, old_key, old_keylen,
- &port);
- if (ret)
- goto out;
-
- ret = dict_set_strn (op_ctx, key, keylen, port);
- if (ret)
- goto out;
- ret = dict_set_nstrn (op_ctx, old_key, old_keylen,
- "\0", SLEN ("\0"));
- if (ret)
- goto out;
- }
+ char *port = NULL;
+ int i = 0;
+ int ret = -1;
+ char key[64] = {0};
+ int keylen;
+ char old_key[64] = {0};
+ int old_keylen;
+
+ for (i = 0; i <= brick_index_max; i++) {
+ keylen = snprintf(key, sizeof(key), "brick%d.rdma_port", i);
+ ret = dict_get_strn(op_ctx, key, keylen, &port);
+
+ if (ret) {
+ old_keylen = snprintf(old_key, sizeof(old_key), "brick%d.port", i);
+ ret = dict_get_strn(op_ctx, old_key, old_keylen, &port);
+ if (ret)
+ goto out;
+
+ ret = dict_set_strn(op_ctx, key, keylen, port);
+ if (ret)
+ goto out;
+ ret = dict_set_nstrn(op_ctx, old_key, old_keylen, "\0", SLEN("\0"));
+ if (ret)
+ goto out;
}
+ }
out:
- return ret;
+ return ret;
}
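
glusterd_op_modify_port_key migrates a per-brick key for backward compatibility: if the new brick%d.rdma_port entry is absent (an older peer produced the reply), the value is copied from the legacy brick%d.port key and the legacy key is blanked to "\0". A sketch of the same migration over a two-field struct instead of a dict:

#include <stdio.h>

struct brick_keys {
    char rdma_port[8]; /* new key */
    char port[8];      /* legacy key */
};

static void migrate_port_key(struct brick_keys *b) {
    if (b->rdma_port[0] != '\0')
        return; /* already using the new key */
    snprintf(b->rdma_port, sizeof(b->rdma_port), "%s", b->port);
    b->port[0] = '\0'; /* legacy key set to "\0", as in the diff */
}

int main(void) {
    struct brick_keys b = { .rdma_port = "", .port = "24008" };
    migrate_port_key(&b);
    printf("rdma_port=%s port='%s'\n", b.rdma_port, b.port);
    return 0;
}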
/* This function is used to modify the op_ctx dict before sending it back
@@ -5090,169 +4894,152 @@ out:
* hostnames etc.
*/
void
-glusterd_op_modify_op_ctx (glusterd_op_t op, void *ctx)
+glusterd_op_modify_op_ctx(glusterd_op_t op, void *ctx)
{
- int ret = -1;
- dict_t *op_ctx = NULL;
- int brick_index_max = -1;
- int other_count = 0;
- int count = 0;
- uint32_t cmd = GF_CLI_STATUS_NONE;
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- char *volname = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- char *port = 0;
- int i = 0;
- char key[64] = {0,};
- int keylen;
-
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
-
- if (ctx)
- op_ctx = ctx;
- else
- op_ctx = glusterd_op_get_ctx();
+ int ret = -1;
+ dict_t *op_ctx = NULL;
+ int brick_index_max = -1;
+ int other_count = 0;
+ int count = 0;
+ uint32_t cmd = GF_CLI_STATUS_NONE;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ char *volname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ char *port = 0;
+ int i = 0;
+ char key[64] = {
+ 0,
+ };
+ int keylen;
+
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
+
+ if (ctx)
+ op_ctx = ctx;
+ else
+ op_ctx = glusterd_op_get_ctx();
+
+ if (!op_ctx) {
+ gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_OPCTX_NULL,
+ "Operation context is not present.");
+ goto out;
+ }
+
+ switch (op) {
+ case GD_OP_STATUS_VOLUME:
+ ret = dict_get_uint32(op_ctx, "cmd", &cmd);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "Failed to get status cmd");
+ goto out;
+ }
- if (!op_ctx) {
- gf_msg (this->name, GF_LOG_CRITICAL, 0,
- GD_MSG_OPCTX_NULL,
- "Operation context is not present.");
+ if (!glusterd_is_volume_status_modify_op_ctx(cmd)) {
+ gf_msg_debug(this->name, 0,
+ "op_ctx modification not required for status "
+ "operation being performed");
goto out;
- }
+ }
- switch (op) {
- case GD_OP_STATUS_VOLUME:
- ret = dict_get_uint32 (op_ctx, "cmd", &cmd);
- if (ret) {
- gf_msg_debug (this->name, 0,
- "Failed to get status cmd");
- goto out;
- }
+ ret = dict_get_int32n(op_ctx, "brick-index-max",
+ SLEN("brick-index-max"), &brick_index_max);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "Failed to get brick-index-max");
+ goto out;
+ }
- if (!glusterd_is_volume_status_modify_op_ctx (cmd)) {
- gf_msg_debug (this->name, 0,
- "op_ctx modification not required for status "
- "operation being performed");
- goto out;
- }
+ ret = dict_get_int32n(op_ctx, "other-count", SLEN("other-count"),
+ &other_count);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "Failed to get other-count");
+ goto out;
+ }
- ret = dict_get_int32n (op_ctx, "brick-index-max",
- SLEN ("brick-index-max"),
- &brick_index_max);
- if (ret) {
- gf_msg_debug (this->name, 0,
- "Failed to get brick-index-max");
- goto out;
- }
+ count = brick_index_max + other_count + 1;
+
+ /*
+ * a glusterd lesser than version 3.7 will be sending the
+ * rdma port in older key. Changing that value from here
+ * to support backward compatibility
+ */
+ ret = dict_get_strn(op_ctx, "volname", SLEN("volname"), &volname);
+ if (ret)
+ goto out;
- ret = dict_get_int32n (op_ctx, "other-count",
- SLEN ("other-count"), &other_count);
+ for (i = 0; i <= brick_index_max; i++) {
+ keylen = snprintf(key, sizeof(key), "brick%d.rdma_port", i);
+ ret = dict_get_strn(op_ctx, key, keylen, &port);
if (ret) {
- gf_msg_debug (this->name, 0,
- "Failed to get other-count");
+ ret = dict_set_nstrn(op_ctx, key, keylen, "\0", SLEN("\0"));
+ if (ret)
goto out;
}
-
- count = brick_index_max + other_count + 1;
-
- /*
- * a glusterd lesser than version 3.7 will be sending the
- * rdma port in older key. Changing that value from here
- * to support backward compatibility
- */
- ret = dict_get_strn (op_ctx, "volname", SLEN ("volname"),
- &volname);
- if (ret)
- goto out;
-
- for (i = 0; i <= brick_index_max; i++) {
- keylen = snprintf (key, sizeof (key),
- "brick%d.rdma_port", i);
- ret = dict_get_strn (op_ctx, key, keylen, &port);
- if (ret) {
- ret = dict_set_nstrn (op_ctx, key, keylen,
- "\0", SLEN ("\0"));
- if (ret)
- goto out;
- }
- }
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret)
- goto out;
- if (conf->op_version < GD_OP_VERSION_3_7_0 &&
- volinfo->transport_type == GF_TRANSPORT_RDMA) {
- ret = glusterd_op_modify_port_key (op_ctx,
- brick_index_max);
- if (ret)
- goto out;
- }
- /* add 'brick%d.peerid' into op_ctx with value of 'brick%d.path'.
- nfs/sshd like services have this additional uuid */
- {
- char *uuid_str = NULL;
- char *uuid = NULL;
- int i;
-
- for (i = brick_index_max + 1; i < count; i++) {
- keylen = snprintf (key, sizeof (key),
- "brick%d.path", i);
- ret = dict_get_strn (op_ctx, key, keylen,
- &uuid_str);
- if (!ret) {
- keylen = snprintf (key, sizeof (key),
- "brick%d.peerid",
- i);
- uuid = gf_strdup (uuid_str);
- if (!uuid) {
- gf_msg_debug (this->name, 0,
- "unable to create dup of"
- " uuid_str");
- continue;
- }
- ret = dict_set_dynstrn (op_ctx, key,
- keylen, uuid);
- if (ret != 0) {
- GF_FREE (uuid);
- }
- }
+ }
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret)
+ goto out;
+ if (conf->op_version < GD_OP_VERSION_3_7_0 &&
+ volinfo->transport_type == GF_TRANSPORT_RDMA) {
+ ret = glusterd_op_modify_port_key(op_ctx, brick_index_max);
+ if (ret)
+ goto out;
+ }
+ /* add 'brick%d.peerid' into op_ctx with value of 'brick%d.path'.
+ nfs/sshd like services have this additional uuid */
+ {
+ char *uuid_str = NULL;
+ char *uuid = NULL;
+ int i;
+
+ for (i = brick_index_max + 1; i < count; i++) {
+ keylen = snprintf(key, sizeof(key), "brick%d.path", i);
+ ret = dict_get_strn(op_ctx, key, keylen, &uuid_str);
+ if (!ret) {
+ keylen = snprintf(key, sizeof(key), "brick%d.peerid",
+ i);
+ uuid = gf_strdup(uuid_str);
+ if (!uuid) {
+ gf_msg_debug(this->name, 0,
+ "unable to create dup of"
+ " uuid_str");
+ continue;
}
+ ret = dict_set_dynstrn(op_ctx, key, keylen, uuid);
+ if (ret != 0) {
+ GF_FREE(uuid);
+ }
+ }
}
+ }
- ret = glusterd_op_volume_dict_uuid_to_hostname (op_ctx,
- "brick%d.path",
- 0, count);
- if (ret)
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_CONVERSION_FAILED,
- "Failed uuid to hostname conversion");
+ ret = glusterd_op_volume_dict_uuid_to_hostname(
+ op_ctx, "brick%d.path", 0, count);
+ if (ret)
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_CONVERSION_FAILED,
+ "Failed uuid to hostname conversion");
- break;
+ break;
case GD_OP_PROFILE_VOLUME:
- ret = dict_get_str_boolean (op_ctx, "nfs", _gf_false);
- if (!ret)
- goto out;
+ ret = dict_get_str_boolean(op_ctx, "nfs", _gf_false);
+ if (!ret)
+ goto out;
- ret = dict_get_int32n (op_ctx, "count", SLEN ("count"),
- &count);
- if (ret) {
- gf_msg_debug (this->name, 0,
- "Failed to get brick count");
- goto out;
- }
+ ret = dict_get_int32n(op_ctx, "count", SLEN("count"), &count);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "Failed to get brick count");
+ goto out;
+ }
- ret = glusterd_op_volume_dict_uuid_to_hostname (op_ctx,
- "%d-brick",
- 1, (count + 1));
- if (ret)
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_CONVERSION_FAILED,
- "Failed uuid to hostname conversion");
+ ret = glusterd_op_volume_dict_uuid_to_hostname(op_ctx, "%d-brick",
+ 1, (count + 1));
+ if (ret)
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_CONVERSION_FAILED,
+ "Failed uuid to hostname conversion");
- break;
+ break;
/* For both rebalance and remove-brick status, the glusterd op is the
* same
@@ -5263,724 +5050,677 @@ glusterd_op_modify_op_ctx (glusterd_op_t op, void *ctx)
case GD_OP_DETACH_TIER_STATUS:
case GD_OP_SCRUB_STATUS:
case GD_OP_SCRUB_ONDEMAND:
- ret = dict_get_int32n (op_ctx, "count", SLEN ("count"),
- &count);
- if (ret) {
- gf_msg_debug (this->name, 0,
- "Failed to get count");
- goto out;
- }
-
- /* add 'node-name-%d' into op_ctx with value uuid_str.
- this will be used to convert to hostname later */
- {
- char *uuid_str = NULL;
- char *uuid = NULL;
- int i;
-
- for (i = 1; i <= count; i++) {
- keylen = snprintf (key, sizeof (key),
- "node-uuid-%d", i);
- ret = dict_get_strn (op_ctx, key, keylen,
- &uuid_str);
- if (!ret) {
- keylen = snprintf (key, sizeof (key),
- "node-name-%d", i);
- uuid = gf_strdup (uuid_str);
- if (!uuid) {
- gf_msg_debug (this->name, 0,
- "unable to create dup of"
- " uuid_str");
- continue;
- }
- ret = dict_set_dynstrn (op_ctx, key,
- keylen, uuid);
- if (ret != 0) {
- GF_FREE (uuid);
- }
- }
+ ret = dict_get_int32n(op_ctx, "count", SLEN("count"), &count);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "Failed to get count");
+ goto out;
+ }
+
+ /* add 'node-name-%d' into op_ctx with value uuid_str.
+ this will be used to convert to hostname later */
+ {
+ char *uuid_str = NULL;
+ char *uuid = NULL;
+ int i;
+
+ for (i = 1; i <= count; i++) {
+ keylen = snprintf(key, sizeof(key), "node-uuid-%d", i);
+ ret = dict_get_strn(op_ctx, key, keylen, &uuid_str);
+ if (!ret) {
+ keylen = snprintf(key, sizeof(key), "node-name-%d", i);
+ uuid = gf_strdup(uuid_str);
+ if (!uuid) {
+ gf_msg_debug(this->name, 0,
+ "unable to create dup of"
+ " uuid_str");
+ continue;
}
+ ret = dict_set_dynstrn(op_ctx, key, keylen, uuid);
+ if (ret != 0) {
+ GF_FREE(uuid);
+ }
+ }
}
+ }
- ret = glusterd_op_volume_dict_uuid_to_hostname (op_ctx,
- "node-name-%d",
- 1, (count + 1));
- if (ret)
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_CONVERSION_FAILED,
- "Failed uuid to hostname conversion");
-
- /* Since Both rebalance and bitrot scrub status/ondemand
- * are going to use same code path till here, we should
- * break in case of scrub status.
- */
- if (op == GD_OP_SCRUB_STATUS || op == GD_OP_SCRUB_ONDEMAND) {
- break;
- }
+ ret = glusterd_op_volume_dict_uuid_to_hostname(
+ op_ctx, "node-name-%d", 1, (count + 1));
+ if (ret)
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_CONVERSION_FAILED,
+ "Failed uuid to hostname conversion");
- ret = glusterd_op_check_peer_defrag_status (op_ctx, count);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DEFRAG_STATUS_UPDATE_FAIL,
- "Failed to reset defrag status for fix-layout");
+ /* Since Both rebalance and bitrot scrub status/ondemand
+ * are going to use same code path till here, we should
+ * break in case of scrub status.
+ */
+ if (op == GD_OP_SCRUB_STATUS || op == GD_OP_SCRUB_ONDEMAND) {
break;
+ }
- default:
- ret = 0;
- gf_msg_debug (this->name, 0,
- "op_ctx modification not required");
- break;
+ ret = glusterd_op_check_peer_defrag_status(op_ctx, count);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DEFRAG_STATUS_UPDATE_FAIL,
+ "Failed to reset defrag status for fix-layout");
+ break;
- }
+ default:
+ ret = 0;
+ gf_msg_debug(this->name, 0, "op_ctx modification not required");
+ break;
+ }
out:
- if (ret)
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_OPCTX_UPDATE_FAIL,
- "op_ctx modification failed");
- return;
+ if (ret)
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_OPCTX_UPDATE_FAIL,
+ "op_ctx modification failed");
+ return;
}
int
-glusterd_op_commit_hook (glusterd_op_t op, dict_t *op_ctx,
- glusterd_commit_hook_type_t type)
+glusterd_op_commit_hook(glusterd_op_t op, dict_t *op_ctx,
+ glusterd_commit_hook_type_t type)
{
- glusterd_conf_t *priv = NULL;
- char hookdir[PATH_MAX] = {0, };
- char scriptdir[PATH_MAX] = {0, };
- char *type_subdir = "";
- char *cmd_subdir = NULL;
- int ret = -1;
- int32_t len = 0;
-
- priv = THIS->private;
- switch (type) {
- case GD_COMMIT_HOOK_NONE:
- case GD_COMMIT_HOOK_MAX:
- /*Won't be called*/
- break;
-
- case GD_COMMIT_HOOK_PRE:
- type_subdir = "pre";
- break;
- case GD_COMMIT_HOOK_POST:
- type_subdir = "post";
- break;
- }
-
- cmd_subdir = glusterd_hooks_get_hooks_cmd_subdir (op);
- if (strlen (cmd_subdir) == 0)
- return -1;
-
- GLUSTERD_GET_HOOKS_DIR (hookdir, GLUSTERD_HOOK_VER, priv);
- len = snprintf (scriptdir, sizeof (scriptdir), "%s/%s/%s",
- hookdir, cmd_subdir, type_subdir);
- if ((len < 0) || (len >= sizeof(scriptdir))) {
- return -1;
- }
-
- switch (type) {
- case GD_COMMIT_HOOK_NONE:
- case GD_COMMIT_HOOK_MAX:
- /*Won't be called*/
- break;
-
- case GD_COMMIT_HOOK_PRE:
- ret = glusterd_hooks_run_hooks (scriptdir, op, op_ctx,
- type);
- break;
- case GD_COMMIT_HOOK_POST:
- ret = glusterd_hooks_post_stub_enqueue (scriptdir, op,
- op_ctx);
- break;
- }
-
- return ret;
+ glusterd_conf_t *priv = NULL;
+ char hookdir[PATH_MAX] = {
+ 0,
+ };
+ char scriptdir[PATH_MAX] = {
+ 0,
+ };
+ char *type_subdir = "";
+ char *cmd_subdir = NULL;
+ int ret = -1;
+ int32_t len = 0;
+
+ priv = THIS->private;
+ switch (type) {
+ case GD_COMMIT_HOOK_NONE:
+ case GD_COMMIT_HOOK_MAX:
+ /*Won't be called*/
+ break;
+
+ case GD_COMMIT_HOOK_PRE:
+ type_subdir = "pre";
+ break;
+ case GD_COMMIT_HOOK_POST:
+ type_subdir = "post";
+ break;
+ }
+
+ cmd_subdir = glusterd_hooks_get_hooks_cmd_subdir(op);
+ if (strlen(cmd_subdir) == 0)
+ return -1;
+
+ GLUSTERD_GET_HOOKS_DIR(hookdir, GLUSTERD_HOOK_VER, priv);
+ len = snprintf(scriptdir, sizeof(scriptdir), "%s/%s/%s", hookdir,
+ cmd_subdir, type_subdir);
+ if ((len < 0) || (len >= sizeof(scriptdir))) {
+ return -1;
+ }
+
+ switch (type) {
+ case GD_COMMIT_HOOK_NONE:
+ case GD_COMMIT_HOOK_MAX:
+ /*Won't be called*/
+ break;
+
+ case GD_COMMIT_HOOK_PRE:
+ ret = glusterd_hooks_run_hooks(scriptdir, op, op_ctx, type);
+ break;
+ case GD_COMMIT_HOOK_POST:
+ ret = glusterd_hooks_post_stub_enqueue(scriptdir, op, op_ctx);
+ break;
+ }
+
+ return ret;
}
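
glusterd_op_commit_hook builds the hook script directory as <hooks-dir>/<command>/<pre|post> and, in the reformatted code as in the original, rejects the path when snprintf reports truncation (a negative return or one >= the buffer size). A standalone sketch of that construction and check; the base directory shown is only an example:

#include <limits.h>
#include <stdio.h>

static int build_hook_dir(char *out, size_t outsz, const char *hookdir,
                          const char *cmd_subdir, const char *type_subdir) {
    int len = snprintf(out, outsz, "%s/%s/%s", hookdir, cmd_subdir,
                       type_subdir);
    if (len < 0 || (size_t)len >= outsz)
        return -1; /* truncated: refuse to run hooks from a bogus path */
    return 0;
}

int main(void) {
    char scriptdir[PATH_MAX];
    if (build_hook_dir(scriptdir, sizeof(scriptdir),
                       "/var/lib/glusterd/hooks/1", "start", "post") == 0)
        printf("%s\n", scriptdir);
    return 0;
}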
static int
-glusterd_op_ac_send_commit_op (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_send_commit_op(glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = 0;
- int ret1 = 0;
- rpc_clnt_procedure_t *proc = NULL;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
- dict_t *dict = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- char *op_errstr = NULL;
- glusterd_op_t op = GD_OP_NONE;
- uint32_t pending_count = 0;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- op = glusterd_op_get_op ();
-
- ret = glusterd_op_build_payload (&dict, &op_errstr, NULL);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_OP_PAYLOAD_BUILD_FAIL,
- LOGSTR_BUILD_PAYLOAD,
- gd_op_list[op]);
- if (op_errstr == NULL)
- gf_asprintf (&op_errstr, OPERRSTR_BUILD_PAYLOAD);
- opinfo.op_errstr = op_errstr;
- goto out;
- }
-
- ret = glusterd_op_commit_perform (op, dict, &op_errstr, NULL); //rsp_dict invalid for source
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_COMMIT_OP_FAIL, LOGSTR_COMMIT_FAIL,
- gd_op_list[op], "localhost", (op_errstr) ? ":" : " ",
- (op_errstr) ? op_errstr : " ");
- if (op_errstr == NULL)
- gf_asprintf (&op_errstr, OPERRSTR_COMMIT_FAIL,
- "localhost");
- opinfo.op_errstr = op_errstr;
- goto out;
- }
-
- rcu_read_lock ();
- cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
- /* Only send requests to peers who were available before the
- * transaction started
- */
- if (peerinfo->generation > opinfo.txn_generation)
- continue;
-
- if (!peerinfo->connected || !peerinfo->mgmt)
- continue;
- if ((peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED) &&
- (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
- continue;
+ int ret = 0;
+ int ret1 = 0;
+ rpc_clnt_procedure_t *proc = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ dict_t *dict = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ char *op_errstr = NULL;
+ glusterd_op_t op = GD_OP_NONE;
+ uint32_t pending_count = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ op = glusterd_op_get_op();
+
+ ret = glusterd_op_build_payload(&dict, &op_errstr, NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_OP_PAYLOAD_BUILD_FAIL,
+ LOGSTR_BUILD_PAYLOAD, gd_op_list[op]);
+ if (op_errstr == NULL)
+ gf_asprintf(&op_errstr, OPERRSTR_BUILD_PAYLOAD);
+ opinfo.op_errstr = op_errstr;
+ goto out;
+ }
+
+ ret = glusterd_op_commit_perform(op, dict, &op_errstr,
+ NULL); // rsp_dict invalid for source
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
+ LOGSTR_COMMIT_FAIL, gd_op_list[op], "localhost",
+ (op_errstr) ? ":" : " ", (op_errstr) ? op_errstr : " ");
+ if (op_errstr == NULL)
+ gf_asprintf(&op_errstr, OPERRSTR_COMMIT_FAIL, "localhost");
+ opinfo.op_errstr = op_errstr;
+ goto out;
+ }
+
+ rcu_read_lock();
+ cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
+ {
+ /* Only send requests to peers who were available before the
+ * transaction started
+ */
+ if (peerinfo->generation > opinfo.txn_generation)
+ continue;
- proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_COMMIT_OP];
- GF_ASSERT (proc);
- if (proc->fn) {
- ret = dict_set_static_ptr (dict, "peerinfo", peerinfo);
- if (ret) {
- rcu_read_unlock ();
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "failed to set peerinfo");
- goto out;
- }
- ret = proc->fn (NULL, this, dict);
- if (ret) {
- gf_msg (this->name, GF_LOG_WARNING, 0,
- GD_MSG_COMMIT_REQ_SEND_FAIL,
- "Failed to "
- "send commit request for operation "
- "'Volume %s' to peer %s",
- gd_op_list[op], peerinfo->hostname);
- continue;
- }
- pending_count++;
- }
- }
- rcu_read_unlock ();
+ if (!peerinfo->connected || !peerinfo->mgmt)
+ continue;
+ if ((peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED) &&
+ (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
+ continue;
- opinfo.pending_count = pending_count;
- gf_msg_debug (this->name, 0, "Sent commit op req for 'Volume %s' "
- "to %d peers", gd_op_list[op], opinfo.pending_count);
+ proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_COMMIT_OP];
+ GF_ASSERT(proc);
+ if (proc->fn) {
+ ret = dict_set_static_ptr(dict, "peerinfo", peerinfo);
+ if (ret) {
+ rcu_read_unlock();
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "failed to set peerinfo");
+ goto out;
+ }
+ ret = proc->fn(NULL, this, dict);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_COMMIT_REQ_SEND_FAIL,
+ "Failed to "
+ "send commit request for operation "
+ "'Volume %s' to peer %s",
+ gd_op_list[op], peerinfo->hostname);
+ continue;
+ }
+ pending_count++;
+ }
+ }
+ rcu_read_unlock();
+
+ opinfo.pending_count = pending_count;
+ gf_msg_debug(this->name, 0,
+ "Sent commit op req for 'Volume %s' "
+ "to %d peers",
+ gd_op_list[op], opinfo.pending_count);
out:
- if (dict)
- dict_unref (dict);
+ if (dict)
+ dict_unref(dict);
- if (ret)
- opinfo.op_ret = ret;
+ if (ret)
+ opinfo.op_ret = ret;
- ret1 = glusterd_set_txn_opinfo (&event->txn_id, &opinfo);
- if (ret1)
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set "
- "transaction's opinfo");
+ ret1 = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
+ if (ret1)
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set "
+ "transaction's opinfo");
- if (ret) {
- glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT,
- &event->txn_id, NULL);
- opinfo.op_ret = ret;
- }
+ if (ret) {
+ glusterd_op_sm_inject_event(GD_OP_EVENT_RCVD_RJT, &event->txn_id, NULL);
+ opinfo.op_ret = ret;
+ }
- if (!opinfo.pending_count) {
- if (op == GD_OP_REPLACE_BRICK) {
- ret = glusterd_op_sm_inject_all_acc (&event->txn_id);
- } else {
- glusterd_op_modify_op_ctx (op, NULL);
- ret = glusterd_op_sm_inject_all_acc (&event->txn_id);
- }
- goto err;
+ if (!opinfo.pending_count) {
+ if (op == GD_OP_REPLACE_BRICK) {
+ ret = glusterd_op_sm_inject_all_acc(&event->txn_id);
+ } else {
+ glusterd_op_modify_op_ctx(op, NULL);
+ ret = glusterd_op_sm_inject_all_acc(&event->txn_id);
}
+ goto err;
+ }
err:
- gf_msg_debug (this->name, 0, "Returning with %d", ret);
-
- return ret;
+ gf_msg_debug(this->name, 0, "Returning with %d", ret);
+ return ret;
}
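
The hunk above shows the send-and-count pattern this state machine leans on everywhere: each peer RPC that actually goes out bumps pending_count, failed sends are skipped with continue, and if nothing was sent at all the aggregate event is injected immediately so the transaction cannot stall waiting for replies that will never arrive. A standalone miniature of that accounting, with toy types and hypothetical names rather than the glusterd API:

    #include <stdio.h>

    /* Toy stand-ins for opinfo/event plumbing; names are hypothetical. */
    struct txn { unsigned pending; };

    static void inject_all_acc(struct txn *t)
    {
        (void)t;
        printf("nothing outstanding: injecting aggregate-accept event\n");
    }

    static int fake_send(int peer)
    {
        return peer % 2; /* pretend odd-numbered peers fail to send */
    }

    static void send_to_peers(struct txn *t, int npeers)
    {
        for (int peer = 0; peer < npeers; peer++) {
            if (fake_send(peer) != 0)
                continue;      /* failed sends are never counted as pending */
            t->pending++;
        }
        if (t->pending == 0)
            inject_all_acc(t); /* no one to wait for: finish the phase now */
    }

    int main(void)
    {
        struct txn t = { 0 };
        send_to_peers(&t, 4);  /* peers 0 and 2 succeed */
        printf("pending = %u\n", t.pending);
        return 0;
    }

The *_acc and *_failed handlers that follow are the other half of the contract: each reply decrements the counter and fires the aggregate event once it reaches zero.
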
static int
-glusterd_op_ac_rcvd_stage_op_acc (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_rcvd_stage_op_acc(glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = 0;
-
- GF_ASSERT (event);
+ int ret = 0;
- if (opinfo.pending_count > 0)
- opinfo.pending_count--;
+ GF_ASSERT(event);
+ if (opinfo.pending_count > 0)
+ opinfo.pending_count--;
- ret = glusterd_set_txn_opinfo (&event->txn_id, &opinfo);
- if (ret)
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set "
- "transaction's opinfo");
+ ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set "
+ "transaction's opinfo");
+ if (opinfo.pending_count > 0)
+ goto out;
-
- if (opinfo.pending_count > 0)
- goto out;
-
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_STAGE_ACC,
- &event->txn_id, NULL);
+ ret = glusterd_op_sm_inject_event(GD_OP_EVENT_STAGE_ACC, &event->txn_id,
+ NULL);
out:
- gf_msg_debug (THIS->name, 0, "Returning %d", ret);
+ gf_msg_debug(THIS->name, 0, "Returning %d", ret);
- return ret;
+ return ret;
}
static int
-glusterd_op_ac_stage_op_failed (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_stage_op_failed(glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = 0;
-
- GF_ASSERT (event);
+ int ret = 0;
- if (opinfo.pending_count > 0)
- opinfo.pending_count--;
+ GF_ASSERT(event);
+ if (opinfo.pending_count > 0)
+ opinfo.pending_count--;
- ret = glusterd_set_txn_opinfo (&event->txn_id, &opinfo);
- if (ret)
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set "
- "transaction's opinfo");
-
+ ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set "
+ "transaction's opinfo");
+ if (opinfo.pending_count > 0)
+ goto out;
- if (opinfo.pending_count > 0)
- goto out;
-
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK,
- &event->txn_id, NULL);
+ ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACK, &event->txn_id,
+ NULL);
out:
- gf_msg_debug (THIS->name, 0, "Returning %d", ret);
+ gf_msg_debug(THIS->name, 0, "Returning %d", ret);
- return ret;
+ return ret;
}
static int
-glusterd_op_ac_commit_op_failed (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_commit_op_failed(glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = 0;
-
- GF_ASSERT (event);
+ int ret = 0;
- if (opinfo.pending_count > 0)
- opinfo.pending_count--;
+ GF_ASSERT(event);
+ if (opinfo.pending_count > 0)
+ opinfo.pending_count--;
- ret = glusterd_set_txn_opinfo (&event->txn_id, &opinfo);
- if (ret)
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set "
- "transaction's opinfo");
+ ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set "
+ "transaction's opinfo");
+ if (opinfo.pending_count > 0)
+ goto out;
-
- if (opinfo.pending_count > 0)
- goto out;
-
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK,
- &event->txn_id, NULL);
+ ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACK, &event->txn_id,
+ NULL);
out:
- gf_msg_debug (THIS->name, 0, "Returning %d", ret);
+ gf_msg_debug(THIS->name, 0, "Returning %d", ret);
- return ret;
+ return ret;
}
static int
-glusterd_op_ac_brick_op_failed (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_brick_op_failed(glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = 0;
- glusterd_op_brick_rsp_ctx_t *ev_ctx = NULL;
- gf_boolean_t free_errstr = _gf_false;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (event);
- GF_ASSERT (ctx);
- ev_ctx = ctx;
-
- ret = glusterd_remove_pending_entry (&opinfo.pending_bricks, ev_ctx->pending_node->node);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_UNKNOWN_RESPONSE, "unknown response received ");
- ret = -1;
- free_errstr = _gf_true;
- goto out;
- }
- if (opinfo.brick_pending_count > 0)
- opinfo.brick_pending_count--;
- if (opinfo.op_ret == 0)
- opinfo.op_ret = ev_ctx->op_ret;
-
- if (opinfo.op_errstr == NULL)
- opinfo.op_errstr = ev_ctx->op_errstr;
- else
- free_errstr = _gf_true;
-
-
- ret = glusterd_set_txn_opinfo (&event->txn_id, &opinfo);
- if (ret)
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set "
- "transaction's opinfo");
-
-
- if (opinfo.brick_pending_count > 0)
- goto out;
-
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK,
- &event->txn_id, ev_ctx->commit_ctx);
+ int ret = 0;
+ glusterd_op_brick_rsp_ctx_t *ev_ctx = NULL;
+ gf_boolean_t free_errstr = _gf_false;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(event);
+ GF_ASSERT(ctx);
+ ev_ctx = ctx;
+
+ ret = glusterd_remove_pending_entry(&opinfo.pending_bricks,
+ ev_ctx->pending_node->node);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNKNOWN_RESPONSE,
+ "unknown response received ");
+ ret = -1;
+ free_errstr = _gf_true;
+ goto out;
+ }
+ if (opinfo.brick_pending_count > 0)
+ opinfo.brick_pending_count--;
+ if (opinfo.op_ret == 0)
+ opinfo.op_ret = ev_ctx->op_ret;
+
+ if (opinfo.op_errstr == NULL)
+ opinfo.op_errstr = ev_ctx->op_errstr;
+ else
+ free_errstr = _gf_true;
+
+ ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set "
+ "transaction's opinfo");
+
+ if (opinfo.brick_pending_count > 0)
+ goto out;
+
+ ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACK, &event->txn_id,
+ ev_ctx->commit_ctx);
out:
- if (ev_ctx->rsp_dict)
- dict_unref (ev_ctx->rsp_dict);
- if (free_errstr && ev_ctx->op_errstr)
- GF_FREE (ev_ctx->op_errstr);
- GF_FREE (ctx);
- gf_msg_debug (this->name, 0, "Returning %d", ret);
-
- return ret;
+ if (ev_ctx->rsp_dict)
+ dict_unref(ev_ctx->rsp_dict);
+ if (free_errstr && ev_ctx->op_errstr)
+ GF_FREE(ev_ctx->op_errstr);
+ GF_FREE(ctx);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+
+ return ret;
}
static int
-glusterd_op_ac_rcvd_commit_op_acc (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_rcvd_commit_op_acc(glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = 0;
- gf_boolean_t commit_ack_inject = _gf_true;
- glusterd_op_t op = GD_OP_NONE;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- op = glusterd_op_get_op ();
- GF_ASSERT (event);
-
- if (opinfo.pending_count > 0)
- opinfo.pending_count--;
-
- ret = glusterd_set_txn_opinfo (&event->txn_id, &opinfo);
- if (ret)
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set "
- "transaction's opinfo");
-
-
- if (opinfo.pending_count > 0)
- goto out;
-
- if (op == GD_OP_REPLACE_BRICK) {
- ret = glusterd_op_sm_inject_all_acc (&event->txn_id);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_RBOP_START_FAIL, "Couldn't start "
- "replace-brick operation.");
- goto out;
- }
-
- commit_ack_inject = _gf_false;
- goto out;
+ int ret = 0;
+ gf_boolean_t commit_ack_inject = _gf_true;
+ glusterd_op_t op = GD_OP_NONE;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ op = glusterd_op_get_op();
+ GF_ASSERT(event);
+
+ if (opinfo.pending_count > 0)
+ opinfo.pending_count--;
+
+ ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set "
+ "transaction's opinfo");
+
+ if (opinfo.pending_count > 0)
+ goto out;
+
+ if (op == GD_OP_REPLACE_BRICK) {
+ ret = glusterd_op_sm_inject_all_acc(&event->txn_id);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RBOP_START_FAIL,
+ "Couldn't start "
+ "replace-brick operation.");
+ goto out;
}
+ commit_ack_inject = _gf_false;
+ goto out;
+ }
out:
- if (commit_ack_inject) {
- if (ret)
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT,
- &event->txn_id,
- NULL);
- else if (!opinfo.pending_count) {
- glusterd_op_modify_op_ctx (op, NULL);
- ret = glusterd_op_sm_inject_event
- (GD_OP_EVENT_COMMIT_ACC,
- &event->txn_id, NULL);
- }
- /*else do nothing*/
+ if (commit_ack_inject) {
+ if (ret)
+ ret = glusterd_op_sm_inject_event(GD_OP_EVENT_RCVD_RJT,
+ &event->txn_id, NULL);
+ else if (!opinfo.pending_count) {
+ glusterd_op_modify_op_ctx(op, NULL);
+ ret = glusterd_op_sm_inject_event(GD_OP_EVENT_COMMIT_ACC,
+ &event->txn_id, NULL);
}
+        /* else do nothing */
+ }
- return ret;
+ return ret;
}
static int
-glusterd_op_ac_rcvd_unlock_acc (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_rcvd_unlock_acc(glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = 0;
+ int ret = 0;
- GF_ASSERT (event);
+ GF_ASSERT(event);
- if (opinfo.pending_count > 0)
- opinfo.pending_count--;
+ if (opinfo.pending_count > 0)
+ opinfo.pending_count--;
- ret = glusterd_set_txn_opinfo (&event->txn_id, &opinfo);
- if (ret)
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set "
- "transaction's opinfo");
+ ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set "
+ "transaction's opinfo");
+ if (opinfo.pending_count > 0)
+ goto out;
- if (opinfo.pending_count > 0)
- goto out;
+ ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACC, &event->txn_id,
+ NULL);
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACC,
- &event->txn_id, NULL);
-
- gf_msg_debug (THIS->name, 0, "Returning %d", ret);
+ gf_msg_debug(THIS->name, 0, "Returning %d", ret);
out:
- return ret;
+ return ret;
}
int32_t
-glusterd_op_clear_errstr() {
- opinfo.op_errstr = NULL;
- return 0;
+glusterd_op_clear_errstr()
+{
+ opinfo.op_errstr = NULL;
+ return 0;
}
int32_t
-glusterd_op_set_ctx (void *ctx)
+glusterd_op_set_ctx(void *ctx)
{
+ opinfo.op_ctx = ctx;
- opinfo.op_ctx = ctx;
-
- return 0;
-
+ return 0;
}
int32_t
-glusterd_op_reset_ctx ()
+glusterd_op_reset_ctx()
{
+ glusterd_op_set_ctx(NULL);
- glusterd_op_set_ctx (NULL);
-
- return 0;
+ return 0;
}
int32_t
-glusterd_op_txn_complete (uuid_t *txn_id)
+glusterd_op_txn_complete(uuid_t *txn_id)
{
- int32_t ret = -1;
- glusterd_conf_t *priv = NULL;
- int32_t op = -1;
- int32_t op_ret = 0;
- int32_t op_errno = 0;
- rpcsvc_request_t *req = NULL;
- void *ctx = NULL;
- char *op_errstr = NULL;
- char *volname = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- priv = this->private;
- GF_ASSERT (priv);
-
- op = glusterd_op_get_op ();
- ctx = glusterd_op_get_ctx ();
- op_ret = opinfo.op_ret;
- op_errno = opinfo.op_errno;
- req = opinfo.req;
- if (opinfo.op_errstr)
- op_errstr = opinfo.op_errstr;
-
- opinfo.op_ret = 0;
- opinfo.op_errno = 0;
- glusterd_op_clear_op ();
- glusterd_op_reset_ctx ();
- glusterd_op_clear_errstr ();
-
- /* Based on the op-version, we release the cluster or mgmt_v3 lock */
- if (priv->op_version < GD_OP_VERSION_3_6_0) {
- ret = glusterd_unlock (MY_UUID);
- /* unlock can't/shouldn't fail here!! */
- if (ret)
- gf_msg (this->name, GF_LOG_CRITICAL, 0,
- GD_MSG_GLUSTERD_UNLOCK_FAIL,
- "Unable to clear local lock, ret: %d", ret);
- else
- gf_msg_debug (this->name, 0, "Cleared local lock");
- } else {
- ret = dict_get_strn (ctx, "volname", SLEN ("volname"),
- &volname);
- if (ret)
- gf_msg (this->name, GF_LOG_INFO, 0,
- GD_MSG_DICT_GET_FAILED,
- "No Volume name present. "
- "Locks have not been held.");
-
- if (volname) {
- ret = glusterd_mgmt_v3_unlock (volname, MY_UUID,
- "vol");
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_MGMTV3_UNLOCK_FAIL,
- "Unable to release lock for %s",
- volname);
- }
- }
-
- ret = glusterd_op_send_cli_response (op, op_ret,
- op_errno, req, ctx, op_errstr);
+ int32_t ret = -1;
+ glusterd_conf_t *priv = NULL;
+ int32_t op = -1;
+ int32_t op_ret = 0;
+ int32_t op_errno = 0;
+ rpcsvc_request_t *req = NULL;
+ void *ctx = NULL;
+ char *op_errstr = NULL;
+ char *volname = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ op = glusterd_op_get_op();
+ ctx = glusterd_op_get_ctx();
+ op_ret = opinfo.op_ret;
+ op_errno = opinfo.op_errno;
+ req = opinfo.req;
+ if (opinfo.op_errstr)
+ op_errstr = opinfo.op_errstr;
+
+ opinfo.op_ret = 0;
+ opinfo.op_errno = 0;
+ glusterd_op_clear_op();
+ glusterd_op_reset_ctx();
+ glusterd_op_clear_errstr();
+
+ /* Based on the op-version, we release the cluster or mgmt_v3 lock */
+ if (priv->op_version < GD_OP_VERSION_3_6_0) {
+ ret = glusterd_unlock(MY_UUID);
+ /* unlock can't/shouldn't fail here!! */
+ if (ret)
+ gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_GLUSTERD_UNLOCK_FAIL,
+ "Unable to clear local lock, ret: %d", ret);
+ else
+ gf_msg_debug(this->name, 0, "Cleared local lock");
+ } else {
+ ret = dict_get_strn(ctx, "volname", SLEN("volname"), &volname);
+ if (ret)
+ gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
+ "No Volume name present. "
+ "Locks have not been held.");
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_NO_CLI_RESP,
- "Responding to cli failed, "
- "ret: %d", ret);
- //Ignore this error, else state machine blocks
- ret = 0;
+ if (volname) {
+ ret = glusterd_mgmt_v3_unlock(volname, MY_UUID, "vol");
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
+ "Unable to release lock for %s", volname);
}
+ }
- if (op_errstr && (strcmp (op_errstr, "")))
- GF_FREE (op_errstr);
+ ret = glusterd_op_send_cli_response(op, op_ret, op_errno, req, ctx,
+ op_errstr);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_CLI_RESP,
+ "Responding to cli failed, "
+ "ret: %d",
+ ret);
+ // Ignore this error, else state machine blocks
+ ret = 0;
+ }
- if (priv->pending_quorum_action)
- glusterd_do_quorum_action ();
+ if (op_errstr && (strcmp(op_errstr, "")))
+ GF_FREE(op_errstr);
- /* Clearing the transaction opinfo */
- ret = glusterd_clear_txn_opinfo (txn_id);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_CLEAR_FAIL,
- "Unable to clear transaction's opinfo");
+ if (priv->pending_quorum_action)
+ glusterd_do_quorum_action();
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ /* Clearing the transaction opinfo */
+ ret = glusterd_clear_txn_opinfo(txn_id);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_CLEAR_FAIL,
+ "Unable to clear transaction's opinfo");
+
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
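
glusterd_op_txn_complete releases whichever lock was taken at the start of the transaction, chosen by the cluster operating version: a single cluster-wide lock before GD_OP_VERSION_3_6_0, a per-volume mgmt_v3 lock afterwards, and no lock at all when the transaction carries no volume name. A compact sketch of that branch, with stub helpers and an assumed op-version constant:

    #include <stdio.h>

    enum { OP_VERSION_3_6_0 = 30600 }; /* value assumed for illustration */

    /* Stubs standing in for glusterd_unlock()/glusterd_mgmt_v3_unlock(). */
    static void cluster_unlock(void) { printf("released cluster-wide lock\n"); }
    static void volume_unlock(const char *vol) { printf("released lock on %s\n", vol); }

    static void release_txn_lock(int op_version, const char *volname)
    {
        if (op_version < OP_VERSION_3_6_0) {
            cluster_unlock();       /* pre-3.6: one coarse lock */
        } else if (volname) {
            volume_unlock(volname); /* 3.6+: fine-grained per-volume lock */
        }
        /* no volname: volume-less transaction, nothing was ever locked */
    }

    int main(void)
    {
        release_txn_lock(30000, "gv0"); /* old cluster */
        release_txn_lock(30600, "gv0"); /* new cluster */
        return 0;
    }
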
static int
-glusterd_op_ac_unlocked_all (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_unlocked_all(glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = 0;
+ int ret = 0;
- GF_ASSERT (event);
+ GF_ASSERT(event);
- ret = glusterd_op_txn_complete (&event->txn_id);
+ ret = glusterd_op_txn_complete(&event->txn_id);
- gf_msg_debug (THIS->name, 0, "Returning %d", ret);
+ gf_msg_debug(THIS->name, 0, "Returning %d", ret);
- return ret;
+ return ret;
}
static int
-glusterd_op_ac_stage_op (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_stage_op(glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = -1;
- glusterd_req_ctx_t *req_ctx = NULL;
- int32_t status = 0;
- dict_t *rsp_dict = NULL;
- char *op_errstr = NULL;
- dict_t *dict = NULL;
- xlator_t *this = NULL;
- uuid_t *txn_id = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (ctx);
-
- req_ctx = ctx;
-
- dict = req_ctx->dict;
-
- rsp_dict = dict_new ();
- if (!rsp_dict) {
- gf_msg (this->name, GF_LOG_ERROR, ENOMEM,
- GD_MSG_DICT_CREATE_FAIL,
- "Failed to get new dictionary");
- return -1;
- }
-
- status = glusterd_op_stage_validate (req_ctx->op, dict, &op_errstr,
- rsp_dict);
+ int ret = -1;
+ glusterd_req_ctx_t *req_ctx = NULL;
+ int32_t status = 0;
+ dict_t *rsp_dict = NULL;
+ char *op_errstr = NULL;
+ dict_t *dict = NULL;
+ xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(ctx);
+
+ req_ctx = ctx;
+
+ dict = req_ctx->dict;
+
+ rsp_dict = dict_new();
+ if (!rsp_dict) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
+ "Failed to get new dictionary");
+ return -1;
+ }
+
+ status = glusterd_op_stage_validate(req_ctx->op, dict, &op_errstr,
+ rsp_dict);
+
+ if (status) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VALIDATE_FAILED,
+ "Stage failed on operation"
+ " 'Volume %s', Status : %d",
+ gd_op_list[req_ctx->op], status);
+ }
+
+ txn_id = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
+
+ if (txn_id)
+ gf_uuid_copy(*txn_id, event->txn_id);
+ else {
+ ret = -1;
+ goto out;
+ }
- if (status) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_VALIDATE_FAILED, "Stage failed on operation"
- " 'Volume %s', Status : %d", gd_op_list[req_ctx->op],
- status);
- }
+ ret = dict_set_bin(rsp_dict, "transaction_id", txn_id, sizeof(*txn_id));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set transaction id.");
+ GF_FREE(txn_id);
+ goto out;
+ }
- txn_id = GF_MALLOC (sizeof(uuid_t), gf_common_mt_uuid_t);
-
- if (txn_id)
- gf_uuid_copy (*txn_id, event->txn_id);
- else {
- ret = -1;
- goto out;
- }
-
- ret = dict_set_bin (rsp_dict, "transaction_id",
- txn_id, sizeof(*txn_id));
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set transaction id.");
- GF_FREE (txn_id);
- goto out;
- }
-
- ret = glusterd_op_stage_send_resp (req_ctx->req, req_ctx->op,
- status, op_errstr, rsp_dict);
+ ret = glusterd_op_stage_send_resp(req_ctx->req, req_ctx->op, status,
+ op_errstr, rsp_dict);
out:
- if (op_errstr && (strcmp (op_errstr, "")))
- GF_FREE (op_errstr);
+ if (op_errstr && (strcmp(op_errstr, "")))
+ GF_FREE(op_errstr);
- gf_msg_debug (this->name, 0, "Returning with %d", ret);
+ gf_msg_debug(this->name, 0, "Returning with %d", ret);
- if (rsp_dict)
- dict_unref (rsp_dict);
+ if (rsp_dict)
+ dict_unref(rsp_dict);
- return ret;
+ return ret;
}
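
Worth noting in the hunk above is the ownership handoff on transaction_id: the heap-allocated uuid is donated to rsp_dict through dict_set_bin, so it is released when the dict is, and only a failed set leaves the caller to GF_FREE it. A toy model of that success-transfers, failure-retains contract (not the real dict API):

    #include <stdlib.h>
    #include <string.h>

    /* Toy container: takes ownership of ptr on success only. */
    struct bag { void *bin; };

    static int bag_set_bin(struct bag *b, void *ptr)
    {
        if (b->bin)
            return -1;  /* simulated failure: caller keeps ownership */
        b->bin = ptr;   /* success: bag owns ptr and frees it on destroy */
        return 0;
    }

    static void demo(struct bag *b)
    {
        char *id = malloc(16);
        if (!id)
            return;
        memcpy(id, "0123456789abcdef", 16);
        if (bag_set_bin(b, id) != 0)
            free(id);   /* failed handoff: free locally, as the hunk above does */
    }

    int main(void)
    {
        struct bag b = { 0 };
        demo(&b);       /* first call succeeds; bag owns the buffer */
        demo(&b);       /* second call fails the set and frees locally */
        free(b.bin);    /* "destroying" the bag releases the donated buffer */
        return 0;
    }
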
static gf_boolean_t
-glusterd_need_brick_op (glusterd_op_t op)
+glusterd_need_brick_op(glusterd_op_t op)
{
- gf_boolean_t ret = _gf_false;
+ gf_boolean_t ret = _gf_false;
- GF_ASSERT (GD_OP_NONE < op && op < GD_OP_MAX);
+ GF_ASSERT(GD_OP_NONE < op && op < GD_OP_MAX);
- switch (op) {
+ switch (op) {
case GD_OP_PROFILE_VOLUME:
case GD_OP_STATUS_VOLUME:
case GD_OP_TIER_STATUS:
@@ -5989,1468 +5729,1417 @@ glusterd_need_brick_op (glusterd_op_t op)
case GD_OP_HEAL_VOLUME:
case GD_OP_SCRUB_STATUS:
case GD_OP_SCRUB_ONDEMAND:
- ret = _gf_true;
- break;
+ ret = _gf_true;
+ break;
default:
- ret = _gf_false;
- }
+ ret = _gf_false;
+ }
- return ret;
+ return ret;
}
-dict_t*
-glusterd_op_init_commit_rsp_dict (glusterd_op_t op)
+dict_t *
+glusterd_op_init_commit_rsp_dict(glusterd_op_t op)
{
- dict_t *rsp_dict = NULL;
- dict_t *op_ctx = NULL;
+ dict_t *rsp_dict = NULL;
+ dict_t *op_ctx = NULL;
- GF_ASSERT (GD_OP_NONE < op && op < GD_OP_MAX);
+ GF_ASSERT(GD_OP_NONE < op && op < GD_OP_MAX);
- if (glusterd_need_brick_op (op)) {
- op_ctx = glusterd_op_get_ctx ();
- GF_ASSERT (op_ctx);
- rsp_dict = dict_ref (op_ctx);
- } else {
- rsp_dict = dict_new ();
- }
+ if (glusterd_need_brick_op(op)) {
+ op_ctx = glusterd_op_get_ctx();
+ GF_ASSERT(op_ctx);
+ rsp_dict = dict_ref(op_ctx);
+ } else {
+ rsp_dict = dict_new();
+ }
- return rsp_dict;
+ return rsp_dict;
}
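
The helper above encodes a reference-counting convention: brick-level ops aggregate brick responses directly into the shared op context, so the caller gets an extra ref on it, while every other op gets a private dict; the caller's single dict_unref is balanced either way. A toy refcount model of the same contract:

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_dict { int refs; };

    static struct toy_dict *toy_new(void)
    {
        struct toy_dict *d = calloc(1, sizeof(*d));
        if (d)
            d->refs = 1;
        return d;
    }
    static struct toy_dict *toy_ref(struct toy_dict *d) { d->refs++; return d; }
    static void toy_unref(struct toy_dict *d) { if (--d->refs == 0) free(d); }

    /* Shared ctx for brick ops (responses aggregate in place), fresh dict
     * otherwise; the caller's later single unref is balanced in both cases. */
    static struct toy_dict *init_rsp_dict(int need_brick_op, struct toy_dict *ctx)
    {
        return need_brick_op ? toy_ref(ctx) : toy_new();
    }

    int main(void)
    {
        struct toy_dict *ctx = toy_new();
        if (!ctx)
            return 1;
        struct toy_dict *rsp = init_rsp_dict(1, ctx);
        toy_unref(rsp);                  /* ctx survives: refs back to 1 */
        printf("ctx refs: %d\n", ctx->refs);
        toy_unref(ctx);
        return 0;
    }
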
static int
-glusterd_op_ac_commit_op (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_commit_op(glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = 0;
- glusterd_req_ctx_t *req_ctx = NULL;
- int32_t status = 0;
- char *op_errstr = NULL;
- dict_t *dict = NULL;
- dict_t *rsp_dict = NULL;
- xlator_t *this = NULL;
- uuid_t *txn_id = NULL;
- glusterd_op_info_t txn_op_info = {{0},};
- gf_boolean_t need_cleanup = _gf_true;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (ctx);
+ int ret = 0;
+ glusterd_req_ctx_t *req_ctx = NULL;
+ int32_t status = 0;
+ char *op_errstr = NULL;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+ xlator_t *this = NULL;
+ uuid_t *txn_id = NULL;
+ glusterd_op_info_t txn_op_info = {
+ {0},
+ };
+ gf_boolean_t need_cleanup = _gf_true;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(ctx);
+
+ req_ctx = ctx;
+
+ dict = req_ctx->dict;
+
+ rsp_dict = glusterd_op_init_commit_rsp_dict(req_ctx->op);
+ if (NULL == rsp_dict)
+ return -1;
+
+ if (GD_OP_CLEARLOCKS_VOLUME == req_ctx->op) {
+        /* clear locks should be run only on
+         * originator glusterd */
+ status = 0;
+
+ } else {
+ status = glusterd_op_commit_perform(req_ctx->op, dict, &op_errstr,
+ rsp_dict);
+ }
+
+ if (status)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
+ "Commit of operation "
+ "'Volume %s' failed: %d",
+ gd_op_list[req_ctx->op], status);
+
+ txn_id = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
+
+ if (txn_id)
+ gf_uuid_copy(*txn_id, event->txn_id);
+ else {
+ ret = -1;
+ goto out;
+ }
+ ret = glusterd_get_txn_opinfo(&event->txn_id, &txn_op_info);
+ if (ret) {
+ gf_msg_callingfn(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_TRANS_OPINFO_GET_FAIL,
+ "Unable to get transaction opinfo "
+ "for transaction ID : %s",
+ uuid_utoa(event->txn_id));
+ goto out;
+ }
+
+ ret = dict_set_bin(rsp_dict, "transaction_id", txn_id, sizeof(*txn_id));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set transaction id.");
+ if (txn_op_info.skip_locking)
+ ret = glusterd_clear_txn_opinfo(txn_id);
+ need_cleanup = _gf_false;
+ GF_FREE(txn_id);
+ goto out;
+ }
+
+ ret = glusterd_op_commit_send_resp(req_ctx->req, req_ctx->op, status,
+ op_errstr, rsp_dict);
- req_ctx = ctx;
+out:
+ if (op_errstr && (strcmp(op_errstr, "")))
+ GF_FREE(op_errstr);
+
+ if (rsp_dict)
+ dict_unref(rsp_dict);
+ /* for no volname transactions, the txn_opinfo needs to be cleaned up
+ * as there's no unlock event triggered
+ */
+ if (need_cleanup && txn_id && txn_op_info.skip_locking)
+ ret = glusterd_clear_txn_opinfo(txn_id);
+ gf_msg_debug(this->name, 0, "Returning with %d", ret);
+
+ return ret;
+}
- dict = req_ctx->dict;
+static int
+glusterd_op_ac_send_commit_failed(glusterd_op_sm_event_t *event, void *ctx)
+{
+ int ret = 0;
+ glusterd_req_ctx_t *req_ctx = NULL;
+ dict_t *op_ctx = NULL;
- rsp_dict = glusterd_op_init_commit_rsp_dict (req_ctx->op);
- if (NULL == rsp_dict)
- return -1;
+ GF_ASSERT(ctx);
+ req_ctx = ctx;
- if (GD_OP_CLEARLOCKS_VOLUME == req_ctx->op) {
- /*clear locks should be run only on
- * originator glusterd*/
- status = 0;
+ op_ctx = glusterd_op_get_ctx();
- } else {
- status = glusterd_op_commit_perform (req_ctx->op, dict,
- &op_errstr, rsp_dict);
- }
+ ret = glusterd_op_commit_send_resp(req_ctx->req, req_ctx->op, opinfo.op_ret,
+ opinfo.op_errstr, op_ctx);
- if (status)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_COMMIT_OP_FAIL, "Commit of operation "
- "'Volume %s' failed: %d", gd_op_list[req_ctx->op],
- status);
+ if (opinfo.op_errstr && (strcmp(opinfo.op_errstr, ""))) {
+ GF_FREE(opinfo.op_errstr);
+ opinfo.op_errstr = NULL;
+ }
- txn_id = GF_MALLOC (sizeof(uuid_t), gf_common_mt_uuid_t);
+ ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
+ if (ret)
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set "
+ "transaction's opinfo");
- if (txn_id)
- gf_uuid_copy (*txn_id, event->txn_id);
- else {
- ret = -1;
- goto out;
- }
- ret = glusterd_get_txn_opinfo (&event->txn_id, &txn_op_info);
- if (ret) {
- gf_msg_callingfn (this->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_GET_FAIL,
- "Unable to get transaction opinfo "
- "for transaction ID : %s",
- uuid_utoa (event->txn_id));
- goto out;
- }
+ gf_msg_debug(THIS->name, 0, "Returning with %d", ret);
+ return ret;
+}
- ret = dict_set_bin (rsp_dict, "transaction_id",
- txn_id, sizeof(*txn_id));
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set transaction id.");
- if (txn_op_info.skip_locking)
- ret = glusterd_clear_txn_opinfo (txn_id);
- need_cleanup = _gf_false;
- GF_FREE (txn_id);
- goto out;
- }
+static int
+glusterd_op_sm_transition_state(glusterd_op_info_t *opinfo,
+ glusterd_op_sm_t *state,
+ glusterd_op_sm_event_type_t event_type)
+{
+ glusterd_conf_t *conf = NULL;
- ret = glusterd_op_commit_send_resp (req_ctx->req, req_ctx->op,
- status, op_errstr, rsp_dict);
+ GF_ASSERT(state);
+ GF_ASSERT(opinfo);
-out:
- if (op_errstr && (strcmp (op_errstr, "")))
- GF_FREE (op_errstr);
+ conf = THIS->private;
+ GF_ASSERT(conf);
- if (rsp_dict)
- dict_unref (rsp_dict);
- /* for no volname transactions, the txn_opinfo needs to be cleaned up
- * as there's no unlock event triggered
- */
- if (need_cleanup && txn_id && txn_op_info.skip_locking)
- ret = glusterd_clear_txn_opinfo (txn_id);
- gf_msg_debug (this->name, 0, "Returning with %d", ret);
+ (void)glusterd_sm_tr_log_transition_add(
+ &conf->op_sm_log, opinfo->state.state, state[event_type].next_state,
+ event_type);
- return ret;
+ opinfo->state.state = state[event_type].next_state;
+ return 0;
}
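
The transition helper is the core of a table-driven state machine: the event type indexes a row of the current state's transition table, next_state is installed unconditionally, and the old/new pair is recorded in the transition log. A self-contained miniature of the same table scheme, with hypothetical state and event names:

    #include <stdio.h>

    /* Hypothetical miniature states/events; the real tables live in this file. */
    enum state { ST_DEFAULT, ST_STAGED, ST_COMMITED, ST_MAX };
    enum event { EV_STAGE_ACC, EV_COMMIT_ACC, EV_MAX };

    struct sm_entry { enum state next_state; };

    /* One row per current state, one column per event type. */
    static const struct sm_entry table[ST_MAX][EV_MAX] = {
        [ST_DEFAULT]  = { [EV_STAGE_ACC] = { ST_STAGED },
                          [EV_COMMIT_ACC] = { ST_DEFAULT } },
        [ST_STAGED]   = { [EV_STAGE_ACC] = { ST_STAGED },
                          [EV_COMMIT_ACC] = { ST_COMMITED } },
        [ST_COMMITED] = { [EV_STAGE_ACC] = { ST_COMMITED },
                          [EV_COMMIT_ACC] = { ST_COMMITED } },
    };

    static enum state transition(enum state cur, enum event ev)
    {
        /* same lookup as state[event_type].next_state above */
        return table[cur][ev].next_state;
    }

    int main(void)
    {
        enum state s = ST_DEFAULT;
        s = transition(s, EV_STAGE_ACC);
        s = transition(s, EV_COMMIT_ACC);
        printf("final state: %d\n", s); /* 2 == ST_COMMITED */
        return 0;
    }
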
-static int
-glusterd_op_ac_send_commit_failed (glusterd_op_sm_event_t *event, void *ctx)
+int32_t
+glusterd_op_stage_validate(glusterd_op_t op, dict_t *dict, char **op_errstr,
+ dict_t *rsp_dict)
{
- int ret = 0;
- glusterd_req_ctx_t *req_ctx = NULL;
- dict_t *op_ctx = NULL;
+ int ret = -1;
+ xlator_t *this = THIS;
- GF_ASSERT (ctx);
+ switch (op) {
+ case GD_OP_CREATE_VOLUME:
+ ret = glusterd_op_stage_create_volume(dict, op_errstr, rsp_dict);
+ break;
- req_ctx = ctx;
+ case GD_OP_START_VOLUME:
+ ret = glusterd_op_stage_start_volume(dict, op_errstr, rsp_dict);
+ break;
- op_ctx = glusterd_op_get_ctx ();
+ case GD_OP_STOP_VOLUME:
+ ret = glusterd_op_stage_stop_volume(dict, op_errstr);
+ break;
- ret = glusterd_op_commit_send_resp (req_ctx->req, req_ctx->op,
- opinfo.op_ret, opinfo.op_errstr,
- op_ctx);
+ case GD_OP_DELETE_VOLUME:
+ ret = glusterd_op_stage_delete_volume(dict, op_errstr);
+ break;
- if (opinfo.op_errstr && (strcmp (opinfo.op_errstr, ""))) {
- GF_FREE (opinfo.op_errstr);
- opinfo.op_errstr = NULL;
- }
+ case GD_OP_ADD_BRICK:
+ ret = glusterd_op_stage_add_brick(dict, op_errstr, rsp_dict);
+ break;
- ret = glusterd_set_txn_opinfo (&event->txn_id, &opinfo);
- if (ret)
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set "
- "transaction's opinfo");
+ case GD_OP_REPLACE_BRICK:
+ ret = glusterd_op_stage_replace_brick(dict, op_errstr, rsp_dict);
+ break;
+ case GD_OP_SET_VOLUME:
+ ret = glusterd_op_stage_set_volume(dict, op_errstr);
+ break;
- gf_msg_debug (THIS->name, 0, "Returning with %d", ret);
- return ret;
-}
+ case GD_OP_RESET_VOLUME:
+ ret = glusterd_op_stage_reset_volume(dict, op_errstr);
+ break;
+ case GD_OP_REMOVE_BRICK:
+ ret = glusterd_op_stage_remove_brick(dict, op_errstr);
+ break;
-static int
-glusterd_op_sm_transition_state (glusterd_op_info_t *opinfo,
- glusterd_op_sm_t *state,
- glusterd_op_sm_event_type_t event_type)
-{
- glusterd_conf_t *conf = NULL;
+ case GD_OP_LOG_ROTATE:
+ ret = glusterd_op_stage_log_rotate(dict, op_errstr);
+ break;
- GF_ASSERT (state);
- GF_ASSERT (opinfo);
+ case GD_OP_SYNC_VOLUME:
+ ret = glusterd_op_stage_sync_volume(dict, op_errstr);
+ break;
- conf = THIS->private;
- GF_ASSERT (conf);
+ case GD_OP_GSYNC_CREATE:
+ ret = glusterd_op_stage_gsync_create(dict, op_errstr);
+ break;
- (void) glusterd_sm_tr_log_transition_add (&conf->op_sm_log,
- opinfo->state.state,
- state[event_type].next_state,
- event_type);
+ case GD_OP_GSYNC_SET:
+ ret = glusterd_op_stage_gsync_set(dict, op_errstr);
+ break;
- opinfo->state.state = state[event_type].next_state;
- return 0;
-}
+ case GD_OP_PROFILE_VOLUME:
+ ret = glusterd_op_stage_stats_volume(dict, op_errstr);
+ break;
-int32_t
-glusterd_op_stage_validate (glusterd_op_t op, dict_t *dict, char **op_errstr,
- dict_t *rsp_dict)
-{
- int ret = -1;
- xlator_t *this = THIS;
+ case GD_OP_QUOTA:
+ ret = glusterd_op_stage_quota(dict, op_errstr, rsp_dict);
+ break;
- switch (op) {
- case GD_OP_CREATE_VOLUME:
- ret = glusterd_op_stage_create_volume (dict, op_errstr,
- rsp_dict);
- break;
-
- case GD_OP_START_VOLUME:
- ret = glusterd_op_stage_start_volume (dict, op_errstr,
- rsp_dict);
- break;
-
- case GD_OP_STOP_VOLUME:
- ret = glusterd_op_stage_stop_volume (dict, op_errstr);
- break;
-
- case GD_OP_DELETE_VOLUME:
- ret = glusterd_op_stage_delete_volume (dict, op_errstr);
- break;
-
- case GD_OP_ADD_BRICK:
- ret = glusterd_op_stage_add_brick (dict, op_errstr,
- rsp_dict);
- break;
-
- case GD_OP_REPLACE_BRICK:
- ret = glusterd_op_stage_replace_brick (dict, op_errstr,
- rsp_dict);
- break;
-
- case GD_OP_SET_VOLUME:
- ret = glusterd_op_stage_set_volume (dict, op_errstr);
- break;
-
- case GD_OP_RESET_VOLUME:
- ret = glusterd_op_stage_reset_volume (dict, op_errstr);
- break;
- case GD_OP_REMOVE_BRICK:
- ret = glusterd_op_stage_remove_brick (dict, op_errstr);
- break;
-
- case GD_OP_LOG_ROTATE:
- ret = glusterd_op_stage_log_rotate (dict, op_errstr);
- break;
-
- case GD_OP_SYNC_VOLUME:
- ret = glusterd_op_stage_sync_volume (dict, op_errstr);
- break;
-
- case GD_OP_GSYNC_CREATE:
- ret = glusterd_op_stage_gsync_create (dict, op_errstr);
- break;
-
- case GD_OP_GSYNC_SET:
- ret = glusterd_op_stage_gsync_set (dict, op_errstr);
- break;
-
- case GD_OP_PROFILE_VOLUME:
- ret = glusterd_op_stage_stats_volume (dict, op_errstr);
- break;
-
- case GD_OP_QUOTA:
- ret = glusterd_op_stage_quota (dict, op_errstr,
- rsp_dict);
- break;
-
- case GD_OP_STATUS_VOLUME:
- ret = glusterd_op_stage_status_volume (dict, op_errstr);
- break;
-
- case GD_OP_REBALANCE:
- case GD_OP_DEFRAG_BRICK_VOLUME:
- ret = glusterd_op_stage_rebalance (dict, op_errstr);
- break;
-
- case GD_OP_HEAL_VOLUME:
- ret = glusterd_op_stage_heal_volume (dict, op_errstr);
- break;
-
- case GD_OP_STATEDUMP_VOLUME:
- ret = glusterd_op_stage_statedump_volume (dict,
- op_errstr);
- break;
- case GD_OP_CLEARLOCKS_VOLUME:
- ret = glusterd_op_stage_clearlocks_volume (dict,
- op_errstr);
- break;
-
- case GD_OP_COPY_FILE:
- ret = glusterd_op_stage_copy_file (dict, op_errstr);
- break;
-
- case GD_OP_SYS_EXEC:
- ret = glusterd_op_stage_sys_exec (dict, op_errstr);
- break;
-
- case GD_OP_BARRIER:
- ret = glusterd_op_stage_barrier (dict, op_errstr);
- break;
-
- case GD_OP_BITROT:
- case GD_OP_SCRUB_STATUS:
- case GD_OP_SCRUB_ONDEMAND:
- ret = glusterd_op_stage_bitrot (dict, op_errstr,
- rsp_dict);
- break;
-
- default:
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_INVALID_ENTRY, "Unknown op %s",
- gd_op_list[op]);
- }
-
- gf_msg_debug (this->name, 0, "OP = %d. Returning %d", op, ret);
- return ret;
+ case GD_OP_STATUS_VOLUME:
+ ret = glusterd_op_stage_status_volume(dict, op_errstr);
+ break;
+
+ case GD_OP_REBALANCE:
+ case GD_OP_DEFRAG_BRICK_VOLUME:
+ ret = glusterd_op_stage_rebalance(dict, op_errstr);
+ break;
+
+ case GD_OP_HEAL_VOLUME:
+ ret = glusterd_op_stage_heal_volume(dict, op_errstr);
+ break;
+
+ case GD_OP_STATEDUMP_VOLUME:
+ ret = glusterd_op_stage_statedump_volume(dict, op_errstr);
+ break;
+ case GD_OP_CLEARLOCKS_VOLUME:
+ ret = glusterd_op_stage_clearlocks_volume(dict, op_errstr);
+ break;
+
+ case GD_OP_COPY_FILE:
+ ret = glusterd_op_stage_copy_file(dict, op_errstr);
+ break;
+
+ case GD_OP_SYS_EXEC:
+ ret = glusterd_op_stage_sys_exec(dict, op_errstr);
+ break;
+
+ case GD_OP_BARRIER:
+ ret = glusterd_op_stage_barrier(dict, op_errstr);
+ break;
+
+ case GD_OP_BITROT:
+ case GD_OP_SCRUB_STATUS:
+ case GD_OP_SCRUB_ONDEMAND:
+ ret = glusterd_op_stage_bitrot(dict, op_errstr, rsp_dict);
+ break;
+
+ default:
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
+ "Unknown op %s", gd_op_list[op]);
+ }
+
+ gf_msg_debug(this->name, 0, "OP = %d. Returning %d", op, ret);
+ return ret;
}
static void
-glusterd_wait_for_blockers (glusterd_conf_t *priv)
+glusterd_wait_for_blockers(glusterd_conf_t *priv)
{
- while (priv->blockers) {
- synclock_unlock (&priv->big_lock);
- sleep (1);
- synclock_lock (&priv->big_lock);
- }
+ while (priv->blockers) {
+ synclock_unlock(&priv->big_lock);
+ sleep(1);
+ synclock_lock(&priv->big_lock);
+ }
}
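
glusterd_wait_for_blockers is a deliberate polling loop: it drops the big lock so in-flight blockers can finish, sleeps a second, then retakes the lock before re-checking. The same drop-sleep-retake shape with plain pthreads (a simplified sketch; the real code uses synctask locks):

    #include <pthread.h>
    #include <unistd.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
    static int blockers; /* mutated elsewhere, always under big_lock */

    /* Called with big_lock held, returns with it held, like the original. */
    static void wait_for_blockers(void)
    {
        while (blockers) {
            pthread_mutex_unlock(&big_lock); /* let blockers drain */
            sleep(1);                        /* coarse one-second poll */
            pthread_mutex_lock(&big_lock);   /* re-check under the lock */
        }
    }

A condition variable signalled when blockers reaches zero would remove the worst-case one-second latency, at the cost of plumbing a signal into every decrement site.
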
int32_t
-glusterd_op_commit_perform (glusterd_op_t op, dict_t *dict, char **op_errstr,
- dict_t *rsp_dict)
+glusterd_op_commit_perform(glusterd_op_t op, dict_t *dict, char **op_errstr,
+ dict_t *rsp_dict)
{
- int ret = -1;
- xlator_t *this = THIS;
+ int ret = -1;
+ xlator_t *this = THIS;
- glusterd_op_commit_hook (op, dict, GD_COMMIT_HOOK_PRE);
- switch (op) {
- case GD_OP_CREATE_VOLUME:
- ret = glusterd_op_create_volume (dict, op_errstr);
- break;
-
- case GD_OP_START_VOLUME:
- ret = glusterd_op_start_volume (dict, op_errstr);
- break;
-
- case GD_OP_STOP_VOLUME:
- glusterd_wait_for_blockers (this->private);
- ret = glusterd_op_stop_volume (dict);
- break;
-
- case GD_OP_DELETE_VOLUME:
- glusterd_wait_for_blockers (this->private);
- ret = glusterd_op_delete_volume (dict);
- break;
-
- case GD_OP_ADD_BRICK:
- glusterd_wait_for_blockers (this->private);
- ret = glusterd_op_add_brick (dict, op_errstr);
- break;
-
- case GD_OP_REPLACE_BRICK:
- glusterd_wait_for_blockers (this->private);
- ret = glusterd_op_replace_brick (dict, rsp_dict);
- break;
-
- case GD_OP_SET_VOLUME:
- ret = glusterd_op_set_volume (dict, op_errstr);
- break;
-
-
- case GD_OP_RESET_VOLUME:
- ret = glusterd_op_reset_volume (dict, op_errstr);
- break;
-
- case GD_OP_REMOVE_BRICK:
- glusterd_wait_for_blockers (this->private);
- ret = glusterd_op_remove_brick (dict, op_errstr);
- break;
-
- case GD_OP_LOG_ROTATE:
- ret = glusterd_op_log_rotate (dict);
- break;
-
- case GD_OP_SYNC_VOLUME:
- ret = glusterd_op_sync_volume (dict, op_errstr, rsp_dict);
- break;
-
- case GD_OP_GSYNC_CREATE:
- ret = glusterd_op_gsync_create (dict, op_errstr,
- rsp_dict);
- break;
-
- case GD_OP_GSYNC_SET:
- ret = glusterd_op_gsync_set (dict, op_errstr, rsp_dict);
- break;
-
- case GD_OP_PROFILE_VOLUME:
- ret = glusterd_op_stats_volume (dict, op_errstr,
- rsp_dict);
- break;
-
- case GD_OP_QUOTA:
- ret = glusterd_op_quota (dict, op_errstr, rsp_dict);
- break;
-
- case GD_OP_STATUS_VOLUME:
- ret = glusterd_op_status_volume (dict, op_errstr, rsp_dict);
- break;
-
- case GD_OP_REBALANCE:
- case GD_OP_DEFRAG_BRICK_VOLUME:
- ret = glusterd_op_rebalance (dict, op_errstr, rsp_dict);
- break;
-
- case GD_OP_HEAL_VOLUME:
- ret = glusterd_op_heal_volume (dict, op_errstr);
- break;
-
- case GD_OP_STATEDUMP_VOLUME:
- ret = glusterd_op_statedump_volume (dict, op_errstr);
- break;
-
- case GD_OP_CLEARLOCKS_VOLUME:
- ret = glusterd_op_clearlocks_volume (dict, op_errstr,
- rsp_dict);
- break;
-
- case GD_OP_COPY_FILE:
- ret = glusterd_op_copy_file (dict, op_errstr);
- break;
-
- case GD_OP_SYS_EXEC:
- ret = glusterd_op_sys_exec (dict, op_errstr, rsp_dict);
- break;
-
- case GD_OP_BARRIER:
- ret = glusterd_op_barrier (dict, op_errstr);
- break;
-
- case GD_OP_BITROT:
- case GD_OP_SCRUB_STATUS:
- case GD_OP_SCRUB_ONDEMAND:
- ret = glusterd_op_bitrot (dict, op_errstr, rsp_dict);
- break;
-
- default:
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_INVALID_ENTRY, "Unknown op %s",
- gd_op_list[op]);
- break;
- }
-
- if (ret == 0)
- glusterd_op_commit_hook (op, dict, GD_COMMIT_HOOK_POST);
+ glusterd_op_commit_hook(op, dict, GD_COMMIT_HOOK_PRE);
+ switch (op) {
+ case GD_OP_CREATE_VOLUME:
+ ret = glusterd_op_create_volume(dict, op_errstr);
+ break;
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
-}
+ case GD_OP_START_VOLUME:
+ ret = glusterd_op_start_volume(dict, op_errstr);
+ break;
+ case GD_OP_STOP_VOLUME:
+ glusterd_wait_for_blockers(this->private);
+ ret = glusterd_op_stop_volume(dict);
+ break;
-static int
-glusterd_bricks_select_stop_volume (dict_t *dict, char **op_errstr,
- struct cds_list_head *selected)
-{
- int ret = 0;
- int flags = 0;
- char *volname = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_pending_node_t *pending_node = NULL;
-
- ret = glusterd_op_stop_volume_args_get (dict, &volname, &flags);
- if (ret)
- goto out;
+ case GD_OP_DELETE_VOLUME:
+ glusterd_wait_for_blockers(this->private);
+ ret = glusterd_op_delete_volume(dict);
+ break;
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, FMTSTR_CHECK_VOL_EXISTS,
- volname);
- gf_asprintf (op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname);
- goto out;
- }
+ case GD_OP_ADD_BRICK:
+ glusterd_wait_for_blockers(this->private);
+ ret = glusterd_op_add_brick(dict, op_errstr);
+ break;
- cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- if (glusterd_is_brick_started (brickinfo)) {
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- } else {
- pending_node->node = brickinfo;
- pending_node->type = GD_NODE_BRICK;
- cds_list_add_tail (&pending_node->list,
- selected);
- pending_node = NULL;
- }
- /*
- * This is not really the right place to do it, but
- * it's the most convenient.
- * TBD: move this to *after* the RPC
- */
- brickinfo->status = GF_BRICK_STOPPED;
- }
- }
+ case GD_OP_REPLACE_BRICK:
+ glusterd_wait_for_blockers(this->private);
+ ret = glusterd_op_replace_brick(dict, rsp_dict);
+ break;
-out:
- return ret;
-}
+ case GD_OP_SET_VOLUME:
+ ret = glusterd_op_set_volume(dict, op_errstr);
+ break;
-static int
-glusterd_bricks_select_remove_brick (dict_t *dict, char **op_errstr,
- struct cds_list_head *selected)
-{
- int ret = -1;
- char *volname = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- char *brick = NULL;
- int32_t count = 0;
- int32_t i = 1;
- char key[64] = {0,};
- int keylen;
- glusterd_pending_node_t *pending_node = NULL;
- int32_t command = 0;
- int32_t force = 0;
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
+ case GD_OP_RESET_VOLUME:
+ ret = glusterd_op_reset_volume(dict, op_errstr);
+ break;
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to get volume name");
- goto out;
- }
+ case GD_OP_REMOVE_BRICK:
+ glusterd_wait_for_blockers(this->private);
+ ret = glusterd_op_remove_brick(dict, op_errstr);
+ break;
- ret = glusterd_volinfo_find (volname, &volinfo);
+ case GD_OP_LOG_ROTATE:
+ ret = glusterd_op_log_rotate(dict);
+ break;
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, "Unable to allocate memory");
- goto out;
- }
+ case GD_OP_SYNC_VOLUME:
+ ret = glusterd_op_sync_volume(dict, op_errstr, rsp_dict);
+ break;
- ret = dict_get_int32n (dict, "count", SLEN ("count"), &count);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, -ret,
- GD_MSG_DICT_GET_FAILED, "Unable to get count");
- goto out;
- }
+ case GD_OP_GSYNC_CREATE:
+ ret = glusterd_op_gsync_create(dict, op_errstr, rsp_dict);
+ break;
- ret = dict_get_int32n (dict, "command", SLEN ("command"),
- &command);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, -ret,
- GD_MSG_DICT_GET_FAILED, "Unable to get command");
- goto out;
- }
+ case GD_OP_GSYNC_SET:
+ ret = glusterd_op_gsync_set(dict, op_errstr, rsp_dict);
+ break;
- if (command == GF_DEFRAG_CMD_DETACH_START)
- return glusterd_bricks_select_tier_volume(dict, op_errstr,
- selected);
+ case GD_OP_PROFILE_VOLUME:
+ ret = glusterd_op_stats_volume(dict, op_errstr, rsp_dict);
+ break;
- ret = dict_get_int32n (dict, "force", SLEN ("force"), &force);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_INFO, 0,
- GD_MSG_DICT_GET_FAILED, "force flag is not set");
- ret = 0;
- goto out;
- }
+ case GD_OP_QUOTA:
+ ret = glusterd_op_quota(dict, op_errstr, rsp_dict);
+ break;
- while ( i <= count) {
- keylen = snprintf (key, sizeof (key), "brick%d", i);
+ case GD_OP_STATUS_VOLUME:
+ ret = glusterd_op_status_volume(dict, op_errstr, rsp_dict);
+ break;
- ret = dict_get_strn (dict, key, keylen, &brick);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to get brick");
- goto out;
- }
+ case GD_OP_REBALANCE:
+ case GD_OP_DEFRAG_BRICK_VOLUME:
+ ret = glusterd_op_rebalance(dict, op_errstr, rsp_dict);
+ break;
- ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo,
- &brickinfo,
- _gf_false);
+ case GD_OP_HEAL_VOLUME:
+ ret = glusterd_op_heal_volume(dict, op_errstr);
+ break;
- if (ret)
- goto out;
+ case GD_OP_STATEDUMP_VOLUME:
+ ret = glusterd_op_statedump_volume(dict, op_errstr);
+ break;
- if (glusterd_is_brick_started (brickinfo)) {
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- } else {
- pending_node->node = brickinfo;
- pending_node->type = GD_NODE_BRICK;
- cds_list_add_tail (&pending_node->list,
- selected);
- pending_node = NULL;
- }
- /*
- * This is not really the right place to do it, but
- * it's the most convenient.
- * TBD: move this to *after* the RPC
- */
- brickinfo->status = GF_BRICK_STOPPED;
- }
- i++;
+ case GD_OP_CLEARLOCKS_VOLUME:
+ ret = glusterd_op_clearlocks_volume(dict, op_errstr, rsp_dict);
+ break;
+
+ case GD_OP_COPY_FILE:
+ ret = glusterd_op_copy_file(dict, op_errstr);
+ break;
+
+ case GD_OP_SYS_EXEC:
+ ret = glusterd_op_sys_exec(dict, op_errstr, rsp_dict);
+ break;
+
+ case GD_OP_BARRIER:
+ ret = glusterd_op_barrier(dict, op_errstr);
+ break;
+
+ case GD_OP_BITROT:
+ case GD_OP_SCRUB_STATUS:
+ case GD_OP_SCRUB_ONDEMAND:
+ ret = glusterd_op_bitrot(dict, op_errstr, rsp_dict);
+ break;
+
+ default:
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
+ "Unknown op %s", gd_op_list[op]);
+ break;
+ }
+
+ if (ret == 0)
+ glusterd_op_commit_hook(op, dict, GD_COMMIT_HOOK_POST);
+
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
+}
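
Stage and commit both funnel through one switch on the op enum, with the pre-commit hook run before dispatch and the post-commit hook run only on success. The same dispatch can also be expressed as a handler table, sketched here with hypothetical handlers rather than the glusterd ones:

    #include <stdio.h>

    typedef int (*op_handler_t)(const char *arg);

    static int do_create(const char *a) { printf("create %s\n", a); return 0; }
    static int do_start(const char *a)  { printf("start %s\n", a);  return 0; }

    enum op { OP_CREATE, OP_START, OP_MAX };

    static const op_handler_t handlers[OP_MAX] = {
        [OP_CREATE] = do_create,
        [OP_START]  = do_start,
    };

    static int commit_perform(enum op op, const char *arg)
    {
        /* pre-commit hook would run here */
        int ret = handlers[op] ? handlers[op](arg) : -1;
        if (ret == 0)   /* post-commit hook fires only on success, as above */
            printf("post-commit hook for op %d\n", op);
        return ret;
    }

    int main(void) { return commit_perform(OP_START, "gv0"); }
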
+
+static int
+glusterd_bricks_select_stop_volume(dict_t *dict, char **op_errstr,
+ struct cds_list_head *selected)
+{
+ int ret = 0;
+ int flags = 0;
+ char *volname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_pending_node_t *pending_node = NULL;
+
+ ret = glusterd_op_stop_volume_args_get(dict, &volname, &flags);
+ if (ret)
+ goto out;
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ FMTSTR_CHECK_VOL_EXISTS, volname);
+ gf_asprintf(op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname);
+ goto out;
+ }
+
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ if (glusterd_is_brick_started(brickinfo)) {
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ } else {
+ pending_node->node = brickinfo;
+ pending_node->type = GD_NODE_BRICK;
+ cds_list_add_tail(&pending_node->list, selected);
+ pending_node = NULL;
+ }
+ /*
+ * This is not really the right place to do it, but
+ * it's the most convenient.
+ * TBD: move this to *after* the RPC
+ */
+ brickinfo->status = GF_BRICK_STOPPED;
}
+ }
out:
- return ret;
+ return ret;
}
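
Brick selection builds a list of pending nodes: every started brick gets a heap-allocated glusterd_pending_node_t chained onto selected, and an allocation failure aborts the whole selection. The skeleton of that loop with generic types (a sketch; glusterd appends to the tail of an intrusive cds list, this prepends for brevity):

    #include <stdlib.h>

    struct node { void *payload; struct node *next; };

    static int select_started(void **bricks, int n, int (*is_started)(void *),
                              struct node **selected)
    {
        for (int i = 0; i < n; i++) {
            if (!is_started(bricks[i]))
                continue;
            struct node *p = calloc(1, sizeof(*p));
            if (!p)
                return -1;       /* mirrors the ret = -1 bail-out above */
            p->payload = bricks[i];
            p->next = *selected; /* prepend; glusterd appends instead */
            *selected = p;
        }
        return 0;
    }

    static int always_started(void *b) { (void)b; return 1; }

    int main(void)
    {
        void *bricks[2] = { "b0", "b1" };
        struct node *selected = NULL;
        int ret = select_started(bricks, 2, always_started, &selected);
        while (selected) {       /* tidy up the sketch's nodes */
            struct node *next = selected->next;
            free(selected);
            selected = next;
        }
        return ret;
    }
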
static int
-glusterd_bricks_select_profile_volume (dict_t *dict, char **op_errstr,
- struct cds_list_head *selected)
+glusterd_bricks_select_remove_brick(dict_t *dict, char **op_errstr,
+ struct cds_list_head *selected)
{
- int ret = -1;
- char *volname = NULL;
- char msg[2048] = {0,};
- glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
- int32_t stats_op = GF_CLI_STATS_NONE;
- glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_pending_node_t *pending_node = NULL;
- char *brick = NULL;
- int32_t pid = -1;
- char pidfile[PATH_MAX] = {0};
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
+ int ret = -1;
+ char *volname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ char *brick = NULL;
+ int32_t count = 0;
+ int32_t i = 1;
+ char key[64] = {
+ 0,
+ };
+ int keylen;
+ glusterd_pending_node_t *pending_node = NULL;
+ int32_t command = 0;
+ int32_t force = 0;
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+
+    if (ret) {
+        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+               "Unable to find volinfo for volume %s", volname);
+        goto out;
+    }
+
+ ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
+ "Unable to get count");
+ goto out;
+ }
+
+ ret = dict_get_int32n(dict, "command", SLEN("command"), &command);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
+ "Unable to get command");
+ goto out;
+ }
+
+ if (command == GF_DEFRAG_CMD_DETACH_START)
+ return glusterd_bricks_select_tier_volume(dict, op_errstr, selected);
+
+ ret = dict_get_int32n(dict, "force", SLEN("force"), &force);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
+ "force flag is not set");
+ ret = 0;
+ goto out;
+ }
+
+ while (i <= count) {
+ keylen = snprintf(key, sizeof(key), "brick%d", i);
+
+ ret = dict_get_strn(dict, key, keylen, &brick);
if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "volume name get failed");
- goto out;
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get brick");
+ goto out;
}
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- snprintf (msg, sizeof (msg), "Volume %s does not exists",
- volname);
+ ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo,
+ _gf_false);
- *op_errstr = gf_strdup (msg);
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, "%s", msg);
- goto out;
- }
+ if (ret)
+ goto out;
- ret = dict_get_int32n (dict, "op", SLEN ("op"), &stats_op);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "volume profile op get failed");
+ if (glusterd_is_brick_started(brickinfo)) {
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
goto out;
+ } else {
+ pending_node->node = brickinfo;
+ pending_node->type = GD_NODE_BRICK;
+ cds_list_add_tail(&pending_node->list, selected);
+ pending_node = NULL;
+ }
+ /*
+ * This is not really the right place to do it, but
+ * it's the most convenient.
+ * TBD: move this to *after* the RPC
+ */
+ brickinfo->status = GF_BRICK_STOPPED;
}
+ i++;
+ }
- switch (stats_op) {
+out:
+ return ret;
+}
+
+static int
+glusterd_bricks_select_profile_volume(dict_t *dict, char **op_errstr,
+ struct cds_list_head *selected)
+{
+ int ret = -1;
+ char *volname = NULL;
+ char msg[2048] = {
+ 0,
+ };
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+ int32_t stats_op = GF_CLI_STATS_NONE;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_pending_node_t *pending_node = NULL;
+ char *brick = NULL;
+ int32_t pid = -1;
+ char pidfile[PATH_MAX] = {0};
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "volume name get failed");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+    snprintf(msg, sizeof(msg), "Volume %s does not exist", volname);
+
+ *op_errstr = gf_strdup(msg);
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
+ goto out;
+ }
+
+ ret = dict_get_int32n(dict, "op", SLEN("op"), &stats_op);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "volume profile op get failed");
+ goto out;
+ }
+
+ switch (stats_op) {
case GF_CLI_STATS_START:
case GF_CLI_STATS_STOP:
- goto out;
- break;
+ goto out;
+ break;
case GF_CLI_STATS_INFO:
- ret = dict_get_str_boolean (dict, "nfs", _gf_false);
- if (ret) {
- if (!priv->nfs_svc.online) {
- ret = -1;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_NFS_SERVER_NOT_RUNNING,
- "NFS server"
- " is not running");
- goto out;
- }
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- }
- pending_node->node = &(priv->nfs_svc);
- pending_node->type = GD_NODE_NFS;
- cds_list_add_tail (&pending_node->list, selected);
- pending_node = NULL;
+ ret = dict_get_str_boolean(dict, "nfs", _gf_false);
+ if (ret) {
+ if (!priv->nfs_svc.online) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_NFS_SERVER_NOT_RUNNING,
+ "NFS server"
+ " is not running");
+ goto out;
+ }
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ }
+ pending_node->node = &(priv->nfs_svc);
+ pending_node->type = GD_NODE_NFS;
+ cds_list_add_tail(&pending_node->list, selected);
+ pending_node = NULL;
- ret = 0;
+ ret = 0;
+ goto out;
+ }
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ if (glusterd_is_brick_started(brickinfo)) {
+ /*
+ * In normal use, glusterd_is_brick_started
+ * will give us the answer we need. However,
+ * in our tests the brick gets detached behind
+ * our back, so we need to double-check this
+ * way.
+ */
+ GLUSTERD_GET_BRICK_PIDFILE(pidfile, volinfo, brickinfo,
+ priv);
+ if (!gf_is_service_running(pidfile, &pid)) {
+ continue;
+ }
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
goto out;
-
- }
- cds_list_for_each_entry (brickinfo, &volinfo->bricks,
- brick_list) {
- if (glusterd_is_brick_started (brickinfo)) {
- /*
- * In normal use, glusterd_is_brick_started
- * will give us the answer we need. However,
- * in our tests the brick gets detached behind
- * our back, so we need to double-check this
- * way.
- */
- GLUSTERD_GET_BRICK_PIDFILE (pidfile, volinfo,
- brickinfo, priv);
- if (!gf_is_service_running (pidfile, &pid)) {
- continue;
- }
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- } else {
- pending_node->node = brickinfo;
- pending_node->type = GD_NODE_BRICK;
- cds_list_add_tail (&pending_node->list,
- selected);
- pending_node = NULL;
- }
- }
+ } else {
+ pending_node->node = brickinfo;
+ pending_node->type = GD_NODE_BRICK;
+ cds_list_add_tail(&pending_node->list, selected);
+ pending_node = NULL;
+ }
}
- break;
+ }
+ break;
case GF_CLI_STATS_TOP:
- ret = dict_get_str_boolean (dict, "nfs", _gf_false);
- if (ret) {
- if (!priv->nfs_svc.online) {
- ret = -1;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_NFS_SERVER_NOT_RUNNING,
- "NFS server"
- " is not running");
- goto out;
- }
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- }
- pending_node->node = &(priv->nfs_svc);
- pending_node->type = GD_NODE_NFS;
- cds_list_add_tail (&pending_node->list, selected);
- pending_node = NULL;
-
- ret = 0;
- goto out;
-
- }
- ret = dict_get_strn (dict, "brick", SLEN ("brick"), &brick);
- if (!ret) {
- ret = glusterd_volume_brickinfo_get_by_brick
- (brick, volinfo, &brickinfo,
- _gf_true);
- if (ret)
- goto out;
-
- if (!glusterd_is_brick_started (brickinfo))
- goto out;
-
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- } else {
- pending_node->node = brickinfo;
- pending_node->type = GD_NODE_BRICK;
- cds_list_add_tail (&pending_node->list,
- selected);
- pending_node = NULL;
- goto out;
- }
+ ret = dict_get_str_boolean(dict, "nfs", _gf_false);
+ if (ret) {
+ if (!priv->nfs_svc.online) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_NFS_SERVER_NOT_RUNNING,
+ "NFS server"
+ " is not running");
+ goto out;
+ }
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
}
+ pending_node->node = &(priv->nfs_svc);
+ pending_node->type = GD_NODE_NFS;
+ cds_list_add_tail(&pending_node->list, selected);
+ pending_node = NULL;
+
ret = 0;
- cds_list_for_each_entry (brickinfo, &volinfo->bricks,
- brick_list) {
- if (glusterd_is_brick_started (brickinfo)) {
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- } else {
- pending_node->node = brickinfo;
- pending_node->type = GD_NODE_BRICK;
- cds_list_add_tail (&pending_node->list,
- selected);
- pending_node = NULL;
- }
- }
+ goto out;
+ }
+ ret = dict_get_strn(dict, "brick", SLEN("brick"), &brick);
+ if (!ret) {
+ ret = glusterd_volume_brickinfo_get_by_brick(
+ brick, volinfo, &brickinfo, _gf_true);
+ if (ret)
+ goto out;
+
+ if (!glusterd_is_brick_started(brickinfo))
+ goto out;
+
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ } else {
+ pending_node->node = brickinfo;
+ pending_node->type = GD_NODE_BRICK;
+ cds_list_add_tail(&pending_node->list, selected);
+ pending_node = NULL;
+ goto out;
+ }
+ }
+ ret = 0;
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ if (glusterd_is_brick_started(brickinfo)) {
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ } else {
+ pending_node->node = brickinfo;
+ pending_node->type = GD_NODE_BRICK;
+ cds_list_add_tail(&pending_node->list, selected);
+ pending_node = NULL;
+ }
}
- break;
+ }
+ break;
default:
- GF_ASSERT (0);
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_INVALID_ENTRY, "Invalid profile op: %d",
- stats_op);
- ret = -1;
- goto out;
- break;
- }
-
+ GF_ASSERT(0);
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
+ "Invalid profile op: %d", stats_op);
+ ret = -1;
+ goto out;
+ break;
+ }
out:
- gf_msg_debug ("glusterd", 0, "Returning %d", ret);
+ gf_msg_debug("glusterd", 0, "Returning %d", ret);
- return ret;
+ return ret;
}
int
-_get_hxl_children_count (glusterd_volinfo_t *volinfo)
+_get_hxl_children_count(glusterd_volinfo_t *volinfo)
{
- if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
- return volinfo->disperse_count;
- } else {
- return volinfo->replica_count;
- }
+ if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
+ return volinfo->disperse_count;
+ } else {
+ return volinfo->replica_count;
+ }
}
static int
-_add_hxlator_to_dict (dict_t *dict, glusterd_volinfo_t *volinfo, int index,
- int count)
+_add_hxlator_to_dict(dict_t *dict, glusterd_volinfo_t *volinfo, int index,
+ int count)
{
- int ret = -1;
- char key[64] = {0,};
- int keylen;
- char *xname = NULL;
- char *xl_type = 0;
-
- if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
- xl_type = "disperse";
- } else {
- xl_type = "replicate";
- }
- keylen = snprintf (key, sizeof (key), "xl-%d", count);
- ret = gf_asprintf (&xname, "%s-%s-%d", volinfo->volname, xl_type,
- index);
- if (ret == -1)
- goto out;
-
- ret = dict_set_dynstrn (dict, key, keylen, xname);
- if (ret)
- goto out;
-
- ret = dict_set_int32 (dict, xname, index);
+ int ret = -1;
+ char key[64] = {
+ 0,
+ };
+ int keylen;
+ char *xname = NULL;
+ char *xl_type = 0;
+
+ if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
+ xl_type = "disperse";
+ } else {
+ xl_type = "replicate";
+ }
+ keylen = snprintf(key, sizeof(key), "xl-%d", count);
+ ret = gf_asprintf(&xname, "%s-%s-%d", volinfo->volname, xl_type, index);
+ if (ret == -1)
+ goto out;
+
+ ret = dict_set_dynstrn(dict, key, keylen, xname);
+ if (ret)
+ goto out;
+
+ ret = dict_set_int32(dict, xname, index);
out:
- return ret;
+ return ret;
}
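+/* Translate the brick named by "per-replica-cmd-hostname" and
+ * "per-replica-cmd-path" in dict into the index of the replica set it
+ * belongs to. Returns -1 if the brick cannot be found.
+ */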
int
-get_replica_index_for_per_replica_cmd (glusterd_volinfo_t *volinfo,
- dict_t *dict)
+get_replica_index_for_per_replica_cmd(glusterd_volinfo_t *volinfo, dict_t *dict)
{
- int ret = 0;
- char *hostname = NULL;
- char *path = NULL;
- int index = 0;
- glusterd_brickinfo_t *brickinfo = NULL;
- int cmd_replica_index = -1;
- int replica_count = -1;
-
-
- if (!dict) {
- ret = -1;
- goto out;
- }
-
- ret = dict_get_strn (dict, "per-replica-cmd-hostname",
- SLEN ("per-replica-cmd-hostname"), &hostname);
- if (ret)
- goto out;
- ret = dict_get_strn (dict, "per-replica-cmd-path",
- SLEN ("per-replica-cmd-path"), &path);
- if (ret)
- goto out;
-
- replica_count = volinfo->replica_count;
-
- cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- if (gf_uuid_is_null (brickinfo->uuid))
- (void)glusterd_resolve_brick (brickinfo);
- if (!strcmp (brickinfo->path, path) &&
- !strcmp (brickinfo->hostname, hostname)) {
- cmd_replica_index = index/(replica_count);
- goto out;
- }
- index++;
- }
-
+ int ret = 0;
+ char *hostname = NULL;
+ char *path = NULL;
+ int index = 0;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ int cmd_replica_index = -1;
+ int replica_count = -1;
+
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_strn(dict, "per-replica-cmd-hostname",
+ SLEN("per-replica-cmd-hostname"), &hostname);
+ if (ret)
+ goto out;
+ ret = dict_get_strn(dict, "per-replica-cmd-path",
+ SLEN("per-replica-cmd-path"), &path);
+ if (ret)
+ goto out;
+
+ replica_count = volinfo->replica_count;
+
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ if (gf_uuid_is_null(brickinfo->uuid))
+ (void)glusterd_resolve_brick(brickinfo);
+ if (!strcmp(brickinfo->path, path) &&
+ !strcmp(brickinfo->hostname, hostname)) {
+ cmd_replica_index = index / (replica_count);
+ goto out;
+ }
+ index++;
+ }
out:
- if (ret)
- cmd_replica_index = -1;
+ if (ret)
+ cmd_replica_index = -1;
- return cmd_replica_index;
+ return cmd_replica_index;
}
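+/* Add the heal xlator owning the brick in "per-replica-cmd-path" to dict,
+ * but only when that brick is hosted on this node. Returns 1 on a match,
+ * 0 otherwise.
+ */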
int
-_select_hxlator_with_matching_brick (xlator_t *this,
- glusterd_volinfo_t *volinfo, dict_t *dict,
- int *index)
+_select_hxlator_with_matching_brick(xlator_t *this, glusterd_volinfo_t *volinfo,
+ dict_t *dict, int *index)
{
- char *path = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- int hxl_children = 0;
+ char *path = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ int hxl_children = 0;
- if (!dict ||
- dict_get_strn (dict, "per-replica-cmd-path",
- SLEN ("per-replica-cmd-path"), &path))
- return -1;
+ if (!dict || dict_get_strn(dict, "per-replica-cmd-path",
+ SLEN("per-replica-cmd-path"), &path))
+ return -1;
- hxl_children = _get_hxl_children_count (volinfo);
- if ((*index) == 0)
- (*index)++;
+ hxl_children = _get_hxl_children_count(volinfo);
+ if ((*index) == 0)
+ (*index)++;
- cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- if (gf_uuid_is_null (brickinfo->uuid))
- (void)glusterd_resolve_brick (brickinfo);
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ if (gf_uuid_is_null(brickinfo->uuid))
+ (void)glusterd_resolve_brick(brickinfo);
- if ((!gf_uuid_compare (MY_UUID, brickinfo->uuid)) &&
- (!strncmp (brickinfo->path, path, strlen(path)))) {
- _add_hxlator_to_dict (dict, volinfo,
- ((*index) - 1)/hxl_children, 0);
- return 1;
- }
- (*index)++;
+ if ((!gf_uuid_compare(MY_UUID, brickinfo->uuid)) &&
+ (!strncmp(brickinfo->path, path, strlen(path)))) {
+ _add_hxlator_to_dict(dict, volinfo, ((*index) - 1) / hxl_children,
+ 0);
+ return 1;
}
+ (*index)++;
+ }
- return 0;
+ return 0;
}
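+/* Walk the bricks in subvolume-sized groups and record every heal xlator
+ * that has at least one brick on this node.
+ */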
void
-_select_hxlators_with_local_bricks (xlator_t *this, glusterd_volinfo_t *volinfo,
- dict_t *dict, int *index,
- int *hxlator_count)
+_select_hxlators_with_local_bricks(xlator_t *this, glusterd_volinfo_t *volinfo,
+ dict_t *dict, int *index, int *hxlator_count)
{
- glusterd_brickinfo_t *brickinfo = NULL;
- int hxl_children = 0;
- gf_boolean_t add = _gf_false;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ int hxl_children = 0;
+ gf_boolean_t add = _gf_false;
- hxl_children = _get_hxl_children_count (volinfo);
-
- if ((*index) == 0)
- (*index)++;
+ hxl_children = _get_hxl_children_count(volinfo);
- cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- if (gf_uuid_is_null (brickinfo->uuid))
- (void)glusterd_resolve_brick (brickinfo);
+ if ((*index) == 0)
+ (*index)++;
- if (!gf_uuid_compare (MY_UUID, brickinfo->uuid))
- add = _gf_true;
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ if (gf_uuid_is_null(brickinfo->uuid))
+ (void)glusterd_resolve_brick(brickinfo);
- if ((*index) % hxl_children == 0) {
- if (add) {
- _add_hxlator_to_dict (dict, volinfo,
- ((*index) - 1)/hxl_children,
- (*hxlator_count));
- (*hxlator_count)++;
- }
- add = _gf_false;
- }
+ if (!gf_uuid_compare(MY_UUID, brickinfo->uuid))
+ add = _gf_true;
- (*index)++;
+ if ((*index) % hxl_children == 0) {
+ if (add) {
+ _add_hxlator_to_dict(dict, volinfo,
+ ((*index) - 1) / hxl_children,
+ (*hxlator_count));
+ (*hxlator_count)++;
+ }
+ add = _gf_false;
}
+ (*index)++;
+ }
}
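+/* For a full self-heal each subvolume is driven by a single peer: the one
+ * holding the brick with the numerically largest UUID. Record the
+ * subvolumes for which this node is that peer and return the running count.
+ */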
int
-_select_hxlators_for_full_self_heal (xlator_t *this,
- glusterd_volinfo_t *volinfo,
- dict_t *dict, int *index,
- int *hxlator_count)
+_select_hxlators_for_full_self_heal(xlator_t *this, glusterd_volinfo_t *volinfo,
+ dict_t *dict, int *index,
+ int *hxlator_count)
{
- glusterd_brickinfo_t *brickinfo = NULL;
- int hxl_children = 0;
- uuid_t candidate = {0};
-
- if ((*index) == 0)
- (*index)++;
- if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
- hxl_children = volinfo->disperse_count;
- } else {
- hxl_children = volinfo->replica_count;
- }
-
- cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- if (gf_uuid_is_null (brickinfo->uuid))
- (void)glusterd_resolve_brick (brickinfo);
-
- if (gf_uuid_compare (brickinfo->uuid, candidate) > 0)
- gf_uuid_copy (candidate, brickinfo->uuid);
-
- if ((*index) % hxl_children == 0) {
- if (!gf_uuid_compare (MY_UUID, candidate)) {
- _add_hxlator_to_dict (dict, volinfo,
- ((*index)-1)/hxl_children,
- (*hxlator_count));
- (*hxlator_count)++;
- }
- gf_uuid_clear (candidate);
- }
-
- (*index)++;
- }
- return *hxlator_count;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ int hxl_children = 0;
+ uuid_t candidate = {0};
+
+ if ((*index) == 0)
+ (*index)++;
+ if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
+ hxl_children = volinfo->disperse_count;
+ } else {
+ hxl_children = volinfo->replica_count;
+ }
+
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ if (gf_uuid_is_null(brickinfo->uuid))
+ (void)glusterd_resolve_brick(brickinfo);
+
+ if (gf_uuid_compare(brickinfo->uuid, candidate) > 0)
+ gf_uuid_copy(candidate, brickinfo->uuid);
+
+ if ((*index) % hxl_children == 0) {
+ if (!gf_uuid_compare(MY_UUID, candidate)) {
+ _add_hxlator_to_dict(dict, volinfo,
+ ((*index) - 1) / hxl_children,
+ (*hxlator_count));
+ (*hxlator_count)++;
+ }
+ gf_uuid_clear(candidate);
+ }
+
+ (*index)++;
+ }
+ return *hxlator_count;
}
-
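+/* Queue every started local brick of the volume as a pending node for the
+ * snapshot brick op.
+ */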
static int
-glusterd_bricks_select_snap (dict_t *dict, char **op_errstr,
- struct cds_list_head *selected)
+glusterd_bricks_select_snap(dict_t *dict, char **op_errstr,
+ struct cds_list_head *selected)
{
- int ret = -1;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
- glusterd_pending_node_t *pending_node = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- char *volname = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- int brick_index = -1;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to get"
- " volname");
- goto out;
- }
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret)
- goto out;
-
- cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- brick_index++;
- if (gf_uuid_compare (brickinfo->uuid, MY_UUID) ||
- !glusterd_is_brick_started (brickinfo)) {
- continue;
- }
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- }
- pending_node->node = brickinfo;
- pending_node->type = GD_NODE_BRICK;
- pending_node->index = brick_index;
- cds_list_add_tail (&pending_node->list, selected);
- pending_node = NULL;
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ glusterd_pending_node_t *pending_node = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ char *volname = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ int brick_index = -1;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get"
+ " volname");
+ goto out;
+ }
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret)
+ goto out;
+
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ brick_index++;
+ if (gf_uuid_compare(brickinfo->uuid, MY_UUID) ||
+ !glusterd_is_brick_started(brickinfo)) {
+ continue;
+ }
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
}
+ pending_node->node = brickinfo;
+ pending_node->type = GD_NODE_BRICK;
+ pending_node->index = brick_index;
+ cds_list_add_tail(&pending_node->list, selected);
+ pending_node = NULL;
+ }
- ret = 0;
+ ret = 0;
out:
- gf_msg_debug (THIS->name, 0, "Returning ret %d", ret);
- return ret;
+ gf_msg_debug(THIS->name, 0, "Returning ret %d", ret);
+ return ret;
}
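+/* With the self-heal daemon down, fill "<index>-status" and
+ * "<index>-shd-status" entries for every local brick so the CLI can report
+ * the daemon as not running.
+ */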
static int
-fill_shd_status_for_local_bricks (dict_t *dict, glusterd_volinfo_t *volinfo,
- cli_cmd_type type, int *index,
- dict_t *req_dict)
+fill_shd_status_for_local_bricks(dict_t *dict, glusterd_volinfo_t *volinfo,
+ cli_cmd_type type, int *index,
+ dict_t *req_dict)
{
- glusterd_brickinfo_t *brickinfo = NULL;
- char *msg = "self-heal-daemon is not running on";
- char key[1024] = {0,};
- int keylen;
- char value[1024] = {0,};
- int ret = 0;
- xlator_t *this = NULL;
- int cmd_replica_index = -1;
-
- this = THIS;
-
- if (type == PER_HEAL_XL) {
- cmd_replica_index = get_replica_index_for_per_replica_cmd
- (volinfo, req_dict);
- if (cmd_replica_index == -1) {
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_REPLICA_INDEX_GET_FAIL,
- "Could not find the "
- "replica index for per replica type command");
- ret = -1;
- goto out;
- }
+ glusterd_brickinfo_t *brickinfo = NULL;
+ char *msg = "self-heal-daemon is not running on";
+ char key[1024] = {
+ 0,
+ };
+ int keylen;
+ char value[1024] = {
+ 0,
+ };
+ int ret = 0;
+ xlator_t *this = NULL;
+ int cmd_replica_index = -1;
+
+ this = THIS;
+
+ if (type == PER_HEAL_XL) {
+ cmd_replica_index = get_replica_index_for_per_replica_cmd(volinfo,
+ req_dict);
+ if (cmd_replica_index == -1) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_REPLICA_INDEX_GET_FAIL,
+ "Could not find the "
+ "replica index for per replica type command");
+ ret = -1;
+ goto out;
+ }
+ }
+
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ if (gf_uuid_is_null(brickinfo->uuid))
+ (void)glusterd_resolve_brick(brickinfo);
+
+ if (gf_uuid_compare(MY_UUID, brickinfo->uuid)) {
+ (*index)++;
+ continue;
}
- cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- if (gf_uuid_is_null (brickinfo->uuid))
- (void)glusterd_resolve_brick (brickinfo);
-
- if (gf_uuid_compare (MY_UUID, brickinfo->uuid)) {
- (*index)++;
- continue;
- }
-
- if (type == PER_HEAL_XL) {
- if (cmd_replica_index != ((*index)/volinfo->replica_count)) {
- (*index)++;
- continue;
- }
-
- }
- keylen = snprintf (key, sizeof (key), "%d-status", (*index));
- snprintf (value, sizeof (value), "%s %s",msg,
- uuid_utoa(MY_UUID));
- ret = dict_set_dynstrn (dict, key, keylen, gf_strdup(value));
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "Unable to"
- "set the dictionary for shd status msg");
- goto out;
- }
- keylen = snprintf (key, sizeof (key), "%d-shd-status",
- (*index));
- ret = dict_set_nstrn (dict, key, keylen, "off", SLEN ("off"));
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED, "Unable to"
- " set dictionary for shd status msg");
- goto out;
- }
-
+ if (type == PER_HEAL_XL) {
+ if (cmd_replica_index != ((*index) / volinfo->replica_count)) {
(*index)++;
+ continue;
+ }
+ }
+ keylen = snprintf(key, sizeof(key), "%d-status", (*index));
+ snprintf(value, sizeof(value), "%s %s", msg, uuid_utoa(MY_UUID));
+ ret = dict_set_dynstrn(dict, key, keylen, gf_strdup(value));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+                   "Unable to "
+ "set the dictionary for shd status msg");
+ goto out;
+ }
+ keylen = snprintf(key, sizeof(key), "%d-shd-status", (*index));
+ ret = dict_set_nstrn(dict, key, keylen, "off", SLEN("off"));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Unable to"
+ " set dictionary for shd status msg");
+ goto out;
}
-out:
- return ret;
+ (*index)++;
+ }
+out:
+ return ret;
}
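+/* Pick the heal xlators this node must act on for the given heal op and
+ * return their count. If the self-heal daemon is down, fill per-brick
+ * status entries into rsp_dict instead.
+ */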
int
-glusterd_shd_select_brick_xlator (dict_t *dict, gf_xl_afr_op_t heal_op,
- glusterd_volinfo_t *volinfo, int *index,
- int *hxlator_count, dict_t *rsp_dict)
+glusterd_shd_select_brick_xlator(dict_t *dict, gf_xl_afr_op_t heal_op,
+ glusterd_volinfo_t *volinfo, int *index,
+ int *hxlator_count, dict_t *rsp_dict)
{
- int ret = -1;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
- switch (heal_op) {
+ switch (heal_op) {
case GF_SHD_OP_INDEX_SUMMARY:
case GF_SHD_OP_STATISTICS_HEAL_COUNT:
- if (!priv->shd_svc.online) {
+ if (!priv->shd_svc.online) {
if (!rsp_dict) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_OPCTX_NULL, "Received "
- "empty ctx.");
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OPCTX_NULL,
+ "Received "
+ "empty ctx.");
+ goto out;
}
- ret = fill_shd_status_for_local_bricks (rsp_dict,
- volinfo,
- ALL_HEAL_XL,
- index,
- dict);
+ ret = fill_shd_status_for_local_bricks(
+ rsp_dict, volinfo, ALL_HEAL_XL, index, dict);
if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SHD_STATUS_SET_FAIL, "Unable to "
- "fill the shd status for the local "
- "bricks");
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SHD_STATUS_SET_FAIL,
+ "Unable to "
+ "fill the shd status for the local "
+ "bricks");
goto out;
- }
- break;
+ }
+ break;
case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
- if (!priv->shd_svc.online) {
+ if (!priv->shd_svc.online) {
if (!rsp_dict) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_OPCTX_NULL, "Received "
- "empty ctx.");
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OPCTX_NULL,
+ "Received "
+ "empty ctx.");
+ goto out;
}
- ret = fill_shd_status_for_local_bricks (rsp_dict,
- volinfo,
- PER_HEAL_XL,
- index,
- dict);
+ ret = fill_shd_status_for_local_bricks(
+ rsp_dict, volinfo, PER_HEAL_XL, index, dict);
if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SHD_STATUS_SET_FAIL, "Unable to "
- "fill the shd status for the local"
- " bricks.");
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_SHD_STATUS_SET_FAIL,
+ "Unable to "
+ "fill the shd status for the local"
+ " bricks.");
goto out;
-
- }
- break;
+ }
+ break;
default:
- break;
- }
-
+ break;
+ }
- switch (heal_op) {
+ switch (heal_op) {
case GF_SHD_OP_HEAL_FULL:
- _select_hxlators_for_full_self_heal (this, volinfo, dict,
- index, hxlator_count);
- break;
+ _select_hxlators_for_full_self_heal(this, volinfo, dict, index,
+ hxlator_count);
+ break;
case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
- (*hxlator_count) += _select_hxlator_with_matching_brick (this,
- volinfo,
- dict,
- index);
- break;
+ (*hxlator_count) += _select_hxlator_with_matching_brick(
+ this, volinfo, dict, index);
+ break;
default:
- _select_hxlators_with_local_bricks (this, volinfo, dict,
- index, hxlator_count);
- break;
- }
- ret = (*hxlator_count);
+ _select_hxlators_with_local_bricks(this, volinfo, dict, index,
+ hxlator_count);
+ break;
+ }
+ ret = (*hxlator_count);
out:
- return ret;
+ return ret;
}
-
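+/* Build the node list for a heal request: count the affected heal xlators
+ * (both sub-tiers for tiered volumes) and, when there are any, queue the
+ * local self-heal daemon as the pending node.
+ */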
static int
-glusterd_bricks_select_heal_volume (dict_t *dict, char **op_errstr,
- struct cds_list_head *selected,
- dict_t *rsp_dict)
+glusterd_bricks_select_heal_volume(dict_t *dict, char **op_errstr,
+ struct cds_list_head *selected,
+ dict_t *rsp_dict)
{
- int ret = -1;
- char *volname = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_volinfo_t *dup_volinfo = NULL;
- xlator_t *this = NULL;
- char msg[2048] = {0,};
- glusterd_pending_node_t *pending_node = NULL;
- gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID;
- int hxlator_count = 0;
- int index = 0;
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "volume name get failed");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- snprintf (msg, sizeof (msg), "Volume %s does not exist",
- volname);
-
- *op_errstr = gf_strdup (msg);
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, "%s", msg);
- goto out;
- }
-
- ret = dict_get_int32n (dict, "heal-op", SLEN ("heal-op"),
- (int32_t *)&heal_op);
- if (ret || (heal_op == GF_SHD_OP_INVALID)) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "heal op invalid");
- goto out;
- }
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- ret = glusterd_create_sub_tier_volinfo (volinfo, &dup_volinfo,
- _gf_false, volname);
- if (ret < 0)
- goto out;
-
- ret = glusterd_shd_select_brick_xlator (dict, heal_op,
- dup_volinfo,
- &index, &hxlator_count,
- rsp_dict);
- glusterd_volinfo_delete (dup_volinfo);
- if (ret < 0)
- goto out;
- ret = glusterd_create_sub_tier_volinfo (volinfo, &dup_volinfo,
- _gf_true, volname);
- if (ret < 0)
- goto out;
- ret = glusterd_shd_select_brick_xlator (dict, heal_op,
- dup_volinfo,
- &index, &hxlator_count,
- rsp_dict);
- glusterd_volinfo_delete (dup_volinfo);
- if (ret < 0)
- goto out;
- } else {
- ret = glusterd_shd_select_brick_xlator (dict, heal_op,
- volinfo,
- &index, &hxlator_count,
- rsp_dict);
- if (ret < 0)
- goto out;
- }
-
- if (!hxlator_count)
- goto out;
- if (hxlator_count == -1) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_XLATOR_COUNT_GET_FAIL, "Could not determine the"
- "translator count");
- ret = -1;
- goto out;
- }
-
- ret = dict_set_int32n (dict, "count", SLEN ("count"),
- hxlator_count);
- if (ret)
- goto out;
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- } else {
- pending_node->node = &(priv->shd_svc);
- pending_node->type = GD_NODE_SHD;
- cds_list_add_tail (&pending_node->list, selected);
- pending_node = NULL;
- }
+ int ret = -1;
+ char *volname = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_volinfo_t *dup_volinfo = NULL;
+ xlator_t *this = NULL;
+ char msg[2048] = {
+ 0,
+ };
+ glusterd_pending_node_t *pending_node = NULL;
+ gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID;
+ int hxlator_count = 0;
+ int index = 0;
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "volume name get failed");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ snprintf(msg, sizeof(msg), "Volume %s does not exist", volname);
+
+ *op_errstr = gf_strdup(msg);
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
+ goto out;
+ }
+
+ ret = dict_get_int32n(dict, "heal-op", SLEN("heal-op"),
+ (int32_t *)&heal_op);
+ if (ret || (heal_op == GF_SHD_OP_INVALID)) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "heal op invalid");
+ goto out;
+ }
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ ret = glusterd_create_sub_tier_volinfo(volinfo, &dup_volinfo, _gf_false,
+ volname);
+ if (ret < 0)
+ goto out;
+
+ ret = glusterd_shd_select_brick_xlator(
+ dict, heal_op, dup_volinfo, &index, &hxlator_count, rsp_dict);
+ glusterd_volinfo_delete(dup_volinfo);
+ if (ret < 0)
+ goto out;
+ ret = glusterd_create_sub_tier_volinfo(volinfo, &dup_volinfo, _gf_true,
+ volname);
+ if (ret < 0)
+ goto out;
+ ret = glusterd_shd_select_brick_xlator(
+ dict, heal_op, dup_volinfo, &index, &hxlator_count, rsp_dict);
+ glusterd_volinfo_delete(dup_volinfo);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = glusterd_shd_select_brick_xlator(dict, heal_op, volinfo, &index,
+ &hxlator_count, rsp_dict);
+ if (ret < 0)
+ goto out;
+ }
+
+ if (!hxlator_count)
+ goto out;
+ if (hxlator_count == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_XLATOR_COUNT_GET_FAIL,
+               "Could not determine the "
+ "translator count");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_int32n(dict, "count", SLEN("count"), hxlator_count);
+ if (ret)
+ goto out;
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ } else {
+ pending_node->node = &(priv->shd_svc);
+ pending_node->type = GD_NODE_SHD;
+ cds_list_add_tail(&pending_node->list, selected);
+ pending_node = NULL;
+ }
out:
- gf_msg_debug (THIS->name, 0, "Returning ret %d", ret);
- return ret;
-
+ gf_msg_debug(THIS->name, 0, "Returning ret %d", ret);
+ return ret;
}
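+/* Queue the volume's tier daemon as a pending node, but only when this node
+ * hosts at least one brick of the volume.
+ */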
int
-glusterd_bricks_select_tier_volume (dict_t *dict, char **op_errstr,
- struct cds_list_head *selected)
+glusterd_bricks_select_tier_volume(dict_t *dict, char **op_errstr,
+ struct cds_list_head *selected)
{
- int ret = -1;
- char *volname = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
- char msg[2048] = {0,};
- glusterd_pending_node_t *pending_node = NULL;
- glusterd_brickinfo_t *brick = NULL;
- gf_boolean_t retval = _gf_false;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO (THIS->name, this, out);
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "volume name get failed");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- snprintf (msg, sizeof (msg), "Volume %s does not exist",
- volname);
-
- *op_errstr = gf_strdup (msg);
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, "%s", msg);
- goto out;
- }
- /*check if this node needs tierd*/
- cds_list_for_each_entry (brick, &volinfo->bricks, brick_list) {
- if (gf_uuid_compare (MY_UUID, brick->uuid) == 0) {
- retval = _gf_true;
- break;
- }
- }
-
- if (!retval)
- goto out;
-
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- } else {
- pending_node->node = volinfo;
- pending_node->type = GD_NODE_TIERD;
- cds_list_add_tail (&pending_node->list, selected);
- pending_node = NULL;
- }
- ret = 0;
+ int ret = -1;
+ char *volname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+ char msg[2048] = {
+ 0,
+ };
+ glusterd_pending_node_t *pending_node = NULL;
+ glusterd_brickinfo_t *brick = NULL;
+ gf_boolean_t retval = _gf_false;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO(THIS->name, this, out);
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "volume name get failed");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ snprintf(msg, sizeof(msg), "Volume %s does not exist", volname);
+
+ *op_errstr = gf_strdup(msg);
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
+ goto out;
+ }
+ /*check if this node needs tierd*/
+ cds_list_for_each_entry(brick, &volinfo->bricks, brick_list)
+ {
+ if (gf_uuid_compare(MY_UUID, brick->uuid) == 0) {
+ retval = _gf_true;
+ break;
+ }
+ }
+
+ if (!retval)
+ goto out;
+
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ } else {
+ pending_node->node = volinfo;
+ pending_node->type = GD_NODE_TIERD;
+ cds_list_add_tail(&pending_node->list, selected);
+ pending_node = NULL;
+ }
+ ret = 0;
out:
- return ret;
+ return ret;
}
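+/* Queue the volume itself as a GD_NODE_REBALANCE pending node so the
+ * rebalance process receives the brick op.
+ */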
int
-glusterd_bricks_select_rebalance_volume (dict_t *dict, char **op_errstr,
- struct cds_list_head *selected)
+glusterd_bricks_select_rebalance_volume(dict_t *dict, char **op_errstr,
+ struct cds_list_head *selected)
{
- int ret = -1;
- char *volname = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
- char msg[2048] = {0,};
- glusterd_pending_node_t *pending_node = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "volume name get failed");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- snprintf (msg, sizeof (msg), "Volume %s does not exist",
- volname);
-
- *op_errstr = gf_strdup (msg);
- gf_msg ("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, "%s", msg);
- goto out;
- }
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- } else {
- pending_node->node = volinfo;
- pending_node->type = GD_NODE_REBALANCE;
- cds_list_add_tail (&pending_node->list, selected);
- pending_node = NULL;
- }
+ int ret = -1;
+ char *volname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+ char msg[2048] = {
+ 0,
+ };
+ glusterd_pending_node_t *pending_node = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "volume name get failed");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ snprintf(msg, sizeof(msg), "Volume %s does not exist", volname);
+
+ *op_errstr = gf_strdup(msg);
+ gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
+ goto out;
+ }
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ } else {
+ pending_node->node = volinfo;
+ pending_node->type = GD_NODE_REBALANCE;
+ cds_list_add_tail(&pending_node->list, selected);
+ pending_node = NULL;
+ }
out:
- return ret;
+ return ret;
}
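+/* Route a status request to the right local target: a named brick, one of
+ * the per-node daemons (nfs/shd/quotad/bitd/scrub/tierd/snapd), or, by
+ * default, every started local brick.
+ */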
static int
-glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
- struct cds_list_head *selected)
+glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
+ struct cds_list_head *selected)
{
- int ret = -1;
- int cmd = 0;
- int brick_index = -1;
- char *volname = NULL;
- char *brickname = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_pending_node_t *pending_node = NULL;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
-
- GF_ASSERT (dict);
-
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
-
- ret = dict_get_int32n (dict, "cmd", SLEN ("cmd"), &cmd);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to get status type");
- goto out;
- }
-
- if (cmd & GF_CLI_STATUS_ALL)
- goto out;
-
- switch (cmd & GF_CLI_STATUS_MASK) {
+ int ret = -1;
+ int cmd = 0;
+ int brick_index = -1;
+ char *volname = NULL;
+ char *brickname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_pending_node_t *pending_node = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ GF_ASSERT(dict);
+
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ ret = dict_get_int32n(dict, "cmd", SLEN("cmd"), &cmd);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get status type");
+ goto out;
+ }
+
+ if (cmd & GF_CLI_STATUS_ALL)
+ goto out;
+
+ switch (cmd & GF_CLI_STATUS_MASK) {
case GF_CLI_STATUS_MEM:
case GF_CLI_STATUS_CLIENTS:
case GF_CLI_STATUS_INODE:
@@ -7464,1125 +7153,1117 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
case GF_CLI_STATUS_BITD:
case GF_CLI_STATUS_SCRUB:
case GF_CLI_STATUS_CLIENT_LIST:
- break;
+ break;
default:
- goto out;
- }
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to get volname");
- goto out;
- }
- ret = glusterd_volinfo_find (volname, &volinfo);
+ goto out;
+ }
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volname");
+ goto out;
+ }
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ goto out;
+ }
+
+ if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
+ ret = dict_get_strn(dict, "brick", SLEN("brick"), &brickname);
if (ret) {
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get brick");
+ goto out;
}
+ ret = glusterd_volume_brickinfo_get_by_brick(brickname, volinfo,
+ &brickinfo, _gf_false);
+ if (ret)
+ goto out;
- if ( (cmd & GF_CLI_STATUS_BRICK) != 0) {
- ret = dict_get_strn (dict, "brick", SLEN ("brick"),
- &brickname);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED,
- "Unable to get brick");
- goto out;
- }
- ret = glusterd_volume_brickinfo_get_by_brick (brickname,
- volinfo,
- &brickinfo,
- _gf_false);
- if (ret)
- goto out;
-
- if (gf_uuid_compare (brickinfo->uuid, MY_UUID)||
- !glusterd_is_brick_started (brickinfo))
- goto out;
+ if (gf_uuid_compare(brickinfo->uuid, MY_UUID) ||
+ !glusterd_is_brick_started(brickinfo))
+ goto out;
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- }
- pending_node->node = brickinfo;
- pending_node->type = GD_NODE_BRICK;
- pending_node->index = 0;
- cds_list_add_tail (&pending_node->list, selected);
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ }
+ pending_node->node = brickinfo;
+ pending_node->type = GD_NODE_BRICK;
+ pending_node->index = 0;
+ cds_list_add_tail(&pending_node->list, selected);
- ret = 0;
- } else if ((cmd & GF_CLI_STATUS_NFS) != 0) {
- if (!priv->nfs_svc.online) {
- ret = -1;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_NFS_SERVER_NOT_RUNNING,
- "NFS server is not running");
- goto out;
- }
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- }
- pending_node->node = &(priv->nfs_svc);
- pending_node->type = GD_NODE_NFS;
- pending_node->index = 0;
- cds_list_add_tail (&pending_node->list, selected);
+ ret = 0;
+ } else if ((cmd & GF_CLI_STATUS_NFS) != 0) {
+ if (!priv->nfs_svc.online) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NFS_SERVER_NOT_RUNNING,
+ "NFS server is not running");
+ goto out;
+ }
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ }
+ pending_node->node = &(priv->nfs_svc);
+ pending_node->type = GD_NODE_NFS;
+ pending_node->index = 0;
+ cds_list_add_tail(&pending_node->list, selected);
- ret = 0;
- } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
- if (!priv->shd_svc.online) {
- ret = -1;
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SELF_HEALD_DISABLED,
- "Self-heal daemon is not running");
- goto out;
- }
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- }
- pending_node->node = &(priv->shd_svc);
- pending_node->type = GD_NODE_SHD;
- pending_node->index = 0;
- cds_list_add_tail (&pending_node->list, selected);
+ ret = 0;
+ } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
+ if (!priv->shd_svc.online) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SELF_HEALD_DISABLED,
+ "Self-heal daemon is not running");
+ goto out;
+ }
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ }
+ pending_node->node = &(priv->shd_svc);
+ pending_node->type = GD_NODE_SHD;
+ pending_node->index = 0;
+ cds_list_add_tail(&pending_node->list, selected);
- ret = 0;
- } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
- if (!priv->quotad_svc.online) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_QUOTAD_NOT_RUNNING, "Quotad is not "
- "running");
- ret = -1;
- goto out;
- }
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- }
- pending_node->node = &(priv->quotad_svc);
- pending_node->type = GD_NODE_QUOTAD;
- pending_node->index = 0;
- cds_list_add_tail (&pending_node->list, selected);
+ ret = 0;
+ } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
+ if (!priv->quotad_svc.online) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_QUOTAD_NOT_RUNNING,
+ "Quotad is not "
+ "running");
+ ret = -1;
+ goto out;
+ }
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ }
+ pending_node->node = &(priv->quotad_svc);
+ pending_node->type = GD_NODE_QUOTAD;
+ pending_node->index = 0;
+ cds_list_add_tail(&pending_node->list, selected);
- ret = 0;
- } else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
- if (!priv->bitd_svc.online) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BITROT_NOT_RUNNING, "Bitrot is not "
- "running");
- ret = -1;
- goto out;
- }
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- }
- pending_node->node = &(priv->bitd_svc);
- pending_node->type = GD_NODE_BITD;
- pending_node->index = 0;
- cds_list_add_tail (&pending_node->list, selected);
+ ret = 0;
+ } else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
+ if (!priv->bitd_svc.online) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BITROT_NOT_RUNNING,
+ "Bitrot is not "
+ "running");
+ ret = -1;
+ goto out;
+ }
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ }
+ pending_node->node = &(priv->bitd_svc);
+ pending_node->type = GD_NODE_BITD;
+ pending_node->index = 0;
+ cds_list_add_tail(&pending_node->list, selected);
- ret = 0;
- } else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
- if (!priv->scrub_svc.online) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SCRUBBER_NOT_RUNNING, "Scrubber is not "
- "running");
- ret = -1;
- goto out;
- }
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- }
- pending_node->node = &(priv->scrub_svc);
- pending_node->type = GD_NODE_SCRUB;
- pending_node->index = 0;
- cds_list_add_tail (&pending_node->list, selected);
+ ret = 0;
+ } else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
+ if (!priv->scrub_svc.online) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SCRUBBER_NOT_RUNNING,
+ "Scrubber is not "
+ "running");
+ ret = -1;
+ goto out;
+ }
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ }
+ pending_node->node = &(priv->scrub_svc);
+ pending_node->type = GD_NODE_SCRUB;
+ pending_node->index = 0;
+ cds_list_add_tail(&pending_node->list, selected);
- ret = 0;
- } else if ((cmd & GF_CLI_STATUS_TIERD) != 0) {
- if (!volinfo->tierd.svc.online) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_TIERD_NOT_RUNNING, "tierd is not "
- "running");
- ret = -1;
- goto out;
- }
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- gf_msg (this->name, GF_LOG_ERROR, ENOMEM,
- GD_MSG_NO_MEMORY, "failed to allocate "
- "memory for pending node");
- ret = -1;
- goto out;
- }
+ ret = 0;
+ } else if ((cmd & GF_CLI_STATUS_TIERD) != 0) {
+ if (!volinfo->tierd.svc.online) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TIERD_NOT_RUNNING,
+ "tierd is not "
+ "running");
+ ret = -1;
+ goto out;
+ }
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "failed to allocate "
+ "memory for pending node");
+ ret = -1;
+ goto out;
+ }
- pending_node->node = (void *)(&volinfo->tierd);
- pending_node->type = GD_NODE_TIERD;
- pending_node->index = 0;
- cds_list_add_tail (&pending_node->list, selected);
+ pending_node->node = (void *)(&volinfo->tierd);
+ pending_node->type = GD_NODE_TIERD;
+ pending_node->index = 0;
+ cds_list_add_tail(&pending_node->list, selected);
- ret = 0;
- } else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
- if (!volinfo->snapd.svc.online) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_SNAPD_NOT_RUNNING, "snapd is not "
- "running");
- ret = -1;
- goto out;
- }
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- gf_msg (this->name, GF_LOG_ERROR, ENOMEM,
- GD_MSG_NO_MEMORY, "failed to allocate "
- "memory for pending node");
- ret = -1;
- goto out;
- }
+ ret = 0;
+ } else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
+ if (!volinfo->snapd.svc.online) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAPD_NOT_RUNNING,
+ "snapd is not "
+ "running");
+ ret = -1;
+ goto out;
+ }
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "failed to allocate "
+ "memory for pending node");
+ ret = -1;
+ goto out;
+ }
- pending_node->node = (void *)(&volinfo->snapd);
- pending_node->type = GD_NODE_SNAPD;
- pending_node->index = 0;
- cds_list_add_tail (&pending_node->list, selected);
+ pending_node->node = (void *)(&volinfo->snapd);
+ pending_node->type = GD_NODE_SNAPD;
+ pending_node->index = 0;
+ cds_list_add_tail(&pending_node->list, selected);
- ret = 0;
- } else {
- cds_list_for_each_entry (brickinfo, &volinfo->bricks,
- brick_list) {
- brick_index++;
- if (gf_uuid_compare (brickinfo->uuid, MY_UUID) ||
- !glusterd_is_brick_started (brickinfo)) {
- continue;
- }
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- gf_msg (THIS->name, GF_LOG_ERROR, ENOMEM,
- GD_MSG_NO_MEMORY,
- "Unable to allocate memory");
- goto out;
- }
- pending_node->node = brickinfo;
- pending_node->type = GD_NODE_BRICK;
- pending_node->index = brick_index;
- cds_list_add_tail (&pending_node->list, selected);
- pending_node = NULL;
- }
+ ret = 0;
+ } else {
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ brick_index++;
+ if (gf_uuid_compare(brickinfo->uuid, MY_UUID) ||
+ !glusterd_is_brick_started(brickinfo)) {
+ continue;
+ }
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
+ "Unable to allocate memory");
+ goto out;
+ }
+ pending_node->node = brickinfo;
+ pending_node->type = GD_NODE_BRICK;
+ pending_node->index = brick_index;
+ cds_list_add_tail(&pending_node->list, selected);
+ pending_node = NULL;
}
+ }
out:
- return ret;
+ return ret;
}
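+/* Queue the scrubber daemon for scrub status/on-demand ops; succeed without
+ * selecting anything when the scrubber is not running.
+ */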
static int
-glusterd_bricks_select_scrub (dict_t *dict, char **op_errstr,
- struct cds_list_head *selected)
+glusterd_bricks_select_scrub(dict_t *dict, char **op_errstr,
+ struct cds_list_head *selected)
{
- int ret = -1;
- char *volname = NULL;
- char msg[2048] = {0,};
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_pending_node_t *pending_node = NULL;
-
- this = THIS;
- priv = this->private;
- GF_ASSERT (this);
- GF_ASSERT (priv);
-
- GF_ASSERT (dict);
-
- ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Unable to get"
- " volname");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- snprintf (msg, sizeof (msg), "Volume %s does not exist",
- volname);
-
- *op_errstr = gf_strdup (msg);
- gf_msg (this->name, GF_LOG_ERROR, EINVAL,
- GD_MSG_VOL_NOT_FOUND, "%s", msg);
- goto out;
- }
+ int ret = -1;
+ char *volname = NULL;
+ char msg[2048] = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_pending_node_t *pending_node = NULL;
+
+ this = THIS;
+ priv = this->private;
+ GF_ASSERT(this);
+ GF_ASSERT(priv);
+
+ GF_ASSERT(dict);
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get"
+ " volname");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ snprintf(msg, sizeof(msg), "Volume %s does not exist", volname);
+
+ *op_errstr = gf_strdup(msg);
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND, "%s",
+ msg);
+ goto out;
+ }
+
-    if (!priv->scrub_svc.online) {
-        ret = 0;
-        snprintf (msg, sizeof (msg), "Scrubber daemon is not running");
-        gf_msg_debug (this->name, 0, "%s", msg);
-        goto out;
-    }
+    if (!priv->scrub_svc.online) {
+        ret = 0;
+        snprintf(msg, sizeof(msg), "Scrubber daemon is not running");
+        gf_msg_debug(this->name, 0, "%s", msg);
+        goto out;
+    }
-
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- }
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ }
- pending_node->node = &(priv->scrub_svc);
- pending_node->type = GD_NODE_SCRUB;
- cds_list_add_tail (&pending_node->list, selected);
- pending_node = NULL;
+ pending_node->node = &(priv->scrub_svc);
+ pending_node->type = GD_NODE_SCRUB;
+ cds_list_add_tail(&pending_node->list, selected);
+ pending_node = NULL;
out:
- gf_msg_debug (this->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
+ return ret;
}
/* Select the bricks to send the barrier request to.
* This selects the bricks of the given volume which are present on this peer
* and are running
*/
static int
-glusterd_bricks_select_barrier (dict_t *dict, struct cds_list_head *selected)
+glusterd_bricks_select_barrier(dict_t *dict, struct cds_list_head *selected)
{
- int ret = -1;
- char *volname = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_pending_node_t *pending_node = NULL;
-
- GF_ASSERT (dict);
-
- ret = dict_get_strn (dict, "volname", SLEN("volname"), &volname);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_GET_FAILED, "Failed to get volname");
- goto out;
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- gf_msg (THIS->name, GF_LOG_ERROR, 0,
- GD_MSG_VOL_NOT_FOUND, "Failed to find volume %s",
- volname);
- goto out;
- }
-
- cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- if (gf_uuid_compare (brickinfo->uuid, MY_UUID) ||
- !glusterd_is_brick_started (brickinfo)) {
- continue;
- }
- pending_node = GF_CALLOC (1, sizeof (*pending_node),
- gf_gld_mt_pending_node_t);
- if (!pending_node) {
- ret = -1;
- goto out;
- }
- pending_node->node = brickinfo;
- pending_node->type = GD_NODE_BRICK;
- cds_list_add_tail (&pending_node->list, selected);
- pending_node = NULL;
+ int ret = -1;
+ char *volname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_pending_node_t *pending_node = NULL;
+
+ GF_ASSERT(dict);
+
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Failed to get volname");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ "Failed to find volume %s", volname);
+ goto out;
+ }
+
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ if (gf_uuid_compare(brickinfo->uuid, MY_UUID) ||
+ !glusterd_is_brick_started(brickinfo)) {
+ continue;
+ }
+ pending_node = GF_CALLOC(1, sizeof(*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
}
+ pending_node->node = brickinfo;
+ pending_node->type = GD_NODE_BRICK;
+ cds_list_add_tail(&pending_node->list, selected);
+ pending_node = NULL;
+ }
out:
- gf_msg_debug (THIS->name, 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug(THIS->name, 0, "Returning %d", ret);
+ return ret;
}
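+/* State-machine action: build the brick-op payload (unless a request
+ * context was passed in) and hand it to the GLUSTERD_BRICK_OP procedure;
+ * inject ALL_ACK immediately when nothing is pending.
+ */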
static int
-glusterd_op_ac_send_brick_op (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_send_brick_op(glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = 0;
- rpc_clnt_procedure_t *proc = NULL;
- glusterd_conf_t *priv = NULL;
- xlator_t *this = NULL;
- glusterd_op_t op = GD_OP_NONE;
- glusterd_req_ctx_t *req_ctx = NULL;
- char *op_errstr = NULL;
-
- this = THIS;
- priv = this->private;
-
- if (ctx) {
- req_ctx = ctx;
- } else {
- req_ctx = GF_CALLOC (1, sizeof (*req_ctx),
- gf_gld_mt_op_allack_ctx_t);
- op = glusterd_op_get_op ();
- req_ctx->op = op;
- gf_uuid_copy (req_ctx->uuid, MY_UUID);
- ret = glusterd_op_build_payload (&req_ctx->dict, &op_errstr,
- NULL);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_BRICK_OP_PAYLOAD_BUILD_FAIL,
- LOGSTR_BUILD_PAYLOAD,
- gd_op_list[op]);
- if (op_errstr == NULL)
- gf_asprintf (&op_errstr,
- OPERRSTR_BUILD_PAYLOAD);
- opinfo.op_errstr = op_errstr;
- GF_FREE (req_ctx);
- goto out;
- }
- }
-
- proc = &priv->gfs_mgmt->proctable[GLUSTERD_BRICK_OP];
- if (proc->fn) {
- ret = proc->fn (NULL, this, req_ctx);
- if (ret)
- goto out;
- }
+ int ret = 0;
+ rpc_clnt_procedure_t *proc = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+ glusterd_op_t op = GD_OP_NONE;
+ glusterd_req_ctx_t *req_ctx = NULL;
+ char *op_errstr = NULL;
+
+ this = THIS;
+ priv = this->private;
+
+ if (ctx) {
+ req_ctx = ctx;
+ } else {
+ req_ctx = GF_CALLOC(1, sizeof(*req_ctx), gf_gld_mt_op_allack_ctx_t);
+ op = glusterd_op_get_op();
+ req_ctx->op = op;
+ gf_uuid_copy(req_ctx->uuid, MY_UUID);
+ ret = glusterd_op_build_payload(&req_ctx->dict, &op_errstr, NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_BRICK_OP_PAYLOAD_BUILD_FAIL, LOGSTR_BUILD_PAYLOAD,
+ gd_op_list[op]);
+ if (op_errstr == NULL)
+ gf_asprintf(&op_errstr, OPERRSTR_BUILD_PAYLOAD);
+ opinfo.op_errstr = op_errstr;
+ GF_FREE(req_ctx);
+ goto out;
+ }
+ }
+
+ proc = &priv->gfs_mgmt->proctable[GLUSTERD_BRICK_OP];
+ if (proc->fn) {
+ ret = proc->fn(NULL, this, req_ctx);
+ if (ret)
+ goto out;
+ }
- if (!opinfo.pending_count && !opinfo.brick_pending_count) {
- glusterd_clear_pending_nodes (&opinfo.pending_bricks);
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK,
- &event->txn_id, req_ctx);
- }
+ if (!opinfo.pending_count && !opinfo.brick_pending_count) {
+ glusterd_clear_pending_nodes(&opinfo.pending_bricks);
+ ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACK, &event->txn_id,
+ req_ctx);
+ }
out:
- gf_msg_debug (this->name, 0, "Returning with %d", ret);
+ gf_msg_debug(this->name, 0, "Returning with %d", ret);
- return ret;
+ return ret;
}
-
static int
-glusterd_op_ac_rcvd_brick_op_acc (glusterd_op_sm_event_t *event, void *ctx)
+glusterd_op_ac_rcvd_brick_op_acc(glusterd_op_sm_event_t *event, void *ctx)
{
- int ret = -1;
- glusterd_op_brick_rsp_ctx_t *ev_ctx = NULL;
- char *op_errstr = NULL;
- glusterd_op_t op = GD_OP_NONE;
- gd_node_type type = GD_NODE_NONE;
- dict_t *op_ctx = NULL;
- glusterd_req_ctx_t *req_ctx = NULL;
- void *pending_entry = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
- GF_VALIDATE_OR_GOTO (this->name, event, out);
- GF_VALIDATE_OR_GOTO (this->name, ctx, out);
- ev_ctx = ctx;
- GF_VALIDATE_OR_GOTO(this->name, ev_ctx, out);
-
- req_ctx = ev_ctx->commit_ctx;
- GF_VALIDATE_OR_GOTO (this->name, req_ctx, out);
-
- op = req_ctx->op;
- op_ctx = glusterd_op_get_ctx ();
- pending_entry = ev_ctx->pending_node->node;
- type = ev_ctx->pending_node->type;
-
- ret = glusterd_remove_pending_entry (&opinfo.pending_bricks,
- pending_entry);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_UNKNOWN_RESPONSE, "unknown response received ");
- ret = -1;
- goto out;
- }
-
- if (opinfo.brick_pending_count > 0)
- opinfo.brick_pending_count--;
-
+ int ret = -1;
+ glusterd_op_brick_rsp_ctx_t *ev_ctx = NULL;
+ char *op_errstr = NULL;
+ glusterd_op_t op = GD_OP_NONE;
+ gd_node_type type = GD_NODE_NONE;
+ dict_t *op_ctx = NULL;
+ glusterd_req_ctx_t *req_ctx = NULL;
+ void *pending_entry = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, event, out);
+ GF_VALIDATE_OR_GOTO(this->name, ctx, out);
+ ev_ctx = ctx;
+ GF_VALIDATE_OR_GOTO(this->name, ev_ctx, out);
+
+ req_ctx = ev_ctx->commit_ctx;
+ GF_VALIDATE_OR_GOTO(this->name, req_ctx, out);
+
+ op = req_ctx->op;
+ op_ctx = glusterd_op_get_ctx();
+ pending_entry = ev_ctx->pending_node->node;
+ type = ev_ctx->pending_node->type;
+
+ ret = glusterd_remove_pending_entry(&opinfo.pending_bricks, pending_entry);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNKNOWN_RESPONSE,
+               "unknown response received");
+ ret = -1;
+ goto out;
+ }
- ret = glusterd_set_txn_opinfo (&event->txn_id, &opinfo);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set "
- "transaction's opinfo");
+ if (opinfo.brick_pending_count > 0)
+ opinfo.brick_pending_count--;
+ ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set "
+ "transaction's opinfo");
- glusterd_handle_node_rsp (req_ctx->dict, pending_entry, op, ev_ctx->rsp_dict,
- op_ctx, &op_errstr, type);
+ glusterd_handle_node_rsp(req_ctx->dict, pending_entry, op, ev_ctx->rsp_dict,
+ op_ctx, &op_errstr, type);
- if (opinfo.brick_pending_count > 0)
- goto out;
+ if (opinfo.brick_pending_count > 0)
+ goto out;
- ret = glusterd_op_sm_inject_event (GD_OP_EVENT_ALL_ACK, &event->txn_id,
- ev_ctx->commit_ctx);
+ ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACK, &event->txn_id,
+ ev_ctx->commit_ctx);
out:
- if (ev_ctx && ev_ctx->rsp_dict)
- dict_unref (ev_ctx->rsp_dict);
- GF_FREE (ev_ctx);
- gf_msg_debug (this ? this->name : "glusterd", 0, "Returning %d", ret);
- return ret;
+ if (ev_ctx && ev_ctx->rsp_dict)
+ dict_unref(ev_ctx->rsp_dict);
+ GF_FREE(ev_ctx);
+ gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
+ return ret;
}
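+/* Dispatch an op to its brick-selection helper, filling 'selected' with the
+ * pending nodes the op must be sent to.
+ */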
int32_t
-glusterd_op_bricks_select (glusterd_op_t op, dict_t *dict, char **op_errstr,
- struct cds_list_head *selected, dict_t *rsp_dict)
+glusterd_op_bricks_select(glusterd_op_t op, dict_t *dict, char **op_errstr,
+ struct cds_list_head *selected, dict_t *rsp_dict)
{
- int ret = 0;
+ int ret = 0;
- GF_ASSERT (dict);
- GF_ASSERT (op_errstr);
- GF_ASSERT (op > GD_OP_NONE);
- GF_ASSERT (op < GD_OP_MAX);
+ GF_ASSERT(dict);
+ GF_ASSERT(op_errstr);
+ GF_ASSERT(op > GD_OP_NONE);
+ GF_ASSERT(op < GD_OP_MAX);
- switch (op) {
+ switch (op) {
case GD_OP_STOP_VOLUME:
- ret = glusterd_bricks_select_stop_volume (dict, op_errstr,
- selected);
- break;
+ ret = glusterd_bricks_select_stop_volume(dict, op_errstr, selected);
+ break;
case GD_OP_REMOVE_BRICK:
- ret = glusterd_bricks_select_remove_brick (dict, op_errstr,
- selected);
- break;
+ ret = glusterd_bricks_select_remove_brick(dict, op_errstr,
+ selected);
+ break;
case GD_OP_PROFILE_VOLUME:
- ret = glusterd_bricks_select_profile_volume (dict, op_errstr,
- selected);
- break;
+ ret = glusterd_bricks_select_profile_volume(dict, op_errstr,
+ selected);
+ break;
case GD_OP_HEAL_VOLUME:
- ret = glusterd_bricks_select_heal_volume (dict, op_errstr,
- selected, rsp_dict);
- break;
+ ret = glusterd_bricks_select_heal_volume(dict, op_errstr, selected,
+ rsp_dict);
+ break;
case GD_OP_STATUS_VOLUME:
- ret = glusterd_bricks_select_status_volume (dict, op_errstr,
- selected);
- break;
+ ret = glusterd_bricks_select_status_volume(dict, op_errstr,
+ selected);
+ break;
case GD_OP_TIER_STATUS:
- ret = glusterd_bricks_select_tier_volume (dict, op_errstr,
- selected);
- break;
+ ret = glusterd_bricks_select_tier_volume(dict, op_errstr, selected);
+ break;
case GD_OP_DETACH_TIER_STATUS:
case GD_OP_DEFRAG_BRICK_VOLUME:
- ret = glusterd_bricks_select_rebalance_volume (dict, op_errstr,
- selected);
- break;
+ ret = glusterd_bricks_select_rebalance_volume(dict, op_errstr,
+ selected);
+ break;
case GD_OP_BARRIER:
- ret = glusterd_bricks_select_barrier (dict, selected);
- break;
+ ret = glusterd_bricks_select_barrier(dict, selected);
+ break;
case GD_OP_SNAP:
- ret = glusterd_bricks_select_snap (dict, op_errstr, selected);
- break;
+ ret = glusterd_bricks_select_snap(dict, op_errstr, selected);
+ break;
case GD_OP_SCRUB_STATUS:
case GD_OP_SCRUB_ONDEMAND:
- ret = glusterd_bricks_select_scrub (dict, op_errstr, selected);
- break;
+ ret = glusterd_bricks_select_scrub(dict, op_errstr, selected);
+ break;
default:
- break;
- }
+ break;
+ }
- gf_msg_debug (THIS->name, 0, "Returning %d", ret);
+ gf_msg_debug(THIS->name, 0, "Returning %d", ret);
- return ret;
+ return ret;
}
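+/* Per-state transition tables: entries are indexed by event (see the
+ * trailing comments) and give the next state plus the action to run.
+ */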
-glusterd_op_sm_t glusterd_op_state_default [] = {
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_NONE
- {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_send_lock},//EVENT_START_LOCK
- {GD_OP_STATE_LOCKED, glusterd_op_ac_lock}, //EVENT_LOCK
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_RCVD_ACC
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_ALL_ACC
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_STAGE_ACC
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_RCVD_RJT
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_STAGE_OP
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_COMMIT_OP
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_START_UNLOCK
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_ALL_ACK
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_LOCAL_UNLOCK_NO_RESP
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, //EVENT_MAX
+glusterd_op_sm_t glusterd_op_state_default[] = {
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, // EVENT_NONE
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_send_lock}, // EVENT_START_LOCK
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_lock}, // EVENT_LOCK
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, // EVENT_RCVD_ACC
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, // EVENT_ALL_ACC
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, // EVENT_STAGE_ACC
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, // EVENT_COMMIT_ACC
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, // EVENT_RCVD_RJT
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, // EVENT_STAGE_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, // EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, // EVENT_UNLOCK
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, // EVENT_START_UNLOCK
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, // EVENT_ALL_ACK
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, // EVENT_LOCAL_UNLOCK_NO_RESP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_none}, // EVENT_MAX
};
-glusterd_op_sm_t glusterd_op_state_lock_sent [] = {
- {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_NONE
- {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},//EVENT_START_LOCK
- {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_lock}, //EVENT_LOCK
- {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_rcvd_lock_acc}, //EVENT_RCVD_ACC
- {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_send_stage_op}, //EVENT_ALL_ACC
- {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_STAGE_ACC
- {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_send_unlock_drain}, //EVENT_RCVD_RJT
- {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_STAGE_OP
- {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_OP
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, //EVENT_START_UNLOCK
- {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_ALL_ACK
- {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_LOCAL_UNLOCK_NO_RESP
- {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, //EVENT_MAX
+glusterd_op_sm_t glusterd_op_state_lock_sent[] = {
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, // EVENT_NONE
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, // EVENT_START_LOCK
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_lock}, // EVENT_LOCK
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_rcvd_lock_acc}, // EVENT_RCVD_ACC
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_send_stage_op}, // EVENT_ALL_ACC
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, // EVENT_STAGE_ACC
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, // EVENT_COMMIT_ACC
+ {GD_OP_STATE_ACK_DRAIN,
+ glusterd_op_ac_send_unlock_drain}, // EVENT_RCVD_RJT
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, // EVENT_STAGE_OP
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, // EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, // EVENT_UNLOCK
+ {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, // EVENT_START_UNLOCK
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, // EVENT_ALL_ACK
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, // EVENT_LOCAL_UNLOCK_NO_RESP
+ {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none}, // EVENT_MAX
};
-glusterd_op_sm_t glusterd_op_state_locked [] = {
- {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_NONE
- {GD_OP_STATE_LOCKED, glusterd_op_ac_none},//EVENT_START_LOCK
- {GD_OP_STATE_LOCKED, glusterd_op_ac_lock}, //EVENT_LOCK
- {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_RCVD_ACC
- {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_ALL_ACC
- {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_STAGE_ACC
- {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
- {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_RCVD_RJT
- {GD_OP_STATE_STAGED, glusterd_op_ac_stage_op}, //EVENT_STAGE_OP
- {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_COMMIT_OP
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
- {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_START_UNLOCK
- {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_ALL_ACK
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_local_unlock}, //EVENT_LOCAL_UNLOCK_NO_RESP
- {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, //EVENT_MAX
+glusterd_op_sm_t glusterd_op_state_locked[] = {
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, // EVENT_NONE
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, // EVENT_START_LOCK
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_lock}, // EVENT_LOCK
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, // EVENT_RCVD_ACC
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, // EVENT_ALL_ACC
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, // EVENT_STAGE_ACC
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, // EVENT_COMMIT_ACC
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, // EVENT_RCVD_RJT
+ {GD_OP_STATE_STAGED, glusterd_op_ac_stage_op}, // EVENT_STAGE_OP
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, // EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, // EVENT_UNLOCK
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, // EVENT_START_UNLOCK
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, // EVENT_ALL_ACK
+ {GD_OP_STATE_DEFAULT,
+ glusterd_op_ac_local_unlock}, // EVENT_LOCAL_UNLOCK_NO_RESP
+ {GD_OP_STATE_LOCKED, glusterd_op_ac_none}, // EVENT_MAX
};
-glusterd_op_sm_t glusterd_op_state_stage_op_sent [] = {
- {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, //EVENT_NONE
- {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none},//EVENT_START_LOCK
- {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_lock}, //EVENT_LOCK
- {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_rcvd_stage_op_acc}, //EVENT_RCVD_ACC
- {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_send_brick_op}, //EVENT_ALL_ACC
- {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_send_brick_op}, //EVENT_STAGE_ACC
- {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
- {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_stage_op_failed}, //EVENT_RCVD_RJT
- {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, //EVENT_STAGE_OP
- {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_OP
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, //EVENT_START_UNLOCK
- {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, //EVENT_ALL_ACK
- {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, //EVENT_LOCAL_UNLOCK_NO_RESP
- {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, //EVENT_MAX
+glusterd_op_sm_t glusterd_op_state_stage_op_sent[] = {
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, // EVENT_NONE
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, // EVENT_START_LOCK
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_lock}, // EVENT_LOCK
+ {GD_OP_STATE_STAGE_OP_SENT,
+ glusterd_op_ac_rcvd_stage_op_acc}, // EVENT_RCVD_ACC
+ {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_send_brick_op}, // EVENT_ALL_ACC
+ {GD_OP_STATE_BRICK_OP_SENT,
+ glusterd_op_ac_send_brick_op}, // EVENT_STAGE_ACC
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, // EVENT_COMMIT_ACC
+ {GD_OP_STATE_STAGE_OP_FAILED,
+ glusterd_op_ac_stage_op_failed}, // EVENT_RCVD_RJT
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, // EVENT_STAGE_OP
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, // EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, // EVENT_UNLOCK
+ {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, // EVENT_START_UNLOCK
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, // EVENT_ALL_ACK
+ {GD_OP_STATE_STAGE_OP_SENT,
+ glusterd_op_ac_none}, // EVENT_LOCAL_UNLOCK_NO_RESP
+ {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none}, // EVENT_MAX
};
-glusterd_op_sm_t glusterd_op_state_stage_op_failed [] = {
- {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none}, //EVENT_NONE
- {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none},//EVENT_START_LOCK
- {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_lock}, //EVENT_LOCK
- {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_stage_op_failed}, //EVENT_RCVD_ACC
- {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none}, //EVENT_ALL_ACC
- {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none}, //EVENT_STAGE_ACC
- {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
- {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_stage_op_failed}, //EVENT_RCVD_RJT
- {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none}, //EVENT_STAGE_OP
- {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none}, //EVENT_COMMIT_OP
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, //EVENT_START_UNLOCK
- {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock}, //EVENT_ALL_ACK
- {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none}, //EVENT_LOCAL_UNLOCK_NO_RESP
- {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none}, //EVENT_MAX
+glusterd_op_sm_t glusterd_op_state_stage_op_failed[] = {
+ {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none}, // EVENT_NONE
+ {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none}, // EVENT_START_LOCK
+ {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_lock}, // EVENT_LOCK
+ {GD_OP_STATE_STAGE_OP_FAILED,
+ glusterd_op_ac_stage_op_failed}, // EVENT_RCVD_ACC
+ {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none}, // EVENT_ALL_ACC
+ {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none}, // EVENT_STAGE_ACC
+ {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none}, // EVENT_COMMIT_ACC
+ {GD_OP_STATE_STAGE_OP_FAILED,
+ glusterd_op_ac_stage_op_failed}, // EVENT_RCVD_RJT
+ {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none}, // EVENT_STAGE_OP
+ {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none}, // EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, // EVENT_UNLOCK
+ {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, // EVENT_START_UNLOCK
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock}, // EVENT_ALL_ACK
+ {GD_OP_STATE_STAGE_OP_FAILED,
+ glusterd_op_ac_none}, // EVENT_LOCAL_UNLOCK_NO_RESP
+ {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none}, // EVENT_MAX
};
-glusterd_op_sm_t glusterd_op_state_staged [] = {
- {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_NONE
- {GD_OP_STATE_STAGED, glusterd_op_ac_none},//EVENT_START_LOCK
- {GD_OP_STATE_STAGED, glusterd_op_ac_lock}, //EVENT_LOCK
- {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_RCVD_ACC
- {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_ALL_ACC
- {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_STAGE_ACC
- {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
- {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_RCVD_RJT
- {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_STAGE_OP
- {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_send_brick_op}, //EVENT_COMMIT_OP
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
- {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_START_UNLOCK
- {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_ALL_ACK
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_local_unlock}, //EVENT_LOCAL_UNLOCK_NO_RESP
- {GD_OP_STATE_STAGED, glusterd_op_ac_none}, //EVENT_MAX
+glusterd_op_sm_t glusterd_op_state_staged[] = {
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, // EVENT_NONE
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, // EVENT_START_LOCK
+ {GD_OP_STATE_STAGED, glusterd_op_ac_lock}, // EVENT_LOCK
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, // EVENT_RCVD_ACC
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, // EVENT_ALL_ACC
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, // EVENT_STAGE_ACC
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, // EVENT_COMMIT_ACC
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, // EVENT_RCVD_RJT
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, // EVENT_STAGE_OP
+ {GD_OP_STATE_BRICK_COMMITTED,
+ glusterd_op_ac_send_brick_op}, // EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, // EVENT_UNLOCK
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, // EVENT_START_UNLOCK
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, // EVENT_ALL_ACK
+ {GD_OP_STATE_DEFAULT,
+ glusterd_op_ac_local_unlock}, // EVENT_LOCAL_UNLOCK_NO_RESP
+ {GD_OP_STATE_STAGED, glusterd_op_ac_none}, // EVENT_MAX
};
-glusterd_op_sm_t glusterd_op_state_brick_op_sent [] = {
- {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none}, //EVENT_NONE
- {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none},//EVENT_START_LOCK
- {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_lock}, //EVENT_LOCK
- {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_rcvd_brick_op_acc}, //EVENT_RCVD_ACC
- {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none}, //EVENT_ALL_ACC
- {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none}, //EVENT_STAGE_ACC
- {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
- {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_brick_op_failed}, //EVENT_RCVD_RJT
- {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none}, //EVENT_BRICK_OP
- {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_OP
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, //EVENT_START_UNLOCK
- {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_send_commit_op}, //EVENT_ALL_ACK
- {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none}, //EVENT_LOCAL_UNLOCK_NO_RESP
- {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none}, //EVENT_MAX
+glusterd_op_sm_t glusterd_op_state_brick_op_sent[] = {
+ {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none}, // EVENT_NONE
+ {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none}, // EVENT_START_LOCK
+ {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_lock}, // EVENT_LOCK
+ {GD_OP_STATE_BRICK_OP_SENT,
+ glusterd_op_ac_rcvd_brick_op_acc}, // EVENT_RCVD_ACC
+ {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none}, // EVENT_ALL_ACC
+ {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none}, // EVENT_STAGE_ACC
+ {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none}, // EVENT_COMMIT_ACC
+ {GD_OP_STATE_BRICK_OP_FAILED,
+ glusterd_op_ac_brick_op_failed}, // EVENT_RCVD_RJT
+ {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none}, // EVENT_BRICK_OP
+ {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none}, // EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, // EVENT_UNLOCK
+ {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, // EVENT_START_UNLOCK
+ {GD_OP_STATE_COMMIT_OP_SENT,
+ glusterd_op_ac_send_commit_op}, // EVENT_ALL_ACK
+ {GD_OP_STATE_BRICK_OP_SENT,
+ glusterd_op_ac_none}, // EVENT_LOCAL_UNLOCK_NO_RESP
+ {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none}, // EVENT_MAX
};
-glusterd_op_sm_t glusterd_op_state_brick_op_failed [] = {
- {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none}, //EVENT_NONE
- {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none},//EVENT_START_LOCK
- {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_lock}, //EVENT_LOCK
- {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_brick_op_failed}, //EVENT_RCVD_ACC
- {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none}, //EVENT_ALL_ACC
- {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none}, //EVENT_STAGE_ACC
- {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
- {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_brick_op_failed}, //EVENT_RCVD_RJT
- {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none}, //EVENT_BRICK_OP
- {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none}, //EVENT_COMMIT_OP
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, //EVENT_START_UNLOCK
- {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock}, //EVENT_ALL_ACK
- {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none}, //EVENT_LOCAL_UNLOCK_NO_RESP
- {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none}, //EVENT_MAX
+glusterd_op_sm_t glusterd_op_state_brick_op_failed[] = {
+ {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none}, // EVENT_NONE
+ {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none}, // EVENT_START_LOCK
+ {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_lock}, // EVENT_LOCK
+ {GD_OP_STATE_BRICK_OP_FAILED,
+ glusterd_op_ac_brick_op_failed}, // EVENT_RCVD_ACC
+ {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none}, // EVENT_ALL_ACC
+ {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none}, // EVENT_STAGE_ACC
+ {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none}, // EVENT_COMMIT_ACC
+ {GD_OP_STATE_BRICK_OP_FAILED,
+ glusterd_op_ac_brick_op_failed}, // EVENT_RCVD_RJT
+ {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none}, // EVENT_BRICK_OP
+ {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none}, // EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, // EVENT_UNLOCK
+ {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, // EVENT_START_UNLOCK
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock}, // EVENT_ALL_ACK
+ {GD_OP_STATE_BRICK_OP_FAILED,
+ glusterd_op_ac_none}, // EVENT_LOCAL_UNLOCK_NO_RESP
+ {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none}, // EVENT_MAX
};
-glusterd_op_sm_t glusterd_op_state_brick_committed [] = {
- {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none}, //EVENT_NONE
- {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none},//EVENT_START_LOCK
- {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_lock}, //EVENT_LOCK
- {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_rcvd_brick_op_acc}, //EVENT_RCVD_ACC
- {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none}, //EVENT_ALL_ACC
- {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none}, //EVENT_STAGE_ACC
- {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
- {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_brick_op_failed}, //EVENT_RCVD_RJT
- {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none}, //EVENT_STAGE_OP
- {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none}, //EVENT_COMMIT_OP
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
- {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none}, //EVENT_START_UNLOCK
- {GD_OP_STATE_COMMITED, glusterd_op_ac_commit_op}, //EVENT_ALL_ACK
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_local_unlock}, //EVENT_LOCAL_UNLOCK_NO_RESP
- {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none}, //EVENT_MAX
+glusterd_op_sm_t glusterd_op_state_brick_committed[] = {
+ {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none}, // EVENT_NONE
+ {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none}, // EVENT_START_LOCK
+ {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_lock}, // EVENT_LOCK
+ {GD_OP_STATE_BRICK_COMMITTED,
+ glusterd_op_ac_rcvd_brick_op_acc}, // EVENT_RCVD_ACC
+ {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none}, // EVENT_ALL_ACC
+ {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none}, // EVENT_STAGE_ACC
+ {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none}, // EVENT_COMMIT_ACC
+ {GD_OP_STATE_BRICK_COMMIT_FAILED,
+ glusterd_op_ac_brick_op_failed}, // EVENT_RCVD_RJT
+ {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none}, // EVENT_STAGE_OP
+ {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none}, // EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, // EVENT_UNLOCK
+ {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none}, // EVENT_START_UNLOCK
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_commit_op}, // EVENT_ALL_ACK
+ {GD_OP_STATE_DEFAULT,
+ glusterd_op_ac_local_unlock}, // EVENT_LOCAL_UNLOCK_NO_RESP
+ {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none}, // EVENT_MAX
};
-glusterd_op_sm_t glusterd_op_state_brick_commit_failed [] = {
- {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none}, //EVENT_NONE
- {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none},//EVENT_START_LOCK
- {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_lock}, //EVENT_LOCK
- {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_brick_op_failed}, //EVENT_RCVD_ACC
- {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none}, //EVENT_ALL_ACC
- {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none}, //EVENT_STAGE_ACC
- {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
- {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_brick_op_failed}, //EVENT_RCVD_RJT
- {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none}, //EVENT_STAGE_OP
- {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none}, //EVENT_COMMIT_OP
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
- {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none}, //EVENT_START_UNLOCK
- {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_send_commit_failed}, //EVENT_ALL_ACK
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_local_unlock}, //EVENT_LOCAL_UNLOCK_NO_RESP
- {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none}, //EVENT_MAX
+glusterd_op_sm_t glusterd_op_state_brick_commit_failed[] = {
+ {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none}, // EVENT_NONE
+ {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none}, // EVENT_START_LOCK
+ {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_lock}, // EVENT_LOCK
+ {GD_OP_STATE_BRICK_COMMIT_FAILED,
+ glusterd_op_ac_brick_op_failed}, // EVENT_RCVD_ACC
+ {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none}, // EVENT_ALL_ACC
+ {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none}, // EVENT_STAGE_ACC
+ {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none}, // EVENT_COMMIT_ACC
+ {GD_OP_STATE_BRICK_COMMIT_FAILED,
+ glusterd_op_ac_brick_op_failed}, // EVENT_RCVD_RJT
+ {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none}, // EVENT_STAGE_OP
+ {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none}, // EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, // EVENT_UNLOCK
+ {GD_OP_STATE_BRICK_COMMIT_FAILED,
+ glusterd_op_ac_none}, // EVENT_START_UNLOCK
+ {GD_OP_STATE_BRICK_COMMIT_FAILED,
+ glusterd_op_ac_send_commit_failed}, // EVENT_ALL_ACK
+ {GD_OP_STATE_DEFAULT,
+ glusterd_op_ac_local_unlock}, // EVENT_LOCAL_UNLOCK_NO_RESP
+ {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none}, // EVENT_MAX
};
-glusterd_op_sm_t glusterd_op_state_commit_op_failed [] = {
- {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none}, //EVENT_NONE
- {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none},//EVENT_START_LOCK
- {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_lock}, //EVENT_LOCK
- {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_commit_op_failed}, //EVENT_RCVD_ACC
- {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none}, //EVENT_ALL_ACC
- {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none}, //EVENT_STAGE_ACC
- {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
- {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_commit_op_failed}, //EVENT_RCVD_RJT
- {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none}, //EVENT_STAGE_OP
- {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none}, //EVENT_COMMIT_OP
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, //EVENT_START_UNLOCK
- {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock}, //EVENT_ALL_ACK
- {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none}, //EVENT_LOCAL_UNLOCK_NO_RESP
- {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none}, //EVENT_MAX
+glusterd_op_sm_t glusterd_op_state_commit_op_failed[] = {
+ {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none}, // EVENT_NONE
+ {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none}, // EVENT_START_LOCK
+ {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_lock}, // EVENT_LOCK
+ {GD_OP_STATE_COMMIT_OP_FAILED,
+ glusterd_op_ac_commit_op_failed}, // EVENT_RCVD_ACC
+ {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none}, // EVENT_ALL_ACC
+ {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none}, // EVENT_STAGE_ACC
+ {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none}, // EVENT_COMMIT_ACC
+ {GD_OP_STATE_COMMIT_OP_FAILED,
+ glusterd_op_ac_commit_op_failed}, // EVENT_RCVD_RJT
+ {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none}, // EVENT_STAGE_OP
+ {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none}, // EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, // EVENT_UNLOCK
+ {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, // EVENT_START_UNLOCK
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock}, // EVENT_ALL_ACK
+ {GD_OP_STATE_COMMIT_OP_FAILED,
+ glusterd_op_ac_none}, // EVENT_LOCAL_UNLOCK_NO_RESP
+ {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none}, // EVENT_MAX
};
-glusterd_op_sm_t glusterd_op_state_commit_op_sent [] = {
- {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, //EVENT_NONE
- {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none},//EVENT_START_LOCK
- {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_lock}, //EVENT_LOCK
- {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_rcvd_commit_op_acc}, //EVENT_RCVD_ACC
- {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock}, //EVENT_ALL_ACC
- {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, //EVENT_STAGE_ACC
- {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock}, //EVENT_COMMIT_ACC
- {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_commit_op_failed}, //EVENT_RCVD_RJT
- {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, //EVENT_STAGE_OP
- {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_OP
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, //EVENT_START_UNLOCK
- {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, //EVENT_ALL_ACK
- {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, //EVENT_LOCAL_UNLOCK_NO_RESP
- {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, //EVENT_MAX
+glusterd_op_sm_t glusterd_op_state_commit_op_sent[] = {
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, // EVENT_NONE
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, // EVENT_START_LOCK
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_lock}, // EVENT_LOCK
+ {GD_OP_STATE_COMMIT_OP_SENT,
+ glusterd_op_ac_rcvd_commit_op_acc}, // EVENT_RCVD_ACC
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock}, // EVENT_ALL_ACC
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, // EVENT_STAGE_ACC
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock}, // EVENT_COMMIT_ACC
+ {GD_OP_STATE_COMMIT_OP_FAILED,
+ glusterd_op_ac_commit_op_failed}, // EVENT_RCVD_RJT
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, // EVENT_STAGE_OP
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, // EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, // EVENT_UNLOCK
+ {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, // EVENT_START_UNLOCK
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, // EVENT_ALL_ACK
+ {GD_OP_STATE_COMMIT_OP_SENT,
+ glusterd_op_ac_none}, // EVENT_LOCAL_UNLOCK_NO_RESP
+ {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none}, // EVENT_MAX
};
-glusterd_op_sm_t glusterd_op_state_committed [] = {
- {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_NONE
- {GD_OP_STATE_COMMITED, glusterd_op_ac_none},//EVENT_START_LOCK
- {GD_OP_STATE_COMMITED, glusterd_op_ac_lock}, //EVENT_LOCK
- {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_RCVD_ACC
- {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_ALL_ACC
- {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_STAGE_ACC
- {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
- {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_RCVD_RJT
- {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_STAGE_OP
- {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_COMMIT_OP
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
- {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_START_UNLOCK
- {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_ALL_ACK
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_local_unlock}, //EVENT_LOCAL_UNLOCK_NO_RESP
- {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, //EVENT_MAX
+glusterd_op_sm_t glusterd_op_state_committed[] = {
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, // EVENT_NONE
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, // EVENT_START_LOCK
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_lock}, // EVENT_LOCK
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, // EVENT_RCVD_ACC
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, // EVENT_ALL_ACC
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, // EVENT_STAGE_ACC
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, // EVENT_COMMIT_ACC
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, // EVENT_RCVD_RJT
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, // EVENT_STAGE_OP
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, // EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, // EVENT_UNLOCK
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, // EVENT_START_UNLOCK
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, // EVENT_ALL_ACK
+ {GD_OP_STATE_DEFAULT,
+ glusterd_op_ac_local_unlock}, // EVENT_LOCAL_UNLOCK_NO_RESP
+ {GD_OP_STATE_COMMITED, glusterd_op_ac_none}, // EVENT_MAX
};
-glusterd_op_sm_t glusterd_op_state_unlock_sent [] = {
- {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_NONE
- {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none},//EVENT_START_LOCK
- {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_lock}, //EVENT_LOCK
- {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_rcvd_unlock_acc}, //EVENT_RCVD_ACC
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlocked_all}, //EVENT_ALL_ACC
- {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_STAGE_ACC
- {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
- {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_rcvd_unlock_acc}, //EVENT_RCVD_RJT
- {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_STAGE_OP
- {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_COMMIT_OP
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, //EVENT_START_UNLOCK
- {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_ALL_ACK
- {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_LOCAL_UNLOCK_NO_RESP
- {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, //EVENT_MAX
+glusterd_op_sm_t glusterd_op_state_unlock_sent[] = {
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, // EVENT_NONE
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, // EVENT_START_LOCK
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_lock}, // EVENT_LOCK
+ {GD_OP_STATE_UNLOCK_SENT,
+ glusterd_op_ac_rcvd_unlock_acc}, // EVENT_RCVD_ACC
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlocked_all}, // EVENT_ALL_ACC
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, // EVENT_STAGE_ACC
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, // EVENT_COMMIT_ACC
+ {GD_OP_STATE_UNLOCK_SENT,
+ glusterd_op_ac_rcvd_unlock_acc}, // EVENT_RCVD_RJT
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, // EVENT_STAGE_OP
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, // EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, // EVENT_UNLOCK
+ {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, // EVENT_START_UNLOCK
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, // EVENT_ALL_ACK
+ {GD_OP_STATE_UNLOCK_SENT,
+ glusterd_op_ac_none}, // EVENT_LOCAL_UNLOCK_NO_RESP
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none}, // EVENT_MAX
};
-glusterd_op_sm_t glusterd_op_state_ack_drain [] = {
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, //EVENT_NONE
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},//EVENT_START_LOCK
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_lock}, //EVENT_LOCK
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_send_unlock_drain}, //EVENT_RCVD_ACC
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, //EVENT_ALL_ACC
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, //EVENT_STAGE_ACC
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, //EVENT_COMMIT_ACC
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_send_unlock_drain}, //EVENT_RCVD_RJT
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, //EVENT_STAGE_OP
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, //EVENT_COMMIT_OP
- {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, //EVENT_UNLOCK
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, //EVENT_START_UNLOCK
- {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock}, //EVENT_ALL_ACK
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, //EVENT_LOCAL_UNLOCK_NO_RESP
- {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, //EVENT_MAX
+glusterd_op_sm_t glusterd_op_state_ack_drain[] = {
+ {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, // EVENT_NONE
+ {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, // EVENT_START_LOCK
+ {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_lock}, // EVENT_LOCK
+ {GD_OP_STATE_ACK_DRAIN,
+ glusterd_op_ac_send_unlock_drain}, // EVENT_RCVD_ACC
+ {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, // EVENT_ALL_ACC
+ {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, // EVENT_STAGE_ACC
+ {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, // EVENT_COMMIT_ACC
+ {GD_OP_STATE_ACK_DRAIN,
+ glusterd_op_ac_send_unlock_drain}, // EVENT_RCVD_RJT
+ {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, // EVENT_STAGE_OP
+ {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, // EVENT_COMMIT_OP
+ {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock}, // EVENT_UNLOCK
+ {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, // EVENT_START_UNLOCK
+ {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock}, // EVENT_ALL_ACK
+ {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, // EVENT_LOCAL_UNLOCK_NO_RESP
+ {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none}, // EVENT_MAX
};
-glusterd_op_sm_t *glusterd_op_state_table [] = {
- glusterd_op_state_default,
- glusterd_op_state_lock_sent,
- glusterd_op_state_locked,
- glusterd_op_state_stage_op_sent,
- glusterd_op_state_staged,
- glusterd_op_state_commit_op_sent,
- glusterd_op_state_committed,
- glusterd_op_state_unlock_sent,
- glusterd_op_state_stage_op_failed,
- glusterd_op_state_commit_op_failed,
- glusterd_op_state_brick_op_sent,
- glusterd_op_state_brick_op_failed,
- glusterd_op_state_brick_committed,
- glusterd_op_state_brick_commit_failed,
- glusterd_op_state_ack_drain
-};
+glusterd_op_sm_t *glusterd_op_state_table[] = {
+ glusterd_op_state_default, glusterd_op_state_lock_sent,
+ glusterd_op_state_locked, glusterd_op_state_stage_op_sent,
+ glusterd_op_state_staged, glusterd_op_state_commit_op_sent,
+ glusterd_op_state_committed, glusterd_op_state_unlock_sent,
+ glusterd_op_state_stage_op_failed, glusterd_op_state_commit_op_failed,
+ glusterd_op_state_brick_op_sent, glusterd_op_state_brick_op_failed,
+ glusterd_op_state_brick_committed, glusterd_op_state_brick_commit_failed,
+ glusterd_op_state_ack_drain};
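The fifteen tables above feed a two-level dispatch: glusterd_op_state_table is indexed by the current state, each row is indexed by the incoming event type, and every cell pairs an action callback with the state to transition to. A minimal sketch of the lookup, using only names from this file (the real loop in glusterd_op_sm() below additionally persists per-transaction opinfo):

    /* Illustrative condensation, not part of this commit. */
    static int
    dispatch_one(glusterd_op_sm_event_t *event)
    {
        glusterd_op_sm_t *row = glusterd_op_state_table[opinfo.state.state];
        glusterd_op_sm_ac_fn handler = row[event->event].handler;
        int ret = handler(event, event->ctx);

        if (!ret)
            ret = glusterd_op_sm_transition_state(&opinfo, row, event->event);
        return ret;
    }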
int
-glusterd_op_sm_new_event (glusterd_op_sm_event_type_t event_type,
- glusterd_op_sm_event_t **new_event)
+glusterd_op_sm_new_event(glusterd_op_sm_event_type_t event_type,
+ glusterd_op_sm_event_t **new_event)
{
- glusterd_op_sm_event_t *event = NULL;
+ glusterd_op_sm_event_t *event = NULL;
- GF_ASSERT (new_event);
- GF_ASSERT (GD_OP_EVENT_NONE <= event_type &&
- GD_OP_EVENT_MAX > event_type);
+ GF_ASSERT(new_event);
+ GF_ASSERT(GD_OP_EVENT_NONE <= event_type && GD_OP_EVENT_MAX > event_type);
- event = GF_CALLOC (1, sizeof (*event), gf_gld_mt_op_sm_event_t);
+ event = GF_CALLOC(1, sizeof(*event), gf_gld_mt_op_sm_event_t);
- if (!event)
- return -1;
+ if (!event)
+ return -1;
- *new_event = event;
- event->event = event_type;
- CDS_INIT_LIST_HEAD (&event->list);
+ *new_event = event;
+ event->event = event_type;
+ CDS_INIT_LIST_HEAD(&event->list);
- return 0;
+ return 0;
}
int
-glusterd_op_sm_inject_event (glusterd_op_sm_event_type_t event_type,
- uuid_t *txn_id, void *ctx)
+glusterd_op_sm_inject_event(glusterd_op_sm_event_type_t event_type,
+ uuid_t *txn_id, void *ctx)
{
- int32_t ret = -1;
- glusterd_op_sm_event_t *event = NULL;
+ int32_t ret = -1;
+ glusterd_op_sm_event_t *event = NULL;
- GF_ASSERT (event_type < GD_OP_EVENT_MAX &&
- event_type >= GD_OP_EVENT_NONE);
+ GF_ASSERT(event_type < GD_OP_EVENT_MAX && event_type >= GD_OP_EVENT_NONE);
- ret = glusterd_op_sm_new_event (event_type, &event);
+ ret = glusterd_op_sm_new_event(event_type, &event);
- if (ret)
- goto out;
+ if (ret)
+ goto out;
- event->ctx = ctx;
+ event->ctx = ctx;
- if (txn_id)
- gf_uuid_copy (event->txn_id, *txn_id);
+ if (txn_id)
+ gf_uuid_copy(event->txn_id, *txn_id);
- gf_msg_debug (THIS->name, 0, "Enqueue event: '%s'",
- glusterd_op_sm_event_name_get (event->event));
- cds_list_add_tail (&event->list, &gd_op_sm_queue);
+ gf_msg_debug(THIS->name, 0, "Enqueue event: '%s'",
+ glusterd_op_sm_event_name_get(event->event));
+ cds_list_add_tail(&event->list, &gd_op_sm_queue);
out:
- return ret;
+ return ret;
}
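glusterd_op_sm_inject_event() only allocates the event and appends it to gd_op_sm_queue; the handlers run later, when glusterd_op_sm() drains the queue. A sketch of the producer side, assuming the glusterd-op-sm.h declarations are in scope (the ctx payload depends on the event type; glusterd_destroy_op_event_ctx() below shows who owns and frees it):

    /* Hypothetical call site, for illustration only. */
    static int
    inject_and_run(glusterd_op_sm_event_type_t ev, uuid_t *txn_id, void *ctx)
    {
        int ret = glusterd_op_sm_inject_event(ev, txn_id, ctx);

        if (ret)
            return ret; /* nothing was queued; the caller still owns ctx */
        return glusterd_op_sm(); /* drain the queue and run the handlers */
    }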
void
-glusterd_destroy_req_ctx (glusterd_req_ctx_t *ctx)
+glusterd_destroy_req_ctx(glusterd_req_ctx_t *ctx)
{
- if (!ctx)
- return;
- if (ctx->dict)
- dict_unref (ctx->dict);
- GF_FREE (ctx);
+ if (!ctx)
+ return;
+ if (ctx->dict)
+ dict_unref(ctx->dict);
+ GF_FREE(ctx);
}
void
-glusterd_destroy_local_unlock_ctx (uuid_t *ctx)
+glusterd_destroy_local_unlock_ctx(uuid_t *ctx)
{
- if (!ctx)
- return;
- GF_FREE (ctx);
+ if (!ctx)
+ return;
+ GF_FREE(ctx);
}
void
-glusterd_destroy_op_event_ctx (glusterd_op_sm_event_t *event)
+glusterd_destroy_op_event_ctx(glusterd_op_sm_event_t *event)
{
- if (!event)
- return;
+ if (!event)
+ return;
- switch (event->event) {
+ switch (event->event) {
case GD_OP_EVENT_LOCK:
case GD_OP_EVENT_UNLOCK:
- glusterd_destroy_lock_ctx (event->ctx);
- break;
+ glusterd_destroy_lock_ctx(event->ctx);
+ break;
case GD_OP_EVENT_STAGE_OP:
case GD_OP_EVENT_ALL_ACK:
- glusterd_destroy_req_ctx (event->ctx);
- break;
+ glusterd_destroy_req_ctx(event->ctx);
+ break;
case GD_OP_EVENT_LOCAL_UNLOCK_NO_RESP:
- glusterd_destroy_local_unlock_ctx (event->ctx);
- break;
+ glusterd_destroy_local_unlock_ctx(event->ctx);
+ break;
default:
- break;
- }
+ break;
+ }
}
int
-glusterd_op_sm ()
+glusterd_op_sm()
{
- glusterd_op_sm_event_t *event = NULL;
- glusterd_op_sm_event_t *tmp = NULL;
- int ret = -1;
- int lock_err = 0;
- glusterd_op_sm_ac_fn handler = NULL;
- glusterd_op_sm_t *state = NULL;
- glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
- xlator_t *this = NULL;
- glusterd_op_info_t txn_op_info;
-
- this = THIS;
- GF_ASSERT (this);
-
- ret = synclock_trylock (&gd_op_sm_lock);
- if (ret) {
- lock_err = errno;
- gf_msg (this->name, GF_LOG_ERROR, errno,
- GD_MSG_LOCK_FAIL, "lock failed due to %s",
- strerror (lock_err));
- goto lock_failed;
- }
-
- while (!cds_list_empty (&gd_op_sm_queue)) {
-
- cds_list_for_each_entry_safe (event, tmp, &gd_op_sm_queue,
- list) {
-
- cds_list_del_init (&event->list);
- event_type = event->event;
- gf_msg_debug (this->name, 0, "Dequeued event of "
- "type: '%s'",
- glusterd_op_sm_event_name_get(event_type));
-
- gf_msg_debug (this->name, 0, "transaction ID = %s",
- uuid_utoa (event->txn_id));
-
- ret = glusterd_get_txn_opinfo (&event->txn_id,
- &txn_op_info);
- if (ret) {
- gf_msg_callingfn (this->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_GET_FAIL,
- "Unable to get transaction "
- "opinfo for transaction ID :"
- "%s",
- uuid_utoa (event->txn_id));
- glusterd_destroy_op_event_ctx (event);
- GF_FREE (event);
- continue;
- } else
- opinfo = txn_op_info;
-
- state = glusterd_op_state_table[opinfo.state.state];
-
- GF_ASSERT (state);
-
- handler = state[event_type].handler;
- GF_ASSERT (handler);
-
- ret = handler (event, event->ctx);
-
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_HANDLER_RETURNED,
- "handler returned: %d", ret);
- glusterd_destroy_op_event_ctx (event);
- GF_FREE (event);
- continue;
- }
-
- ret = glusterd_op_sm_transition_state (&opinfo, state,
- event_type);
-
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_EVENT_STATE_TRANSITION_FAIL,
- "Unable to transition"
- "state from '%s' to '%s'",
- glusterd_op_sm_state_name_get(opinfo.state.state),
- glusterd_op_sm_state_name_get(state[event_type].next_state));
- (void) synclock_unlock (&gd_op_sm_lock);
- return ret;
- }
-
- if ((state[event_type].next_state ==
- GD_OP_STATE_DEFAULT) &&
- (event_type == GD_OP_EVENT_UNLOCK)) {
- /* Clearing the transaction opinfo */
- ret = glusterd_clear_txn_opinfo(&event->txn_id);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_CLEAR_FAIL,
- "Unable to clear "
- "transaction's opinfo");
- } else {
- ret = glusterd_set_txn_opinfo (&event->txn_id,
- &opinfo);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_TRANS_OPINFO_SET_FAIL,
- "Unable to set "
- "transaction's opinfo");
- }
+ glusterd_op_sm_event_t *event = NULL;
+ glusterd_op_sm_event_t *tmp = NULL;
+ int ret = -1;
+ int lock_err = 0;
+ glusterd_op_sm_ac_fn handler = NULL;
+ glusterd_op_sm_t *state = NULL;
+ glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
+ xlator_t *this = NULL;
+ glusterd_op_info_t txn_op_info;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ ret = synclock_trylock(&gd_op_sm_lock);
+ if (ret) {
+ lock_err = errno;
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_LOCK_FAIL,
+ "lock failed due to %s", strerror(lock_err));
+ goto lock_failed;
+ }
+
+ while (!cds_list_empty(&gd_op_sm_queue)) {
+ cds_list_for_each_entry_safe(event, tmp, &gd_op_sm_queue, list)
+ {
+ cds_list_del_init(&event->list);
+ event_type = event->event;
+ gf_msg_debug(this->name, 0,
+ "Dequeued event of "
+ "type: '%s'",
+ glusterd_op_sm_event_name_get(event_type));
+
+ gf_msg_debug(this->name, 0, "transaction ID = %s",
+ uuid_utoa(event->txn_id));
+
+ ret = glusterd_get_txn_opinfo(&event->txn_id, &txn_op_info);
+ if (ret) {
+ gf_msg_callingfn(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_TRANS_OPINFO_GET_FAIL,
+ "Unable to get transaction "
+ "opinfo for transaction ID :"
+ "%s",
+ uuid_utoa(event->txn_id));
+ glusterd_destroy_op_event_ctx(event);
+ GF_FREE(event);
+ continue;
+ } else
+ opinfo = txn_op_info;
+
+ state = glusterd_op_state_table[opinfo.state.state];
+
+ GF_ASSERT(state);
+
+ handler = state[event_type].handler;
+ GF_ASSERT(handler);
+
+ ret = handler(event, event->ctx);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HANDLER_RETURNED,
+ "handler returned: %d", ret);
+ glusterd_destroy_op_event_ctx(event);
+ GF_FREE(event);
+ continue;
+ }
+
+ ret = glusterd_op_sm_transition_state(&opinfo, state, event_type);
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_EVENT_STATE_TRANSITION_FAIL,
+ "Unable to transition"
+ "state from '%s' to '%s'",
+ glusterd_op_sm_state_name_get(opinfo.state.state),
+ glusterd_op_sm_state_name_get(
+ state[event_type].next_state));
+ (void)synclock_unlock(&gd_op_sm_lock);
+ return ret;
+ }
- glusterd_destroy_op_event_ctx (event);
- GF_FREE (event);
+ if ((state[event_type].next_state == GD_OP_STATE_DEFAULT) &&
+ (event_type == GD_OP_EVENT_UNLOCK)) {
+ /* Clearing the transaction opinfo */
+ ret = glusterd_clear_txn_opinfo(&event->txn_id);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_TRANS_OPINFO_CLEAR_FAIL,
+ "Unable to clear "
+ "transaction's opinfo");
+ } else {
+ ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_TRANS_OPINFO_SET_FAIL,
+ "Unable to set "
+ "transaction's opinfo");
+ }
- }
+ glusterd_destroy_op_event_ctx(event);
+ GF_FREE(event);
}
+ }
-
- (void) synclock_unlock (&gd_op_sm_lock);
- ret = 0;
+ (void)synclock_unlock(&gd_op_sm_lock);
+ ret = 0;
lock_failed:
- return ret;
+ return ret;
}
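Two details of the loop above are easy to miss. First, synclock_trylock() keeps the drain single-threaded: a busy lock most likely means another caller is already inside the while loop and will consume any event queued in the meantime, so bailing out through lock_failed does not strand work (this reading is inferred from the code; the function still reports the failure). Second, the opinfo write-back at the end of each iteration is conditional; condensed from the loop above:

    /* An UNLOCK event that lands back in GD_OP_STATE_DEFAULT ends the
     * transaction, so its opinfo is cleared rather than written back. */
    if ((state[event_type].next_state == GD_OP_STATE_DEFAULT) &&
        (event_type == GD_OP_EVENT_UNLOCK))
        ret = glusterd_clear_txn_opinfo(&event->txn_id);
    else
        ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);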
int32_t
-glusterd_op_set_op (glusterd_op_t op)
+glusterd_op_set_op(glusterd_op_t op)
{
+ GF_ASSERT(op < GD_OP_MAX);
+ GF_ASSERT(op > GD_OP_NONE);
- GF_ASSERT (op < GD_OP_MAX);
- GF_ASSERT (op > GD_OP_NONE);
-
- opinfo.op = op;
-
- return 0;
+ opinfo.op = op;
+ return 0;
}
int32_t
-glusterd_op_get_op ()
+glusterd_op_get_op()
{
-
- return opinfo.op;
-
+ return opinfo.op;
}
int32_t
-glusterd_op_set_req (rpcsvc_request_t *req)
+glusterd_op_set_req(rpcsvc_request_t *req)
{
-
- GF_ASSERT (req);
- opinfo.req = req;
- return 0;
+ GF_ASSERT(req);
+ opinfo.req = req;
+ return 0;
}
int32_t
-glusterd_op_clear_op (glusterd_op_t op)
+glusterd_op_clear_op(glusterd_op_t op)
{
+ opinfo.op = GD_OP_NONE;
- opinfo.op = GD_OP_NONE;
-
- return 0;
-
+ return 0;
}
int32_t
-glusterd_op_free_ctx (glusterd_op_t op, void *ctx)
+glusterd_op_free_ctx(glusterd_op_t op, void *ctx)
{
-
- if (ctx) {
- switch (op) {
- case GD_OP_CREATE_VOLUME:
- case GD_OP_DELETE_VOLUME:
- case GD_OP_STOP_VOLUME:
- case GD_OP_ADD_BRICK:
- case GD_OP_REMOVE_BRICK:
- case GD_OP_REPLACE_BRICK:
- case GD_OP_LOG_ROTATE:
- case GD_OP_SYNC_VOLUME:
- case GD_OP_SET_VOLUME:
- case GD_OP_START_VOLUME:
- case GD_OP_RESET_VOLUME:
- case GD_OP_GSYNC_SET:
- case GD_OP_QUOTA:
- case GD_OP_PROFILE_VOLUME:
- case GD_OP_STATUS_VOLUME:
- case GD_OP_REBALANCE:
- case GD_OP_TIER_START_STOP:
- case GD_OP_HEAL_VOLUME:
- case GD_OP_STATEDUMP_VOLUME:
- case GD_OP_CLEARLOCKS_VOLUME:
- case GD_OP_DEFRAG_BRICK_VOLUME:
- case GD_OP_MAX_OPVERSION:
- case GD_OP_TIER_STATUS:
- dict_unref (ctx);
- break;
- default:
- GF_ASSERT (0);
- break;
- }
+ if (ctx) {
+ switch (op) {
+ case GD_OP_CREATE_VOLUME:
+ case GD_OP_DELETE_VOLUME:
+ case GD_OP_STOP_VOLUME:
+ case GD_OP_ADD_BRICK:
+ case GD_OP_REMOVE_BRICK:
+ case GD_OP_REPLACE_BRICK:
+ case GD_OP_LOG_ROTATE:
+ case GD_OP_SYNC_VOLUME:
+ case GD_OP_SET_VOLUME:
+ case GD_OP_START_VOLUME:
+ case GD_OP_RESET_VOLUME:
+ case GD_OP_GSYNC_SET:
+ case GD_OP_QUOTA:
+ case GD_OP_PROFILE_VOLUME:
+ case GD_OP_STATUS_VOLUME:
+ case GD_OP_REBALANCE:
+ case GD_OP_TIER_START_STOP:
+ case GD_OP_HEAL_VOLUME:
+ case GD_OP_STATEDUMP_VOLUME:
+ case GD_OP_CLEARLOCKS_VOLUME:
+ case GD_OP_DEFRAG_BRICK_VOLUME:
+ case GD_OP_MAX_OPVERSION:
+ case GD_OP_TIER_STATUS:
+ dict_unref(ctx);
+ break;
+ default:
+ GF_ASSERT(0);
+ break;
}
+ }
- glusterd_op_reset_ctx ();
- return 0;
-
+ glusterd_op_reset_ctx();
+ return 0;
}
void *
-glusterd_op_get_ctx ()
+glusterd_op_get_ctx()
{
-
- return opinfo.op_ctx;
-
+ return opinfo.op_ctx;
}
int
-glusterd_op_sm_init ()
+glusterd_op_sm_init()
{
- CDS_INIT_LIST_HEAD (&gd_op_sm_queue);
- synclock_init (&gd_op_sm_lock, SYNC_LOCK_DEFAULT);
- return 0;
+ CDS_INIT_LIST_HEAD(&gd_op_sm_queue);
+ synclock_init(&gd_op_sm_lock, SYNC_LOCK_DEFAULT);
+ return 0;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
index bd15633cdc2..7d2d28520fc 100644
--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
@@ -16,73 +16,73 @@
#include "glusterd-utils.h"
void
-glusterd_peerinfo_destroy (struct rcu_head *head)
+glusterd_peerinfo_destroy(struct rcu_head *head)
{
- int32_t ret = -1;
- glusterd_peerinfo_t *peerinfo = NULL;
- glusterd_peer_hostname_t *hostname = NULL;
- glusterd_peer_hostname_t *tmp = NULL;
+ int32_t ret = -1;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_peer_hostname_t *hostname = NULL;
+ glusterd_peer_hostname_t *tmp = NULL;
- /* This works as rcu_head is the first member of gd_rcu_head */
- peerinfo = caa_container_of ((gd_rcu_head *)head, glusterd_peerinfo_t,
- rcu_head);
+ /* This works as rcu_head is the first member of gd_rcu_head */
+ peerinfo = caa_container_of((gd_rcu_head *)head, glusterd_peerinfo_t,
+ rcu_head);
- /* Set THIS to the saved this. Needed by some functions below */
- THIS = peerinfo->rcu_head.this;
+ /* Set THIS to the saved this. Needed by some functions below */
+ THIS = peerinfo->rcu_head.this;
- CDS_INIT_LIST_HEAD (&peerinfo->uuid_list);
+ CDS_INIT_LIST_HEAD(&peerinfo->uuid_list);
- ret = glusterd_store_delete_peerinfo (peerinfo);
- if (ret) {
- gf_msg ("glusterd", GF_LOG_ERROR, errno,
- GD_MSG_PEERINFO_DELETE_FAIL,
- "Deleting peer info failed");
- }
+ ret = glusterd_store_delete_peerinfo(peerinfo);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_PEERINFO_DELETE_FAIL,
+ "Deleting peer info failed");
+ }
- GF_FREE (peerinfo->hostname);
- peerinfo->hostname = NULL;
+ GF_FREE(peerinfo->hostname);
+ peerinfo->hostname = NULL;
- cds_list_for_each_entry_safe (hostname, tmp, &peerinfo->hostnames,
- hostname_list) {
- glusterd_peer_hostname_free (hostname);
- }
+ cds_list_for_each_entry_safe(hostname, tmp, &peerinfo->hostnames,
+ hostname_list)
+ {
+ glusterd_peer_hostname_free(hostname);
+ }
- glusterd_sm_tr_log_delete (&peerinfo->sm_log);
- pthread_mutex_destroy (&peerinfo->delete_lock);
- GF_FREE (peerinfo);
+ glusterd_sm_tr_log_delete(&peerinfo->sm_log);
+ pthread_mutex_destroy(&peerinfo->delete_lock);
+ GF_FREE(peerinfo);
- peerinfo = NULL;
+ peerinfo = NULL;
- return;
+ return;
}
int32_t
-glusterd_peerinfo_cleanup (glusterd_peerinfo_t *peerinfo)
+glusterd_peerinfo_cleanup(glusterd_peerinfo_t *peerinfo)
{
- GF_ASSERT (peerinfo);
- gf_boolean_t quorum_action = _gf_false;
- glusterd_conf_t *priv = THIS->private;
-
- if (pthread_mutex_trylock (&peerinfo->delete_lock)) {
- /* Someone else is already deleting the peer, so give up */
- return 0;
- }
-
- if (peerinfo->quorum_contrib != QUORUM_NONE)
- quorum_action = _gf_true;
- if (peerinfo->rpc) {
- peerinfo->rpc = glusterd_rpc_clnt_unref (priv, peerinfo->rpc);
- peerinfo->rpc = NULL;
- }
+ GF_ASSERT(peerinfo);
+ gf_boolean_t quorum_action = _gf_false;
+ glusterd_conf_t *priv = THIS->private;
- cds_list_del_rcu (&peerinfo->uuid_list);
- /* Saving THIS, as it is needed by the callback function */
- peerinfo->rcu_head.this = THIS;
- call_rcu (&peerinfo->rcu_head.head, glusterd_peerinfo_destroy);
-
- if (quorum_action)
- glusterd_do_quorum_action ();
+ if (pthread_mutex_trylock(&peerinfo->delete_lock)) {
+ /* Someone else is already deleting the peer, so give up */
return 0;
+ }
+
+ if (peerinfo->quorum_contrib != QUORUM_NONE)
+ quorum_action = _gf_true;
+ if (peerinfo->rpc) {
+ peerinfo->rpc = glusterd_rpc_clnt_unref(priv, peerinfo->rpc);
+ peerinfo->rpc = NULL;
+ }
+
+ cds_list_del_rcu(&peerinfo->uuid_list);
+ /* Saving THIS, as it is needed by the callback function */
+ peerinfo->rcu_head.this = THIS;
+ call_rcu(&peerinfo->rcu_head.head, glusterd_peerinfo_destroy);
+
+ if (quorum_action)
+ glusterd_do_quorum_action();
+ return 0;
}
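glusterd_peerinfo_cleanup() is the classic userspace-RCU deferred free: cds_list_del_rcu() hides the peer from new readers, and call_rcu() postpones glusterd_peerinfo_destroy() until every pre-existing reader has left its read-side critical section. The caa_container_of() recovery in the destroy callback works only because the rcu_head sits at the start of the object. The pattern, reduced to a self-contained sketch (assumes liburcu; the struct is illustrative, not glusterd's):

    #include <stdlib.h>
    #include <urcu.h>
    #include <urcu/rculist.h>

    struct obj {
        struct rcu_head rcu_head; /* first member: container_of is trivial */
        struct cds_list_head list;
    };

    static void
    obj_free_rcu(struct rcu_head *head)
    {
        struct obj *o = caa_container_of(head, struct obj, rcu_head);
        free(o); /* safe: the grace period has elapsed */
    }

    static void
    obj_unlink(struct obj *o)
    {
        cds_list_del_rcu(&o->list);           /* hide from new readers */
        call_rcu(&o->rcu_head, obj_free_rcu); /* defer the actual free */
    }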
/* glusterd_peerinfo_find_by_hostname searches for a peer which matches the
@@ -94,79 +94,76 @@ glusterd_peerinfo_cleanup (glusterd_peerinfo_t *peerinfo)
* the resolved addrinfos.
*/
glusterd_peerinfo_t *
-glusterd_peerinfo_find_by_hostname (const char *hoststr)
+glusterd_peerinfo_find_by_hostname(const char *hoststr)
{
- int ret = -1;
- struct addrinfo *addr = NULL;
- struct addrinfo *p = NULL;
- xlator_t *this = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
-
-
- this = THIS;
- GF_ASSERT (hoststr);
-
- peerinfo = NULL;
-
- peerinfo = gd_peerinfo_find_from_hostname (hoststr);
- if (peerinfo)
- return peerinfo;
-
- ret = getaddrinfo (hoststr, NULL, NULL, &addr);
- if (ret != 0) {
- gf_msg (this->name, GF_LOG_ERROR, ret,
- GD_MSG_GETADDRINFO_FAIL,
- "error in getaddrinfo: %s\n",
- gai_strerror(ret));
- goto out;
- }
-
- for (p = addr; p != NULL; p = p->ai_next) {
- peerinfo = gd_peerinfo_find_from_addrinfo (p);
- if (peerinfo) {
- freeaddrinfo (addr);
- return peerinfo;
- }
+ int ret = -1;
+ struct addrinfo *addr = NULL;
+ struct addrinfo *p = NULL;
+ xlator_t *this = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+
+ this = THIS;
+ GF_ASSERT(hoststr);
+
+ peerinfo = NULL;
+
+ peerinfo = gd_peerinfo_find_from_hostname(hoststr);
+ if (peerinfo)
+ return peerinfo;
+
+ ret = getaddrinfo(hoststr, NULL, NULL, &addr);
+ if (ret != 0) {
+ gf_msg(this->name, GF_LOG_ERROR, ret, GD_MSG_GETADDRINFO_FAIL,
+ "error in getaddrinfo: %s\n", gai_strerror(ret));
+ goto out;
+ }
+
+ for (p = addr; p != NULL; p = p->ai_next) {
+ peerinfo = gd_peerinfo_find_from_addrinfo(p);
+ if (peerinfo) {
+ freeaddrinfo(addr);
+ return peerinfo;
}
+ }
out:
- gf_msg_debug (this->name, 0, "Unable to find friend: %s", hoststr);
- if (addr)
- freeaddrinfo (addr);
- return NULL;
+ gf_msg_debug(this->name, 0, "Unable to find friend: %s", hoststr);
+ if (addr)
+ freeaddrinfo(addr);
+ return NULL;
}
int
-glusterd_hostname_to_uuid (char *hostname, uuid_t uuid)
+glusterd_hostname_to_uuid(char *hostname, uuid_t uuid)
{
- GF_ASSERT (hostname);
- GF_ASSERT (uuid);
+ GF_ASSERT(hostname);
+ GF_ASSERT(uuid);
- glusterd_peerinfo_t *peerinfo = NULL;
- glusterd_conf_t *priv = NULL;
- int ret = -1;
- xlator_t *this = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ int ret = -1;
+ xlator_t *this = NULL;
- this = THIS;
- GF_ASSERT (this);
- priv = this->private;
- GF_ASSERT (priv);
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
- peerinfo = glusterd_peerinfo_find_by_hostname (hostname);
- if (peerinfo) {
- ret = 0;
- gf_uuid_copy (uuid, peerinfo->uuid);
+ peerinfo = glusterd_peerinfo_find_by_hostname(hostname);
+ if (peerinfo) {
+ ret = 0;
+ gf_uuid_copy(uuid, peerinfo->uuid);
+ } else {
+ if (gf_is_local_addr(hostname)) {
+ gf_uuid_copy(uuid, MY_UUID);
+ ret = 0;
} else {
- if (gf_is_local_addr (hostname)) {
- gf_uuid_copy (uuid, MY_UUID);
- ret = 0;
- } else {
- ret = -1;
- }
+ ret = -1;
}
+ }
- gf_msg_debug (this->name, 0, "returning %d", ret);
- return ret;
+ gf_msg_debug(this->name, 0, "returning %d", ret);
+ return ret;
}
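glusterd_hostname_to_uuid() resolves with a fixed precedence: a known peer wins and yields that peer's uuid; otherwise a local address maps to MY_UUID; anything else is an error. A hypothetical caller that only wants the local/remote distinction:

    /* Illustrative helper, not from this commit. */
    static gf_boolean_t
    host_resolves_locally(char *host)
    {
        uuid_t uuid = {0};

        if (glusterd_hostname_to_uuid(host, uuid))
            return _gf_false; /* neither a peer nor a local address */
        return gf_uuid_compare(uuid, MY_UUID) ? _gf_false : _gf_true;
    }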
/* glusterd_peerinfo_find_by_uuid searches for a peer which matches the
@@ -174,40 +171,39 @@ glusterd_hostname_to_uuid (char *hostname, uuid_t uuid)
* Returns NULL otherwise.
*/
glusterd_peerinfo_t *
-glusterd_peerinfo_find_by_uuid (uuid_t uuid)
+glusterd_peerinfo_find_by_uuid(uuid_t uuid)
{
- glusterd_conf_t *priv = NULL;
- glusterd_peerinfo_t *entry = NULL;
- glusterd_peerinfo_t *found = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
+ glusterd_conf_t *priv = NULL;
+ glusterd_peerinfo_t *entry = NULL;
+ glusterd_peerinfo_t *found = NULL;
+ xlator_t *this = NULL;
- priv = this->private;
+ this = THIS;
+ GF_ASSERT(this);
- GF_ASSERT (priv);
+ priv = this->private;
- if (gf_uuid_is_null (uuid))
- return NULL;
+ GF_ASSERT(priv);
- rcu_read_lock ();
- cds_list_for_each_entry_rcu (entry, &priv->peers, uuid_list) {
- if (!gf_uuid_compare (entry->uuid, uuid)) {
+ if (gf_uuid_is_null(uuid))
+ return NULL;
- gf_msg_debug (this->name, 0,
- "Friend found... state: %s",
- glusterd_friend_sm_state_name_get (entry->state.state));
- found = entry; /* Probably should be rcu_dereferenced */
- break;
- }
+ rcu_read_lock();
+ cds_list_for_each_entry_rcu(entry, &priv->peers, uuid_list)
+ {
+ if (!gf_uuid_compare(entry->uuid, uuid)) {
+ gf_msg_debug(this->name, 0, "Friend found... state: %s",
+ glusterd_friend_sm_state_name_get(entry->state.state));
+ found = entry; /* Probably should be rcu_dereferenced */
+ break;
}
- rcu_read_unlock ();
+ }
+ rcu_read_unlock();
- if (!found)
- gf_msg_debug (this->name, 0,
- "Friend with uuid: %s, not found", uuid_utoa (uuid));
- return found;
+ if (!found)
+ gf_msg_debug(this->name, 0, "Friend with uuid: %s, not found",
+ uuid_utoa(uuid));
+ return found;
}
/* glusterd_peerinfo_find will search for a peer matching either @uuid or
@@ -215,39 +211,36 @@ glusterd_peerinfo_find_by_uuid (uuid_t uuid)
* Returns NULL otherwise.
*/
glusterd_peerinfo_t *
-glusterd_peerinfo_find (uuid_t uuid, const char *hostname)
+glusterd_peerinfo_find(uuid_t uuid, const char *hostname)
{
- glusterd_peerinfo_t *peerinfo = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT(this);
- if (uuid) {
- peerinfo = glusterd_peerinfo_find_by_uuid (uuid);
-
- if (peerinfo) {
- return peerinfo;
- } else {
- gf_msg_debug (this->name, 0,
- "Unable to find peer by uuid: %s",
- uuid_utoa (uuid));
- }
+ if (uuid) {
+ peerinfo = glusterd_peerinfo_find_by_uuid(uuid);
+ if (peerinfo) {
+ return peerinfo;
+ } else {
+ gf_msg_debug(this->name, 0, "Unable to find peer by uuid: %s",
+ uuid_utoa(uuid));
}
+ }
- if (hostname) {
- peerinfo = glusterd_peerinfo_find_by_hostname (hostname);
+ if (hostname) {
+ peerinfo = glusterd_peerinfo_find_by_hostname(hostname);
- if (peerinfo) {
- return peerinfo;
- } else {
- gf_msg_debug (this->name, 0,
- "Unable to find hostname: %s", hostname);
- }
+ if (peerinfo) {
+ return peerinfo;
+ } else {
+ gf_msg_debug(this->name, 0, "Unable to find hostname: %s",
+ hostname);
}
- return NULL;
+ }
+ return NULL;
}
/* glusterd_peerinfo_new will create a new peerinfo object and set its members
@@ -259,296 +252,295 @@ glusterd_peerinfo_find (uuid_t uuid, const char *hostname)
* object.
*/
glusterd_peerinfo_t *
-glusterd_peerinfo_new (glusterd_friend_sm_state_t state, uuid_t *uuid,
- const char *hostname, int port)
+glusterd_peerinfo_new(glusterd_friend_sm_state_t state, uuid_t *uuid,
+ const char *hostname, int port)
{
- glusterd_peerinfo_t *new_peer = NULL;
- int ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
- GF_ASSERT (conf);
-
- new_peer = GF_CALLOC (1, sizeof (*new_peer), gf_gld_mt_peerinfo_t);
- if (!new_peer)
- goto out;
+ glusterd_peerinfo_t *new_peer = NULL;
+ int ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
- CDS_INIT_LIST_HEAD (&new_peer->uuid_list);
+ this = THIS;
+ GF_ASSERT(this);
+ conf = this->private;
+ GF_ASSERT(conf);
- new_peer->state.state = state;
+ new_peer = GF_CALLOC(1, sizeof(*new_peer), gf_gld_mt_peerinfo_t);
+ if (!new_peer)
+ goto out;
- CDS_INIT_LIST_HEAD (&new_peer->hostnames);
- if (hostname) {
- ret = gd_add_address_to_peer (new_peer, hostname);
- if (ret)
- goto out;
- /* Also set it to peerinfo->hostname. Doing this as we use
- * peerinfo->hostname in a lot of places and is really hard to
- * get everything right
- */
- new_peer->hostname = gf_strdup (hostname);
- }
+ CDS_INIT_LIST_HEAD(&new_peer->uuid_list);
- if (uuid) {
- gf_uuid_copy (new_peer->uuid, *uuid);
- }
+ new_peer->state.state = state;
- ret = glusterd_sm_tr_log_init (&new_peer->sm_log,
- glusterd_friend_sm_state_name_get,
- glusterd_friend_sm_event_name_get,
- GLUSTERD_TR_LOG_SIZE);
+ CDS_INIT_LIST_HEAD(&new_peer->hostnames);
+ if (hostname) {
+ ret = gd_add_address_to_peer(new_peer, hostname);
if (ret)
- goto out;
+ goto out;
+ /* Also set it to peerinfo->hostname. Doing this as we use
+ * peerinfo->hostname in a lot of places and is really hard to
+ * get everything right
+ */
+ new_peer->hostname = gf_strdup(hostname);
+ }
+
+ if (uuid) {
+ gf_uuid_copy(new_peer->uuid, *uuid);
+ }
- if (new_peer->state.state == GD_FRIEND_STATE_BEFRIENDED)
- new_peer->quorum_contrib = QUORUM_WAITING;
- new_peer->port = port;
+ ret = glusterd_sm_tr_log_init(
+ &new_peer->sm_log, glusterd_friend_sm_state_name_get,
+ glusterd_friend_sm_event_name_get, GLUSTERD_TR_LOG_SIZE);
+ if (ret)
+ goto out;
- pthread_mutex_init (&new_peer->delete_lock, NULL);
+ if (new_peer->state.state == GD_FRIEND_STATE_BEFRIENDED)
+ new_peer->quorum_contrib = QUORUM_WAITING;
+ new_peer->port = port;
- new_peer->generation = uatomic_add_return (&conf->generation, 1);
+ pthread_mutex_init(&new_peer->delete_lock, NULL);
+
+ new_peer->generation = uatomic_add_return(&conf->generation, 1);
out:
- if (ret && new_peer) {
- glusterd_peerinfo_cleanup (new_peer);
- new_peer = NULL;
- }
- return new_peer;
+ if (ret && new_peer) {
+ glusterd_peerinfo_cleanup(new_peer);
+ new_peer = NULL;
+ }
+ return new_peer;
}
/* Check if the all peers are connected and befriended, except the peer
* specified (the peer being detached)
*/
gf_boolean_t
-glusterd_chk_peers_connected_befriended (uuid_t skip_uuid)
+glusterd_chk_peers_connected_befriended(uuid_t skip_uuid)
{
- gf_boolean_t ret = _gf_true;
- glusterd_peerinfo_t *peerinfo = NULL;
- glusterd_conf_t *priv = NULL;
-
- priv= THIS->private;
- GF_ASSERT (priv);
-
- rcu_read_lock ();
- cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
-
- if (!gf_uuid_is_null (skip_uuid) && !gf_uuid_compare (skip_uuid,
- peerinfo->uuid))
- continue;
-
- if ((GD_FRIEND_STATE_BEFRIENDED != peerinfo->state.state)
- || !(peerinfo->connected)) {
- ret = _gf_false;
- break;
- }
+ gf_boolean_t ret = _gf_true;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ priv = THIS->private;
+ GF_ASSERT(priv);
+
+ rcu_read_lock();
+ cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
+ {
+ if (!gf_uuid_is_null(skip_uuid) &&
+ !gf_uuid_compare(skip_uuid, peerinfo->uuid))
+ continue;
+
+ if ((GD_FRIEND_STATE_BEFRIENDED != peerinfo->state.state) ||
+ !(peerinfo->connected)) {
+ ret = _gf_false;
+ break;
}
- rcu_read_unlock ();
+ }
+ rcu_read_unlock();
- gf_msg_debug (THIS->name, 0, "Returning %s",
- (ret?"TRUE":"FALSE"));
- return ret;
+ gf_msg_debug(THIS->name, 0, "Returning %s", (ret ? "TRUE" : "FALSE"));
+ return ret;
}
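
The function above is the first of several walkers in this hunk that follow the
same liburcu discipline: enter a read-side critical section, traverse the peers
list with the cds_list_for_each_entry_rcu() macro, and leave the section before
the iterated pointer escapes. A minimal sketch of the idiom, with hypothetical
names and assuming the liburcu headers are available:

    #include <urcu.h>
    #include <urcu/rculist.h>

    /* Count connected peers; "peers" stands in for conf->peers. The
     * peerinfo pointer must not be used after rcu_read_unlock(). */
    static int
    count_connected_peers(struct cds_list_head *peers)
    {
        glusterd_peerinfo_t *peerinfo = NULL;
        int count = 0;

        rcu_read_lock();
        cds_list_for_each_entry_rcu(peerinfo, peers, uuid_list)
        {
            if (peerinfo->connected)
                count++;
        }
        rcu_read_unlock();
        return count;
    }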
/* Return hostname for given uuid if it exists
* else return NULL
*/
char *
-glusterd_uuid_to_hostname (uuid_t uuid)
+glusterd_uuid_to_hostname(uuid_t uuid)
{
- char *hostname = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_peerinfo_t *entry = NULL;
-
- priv = THIS->private;
- GF_ASSERT (priv);
-
- if (!gf_uuid_compare (MY_UUID, uuid)) {
- hostname = gf_strdup ("localhost");
+ char *hostname = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_peerinfo_t *entry = NULL;
+
+ priv = THIS->private;
+ GF_ASSERT(priv);
+
+ if (!gf_uuid_compare(MY_UUID, uuid)) {
+ hostname = gf_strdup("localhost");
+ }
+ rcu_read_lock();
+ if (!cds_list_empty(&priv->peers)) {
+ cds_list_for_each_entry_rcu(entry, &priv->peers, uuid_list)
+ {
+ if (!gf_uuid_compare(entry->uuid, uuid)) {
+ hostname = gf_strdup(entry->hostname);
+ break;
+ }
}
- rcu_read_lock ();
- if (!cds_list_empty (&priv->peers)) {
- cds_list_for_each_entry_rcu (entry, &priv->peers, uuid_list) {
- if (!gf_uuid_compare (entry->uuid, uuid)) {
- hostname = gf_strdup (entry->hostname);
- break;
- }
- }
- }
- rcu_read_unlock ();
+ }
+ rcu_read_unlock();
- return hostname;
+ return hostname;
}
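
glusterd_uuid_to_hostname() returns a gf_strdup() copy, so ownership passes to
the caller. A hedged usage sketch:

    /* Hypothetical usage: the result is heap-allocated, so the caller
     * must GF_FREE() it; NULL means the uuid matched neither MY_UUID
     * nor any known peer. */
    char *host = glusterd_uuid_to_hostname(uuid);
    if (host) {
        gf_msg_debug("glusterd", 0, "uuid belongs to %s", host);
        GF_FREE(host);
    }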
-char*
-gd_peer_uuid_str (glusterd_peerinfo_t *peerinfo)
+char *
+gd_peer_uuid_str(glusterd_peerinfo_t *peerinfo)
{
- if ((peerinfo == NULL) || gf_uuid_is_null (peerinfo->uuid))
- return NULL;
+ if ((peerinfo == NULL) || gf_uuid_is_null(peerinfo->uuid))
+ return NULL;
- if (peerinfo->uuid_str[0] == '\0')
- uuid_utoa_r (peerinfo->uuid, peerinfo->uuid_str);
+ if (peerinfo->uuid_str[0] == '\0')
+ uuid_utoa_r(peerinfo->uuid, peerinfo->uuid_str);
- return peerinfo->uuid_str;
+ return peerinfo->uuid_str;
}
gf_boolean_t
-glusterd_are_all_peers_up ()
+glusterd_are_all_peers_up()
{
- glusterd_peerinfo_t *peerinfo = NULL;
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- gf_boolean_t peers_up = _gf_false;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", this, out);
-
- conf = this->private;
- GF_VALIDATE_OR_GOTO (this->name, conf, out);
-
- rcu_read_lock ();
- cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
- if (!peerinfo->connected) {
- rcu_read_unlock ();
- goto out;
- }
+ glusterd_peerinfo_t *peerinfo = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ gf_boolean_t peers_up = _gf_false;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", this, out);
+
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
+
+ rcu_read_lock();
+ cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
+ {
+ if (!peerinfo->connected) {
+ rcu_read_unlock();
+ goto out;
}
- rcu_read_unlock ();
+ }
+ rcu_read_unlock();
- peers_up = _gf_true;
+ peers_up = _gf_true;
out:
- return peers_up;
+ return peers_up;
}
gf_boolean_t
-glusterd_are_vol_all_peers_up (glusterd_volinfo_t *volinfo,
- struct cds_list_head *peers,
- char **down_peerstr)
+glusterd_are_vol_all_peers_up(glusterd_volinfo_t *volinfo,
+ struct cds_list_head *peers, char **down_peerstr)
{
- glusterd_peerinfo_t *peerinfo = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- gf_boolean_t ret = _gf_false;
-
- cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- if (!gf_uuid_compare (brickinfo->uuid, MY_UUID))
- continue;
-
- rcu_read_lock ();
- cds_list_for_each_entry_rcu (peerinfo, peers, uuid_list) {
- if (gf_uuid_compare (peerinfo->uuid, brickinfo->uuid))
- continue;
-
- /*Found peer who owns the brick, return false
- * if peer is not connected or not friend */
- if (!(peerinfo->connected) ||
- (peerinfo->state.state !=
- GD_FRIEND_STATE_BEFRIENDED)) {
- *down_peerstr = gf_strdup (peerinfo->hostname);
- gf_msg_debug (THIS->name, 0, "Peer %s is down. ",
- peerinfo->hostname);
- rcu_read_unlock ();
- goto out;
- }
- }
- rcu_read_unlock ();
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ gf_boolean_t ret = _gf_false;
+
+ cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
+ {
+ if (!gf_uuid_compare(brickinfo->uuid, MY_UUID))
+ continue;
+
+ rcu_read_lock();
+ cds_list_for_each_entry_rcu(peerinfo, peers, uuid_list)
+ {
+ if (gf_uuid_compare(peerinfo->uuid, brickinfo->uuid))
+ continue;
+
+            /* Found peer who owns the brick; return false
+             * if the peer is not connected or not a friend */
+ if (!(peerinfo->connected) ||
+ (peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED)) {
+ *down_peerstr = gf_strdup(peerinfo->hostname);
+ gf_msg_debug(THIS->name, 0, "Peer %s is down. ",
+ peerinfo->hostname);
+ rcu_read_unlock();
+ goto out;
+ }
}
+ rcu_read_unlock();
+ }
- ret = _gf_true;
+ ret = _gf_true;
out:
- gf_msg_debug ("glusterd", 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug("glusterd", 0, "Returning %d", ret);
+ return ret;
}
int32_t
-glusterd_peer_hostname_new (const char *hostname,
- glusterd_peer_hostname_t **name)
+glusterd_peer_hostname_new(const char *hostname,
+ glusterd_peer_hostname_t **name)
{
- glusterd_peer_hostname_t *peer_hostname = NULL;
- int32_t ret = -1;
+ glusterd_peer_hostname_t *peer_hostname = NULL;
+ int32_t ret = -1;
- GF_ASSERT (hostname);
- GF_ASSERT (name);
+ GF_ASSERT(hostname);
+ GF_ASSERT(name);
- peer_hostname = GF_CALLOC (1, sizeof (*peer_hostname),
- gf_gld_mt_peer_hostname_t);
+ peer_hostname = GF_CALLOC(1, sizeof(*peer_hostname),
+ gf_gld_mt_peer_hostname_t);
- if (!peer_hostname)
- goto out;
+ if (!peer_hostname)
+ goto out;
- peer_hostname->hostname = gf_strdup (hostname);
- CDS_INIT_LIST_HEAD (&peer_hostname->hostname_list);
+ peer_hostname->hostname = gf_strdup(hostname);
+ CDS_INIT_LIST_HEAD(&peer_hostname->hostname_list);
- *name = peer_hostname;
- ret = 0;
+ *name = peer_hostname;
+ ret = 0;
out:
- gf_msg_debug ("glusterd", 0, "Returning %d", ret);
- return ret;
+ gf_msg_debug("glusterd", 0, "Returning %d", ret);
+ return ret;
}
void
-glusterd_peer_hostname_free (glusterd_peer_hostname_t *name)
+glusterd_peer_hostname_free(glusterd_peer_hostname_t *name)
{
- if (!name)
- return;
+ if (!name)
+ return;
- cds_list_del_init (&name->hostname_list);
+ cds_list_del_init(&name->hostname_list);
- GF_FREE (name->hostname);
- name->hostname = NULL;
+ GF_FREE(name->hostname);
+ name->hostname = NULL;
- GF_FREE (name);
+ GF_FREE(name);
- return;
+ return;
}
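
glusterd_peer_hostname_new() hands back an unlinked node, and the free routine
above calls cds_list_del_init() before releasing it, so it is safe whether or
not the node was ever linked into a hostnames list. A sketch of the pairing,
with a hypothetical address:

    glusterd_peer_hostname_t *name = NULL;

    if (glusterd_peer_hostname_new("node3.example.com", &name) == 0) {
        /* ... optionally link name->hostname_list somewhere ... */
        glusterd_peer_hostname_free(name);
    }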
gf_boolean_t
-gd_peer_has_address (glusterd_peerinfo_t *peerinfo, const char *address)
+gd_peer_has_address(glusterd_peerinfo_t *peerinfo, const char *address)
{
- gf_boolean_t ret = _gf_false;
- glusterd_peer_hostname_t *hostname = NULL;
+ gf_boolean_t ret = _gf_false;
+ glusterd_peer_hostname_t *hostname = NULL;
- GF_VALIDATE_OR_GOTO ("glusterd", (peerinfo != NULL), out);
- GF_VALIDATE_OR_GOTO ("glusterd", (address != NULL), out);
+ GF_VALIDATE_OR_GOTO("glusterd", (peerinfo != NULL), out);
+ GF_VALIDATE_OR_GOTO("glusterd", (address != NULL), out);
- cds_list_for_each_entry (hostname, &peerinfo->hostnames,
- hostname_list) {
- if (strcmp (hostname->hostname, address) == 0) {
- ret = _gf_true;
- break;
- }
+ cds_list_for_each_entry(hostname, &peerinfo->hostnames, hostname_list)
+ {
+ if (strcmp(hostname->hostname, address) == 0) {
+ ret = _gf_true;
+ break;
}
+ }
out:
- return ret;
+ return ret;
}
int
-gd_add_address_to_peer (glusterd_peerinfo_t *peerinfo, const char *address)
+gd_add_address_to_peer(glusterd_peerinfo_t *peerinfo, const char *address)
{
+ int ret = -1;
+ glusterd_peer_hostname_t *hostname = NULL;
- int ret = -1;
- glusterd_peer_hostname_t *hostname = NULL;
+ GF_VALIDATE_OR_GOTO("glusterd", (peerinfo != NULL), out);
+ GF_VALIDATE_OR_GOTO("glusterd", (address != NULL), out);
- GF_VALIDATE_OR_GOTO ("glusterd", (peerinfo != NULL), out);
- GF_VALIDATE_OR_GOTO ("glusterd", (address != NULL), out);
+ if (gd_peer_has_address(peerinfo, address)) {
+ ret = 0;
+ goto out;
+ }
- if (gd_peer_has_address (peerinfo, address)) {
- ret = 0;
- goto out;
- }
+ ret = glusterd_peer_hostname_new(address, &hostname);
+ if (ret)
+ goto out;
- ret = glusterd_peer_hostname_new (address, &hostname);
- if (ret)
- goto out;
+ cds_list_add_tail_rcu(&hostname->hostname_list, &peerinfo->hostnames);
- cds_list_add_tail_rcu (&hostname->hostname_list, &peerinfo->hostnames);
-
- ret = 0;
+ ret = 0;
out:
- return ret;
+ return ret;
}
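
Because gd_add_address_to_peer() consults gd_peer_has_address() before
allocating, re-adding a known address is a harmless no-op. A hedged
illustration, with a hypothetical address:

    /* The second call finds the existing entry and returns 0 without
     * allocating a duplicate node. */
    ret = gd_add_address_to_peer(peerinfo, "node2.example.com");
    ret = gd_add_address_to_peer(peerinfo, "node2.example.com");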
@@ -558,80 +550,78 @@ out:
/* gd_add_friend_to_dict() adds details of @friend into @dict with the given
 * @prefix. The additional hostnames are added only if the cluster op-version
 * is >= GD_OP_VERSION_3_6_0
 */
int
-gd_add_friend_to_dict (glusterd_peerinfo_t *friend, dict_t *dict,
- const char *prefix)
+gd_add_friend_to_dict(glusterd_peerinfo_t *friend, dict_t *dict,
+ const char *prefix)
{
- int ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- char key[100] = {0,};
- glusterd_peer_hostname_t *address = NULL;
- int count = 0;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO ("glusterd", (this != NULL), out);
-
- conf = this->private;
- GF_VALIDATE_OR_GOTO (this->name, (conf != NULL), out);
+ int ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ char key[100] = {
+ 0,
+ };
+ glusterd_peer_hostname_t *address = NULL;
+ int count = 0;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO("glusterd", (this != NULL), out);
+
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, (conf != NULL), out);
+
+ GF_VALIDATE_OR_GOTO(this->name, (friend != NULL), out);
+ GF_VALIDATE_OR_GOTO(this->name, (dict != NULL), out);
+ GF_VALIDATE_OR_GOTO(this->name, (prefix != NULL), out);
+
+ snprintf(key, sizeof(key), "%s.uuid", prefix);
+ ret = dict_set_dynstr_with_alloc(dict, key, uuid_utoa(friend->uuid));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set key %s in dict", key);
+ goto out;
+ }
+
+ /* Setting the first hostname from the list with this key for backward
+ * compatibility
+ */
+ snprintf(key, sizeof(key), "%s.hostname", prefix);
+ address = cds_list_entry(&friend->hostnames, glusterd_peer_hostname_t,
+ hostname_list);
+ ret = dict_set_dynstr_with_alloc(dict, key, address->hostname);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set key %s in dict", key);
+ goto out;
+ }
+
+ if (conf->op_version < GD_OP_VERSION_3_6_0) {
+ ret = 0;
+ goto out;
+ }
- GF_VALIDATE_OR_GOTO (this->name, (friend != NULL), out);
- GF_VALIDATE_OR_GOTO (this->name, (dict != NULL), out);
- GF_VALIDATE_OR_GOTO (this->name, (prefix != NULL), out);
+ address = NULL;
+ count = 0;
+ cds_list_for_each_entry(address, &friend->hostnames, hostname_list)
+ {
+ GF_VALIDATE_OR_GOTO(this->name, (address != NULL), out);
- snprintf (key, sizeof (key), "%s.uuid", prefix);
- ret = dict_set_dynstr_with_alloc (dict, key, uuid_utoa (friend->uuid));
+ snprintf(key, sizeof(key), "%s.hostname%d", prefix, count);
+ ret = dict_set_dynstr_with_alloc(dict, key, address->hostname);
if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set key %s in dict", key);
- goto out;
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set key %s in dict", key);
+ goto out;
}
-
- /* Setting the first hostname from the list with this key for backward
- * compatibility
- */
- snprintf (key, sizeof (key), "%s.hostname", prefix);
- address = cds_list_entry (&friend->hostnames, glusterd_peer_hostname_t,
- hostname_list);
- ret = dict_set_dynstr_with_alloc (dict, key, address->hostname);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set key %s in dict", key);
- goto out;
- }
-
- if (conf->op_version < GD_OP_VERSION_3_6_0) {
- ret = 0;
- goto out;
- }
-
- address = NULL;
- count = 0;
- cds_list_for_each_entry (address, &friend->hostnames, hostname_list) {
- GF_VALIDATE_OR_GOTO (this->name, (address != NULL), out);
-
- snprintf (key, sizeof (key), "%s.hostname%d", prefix, count);
- ret = dict_set_dynstr_with_alloc (dict, key, address->hostname);
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set key %s in dict", key);
- goto out;
- }
- count++;
- }
- ret = snprintf (key, sizeof (key), "%s.address-count", prefix);
- ret = dict_set_int32n (dict, key, ret, count);
- if (ret)
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SET_FAILED,
- "Failed to set key %s in dict", key);
+ count++;
+ }
+ ret = snprintf(key, sizeof(key), "%s.address-count", prefix);
+ ret = dict_set_int32n(dict, key, ret, count);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set key %s in dict", key);
out:
- gf_msg_debug (this ? this->name : "glusterd", 0, "Returning %d",
- ret);
- return ret;
+ gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret);
+ return ret;
}
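
For a prefix of, say, "friend1", the function above produces the dict layout
sketched below (uuid and hostnames are illustrative); peers running an
op-version older than GD_OP_VERSION_3_6_0 receive only the first two keys.

    /* Illustrative key layout for prefix "friend1":
     *
     *   friend1.uuid          = "5e8f2d6e-0fbe-4e9f-8f3a-0c1d2e3f4a5b"
     *   friend1.hostname      = "node2"        (first address, legacy key)
     *   friend1.hostname0     = "node2"
     *   friend1.hostname1     = "node2.lab.example.com"
     *   friend1.address-count = 2
     */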
@@ -639,39 +629,39 @@ out:
/* gd_peerinfo_find_from_hostname iterates over all the addresses saved for each
 * peer and compares each of them to the given hostname.
 * Returns the matched peer if found else returns NULL
 */
glusterd_peerinfo_t *
-gd_peerinfo_find_from_hostname (const char *hoststr)
+gd_peerinfo_find_from_hostname(const char *hoststr)
{
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_peerinfo_t *peer = NULL;
- glusterd_peerinfo_t *found = NULL;
- glusterd_peer_hostname_t *tmphost = NULL;
-
- this = THIS;
- GF_ASSERT (this != NULL);
- priv = this->private;
- GF_VALIDATE_OR_GOTO (this->name, (priv != NULL), out);
-
- GF_VALIDATE_OR_GOTO (this->name, (hoststr != NULL), out);
-
- rcu_read_lock ();
- cds_list_for_each_entry_rcu (peer, &priv->peers, uuid_list) {
- cds_list_for_each_entry_rcu (tmphost, &peer->hostnames,
- hostname_list) {
- if (!strncasecmp (tmphost->hostname, hoststr, 1024)) {
- gf_msg_debug (this->name, 0,
- "Friend %s found.. state: %d",
- tmphost->hostname, peer->state.state);
- found = peer; /* Probably needs to be
- dereferenced*/
- goto unlock;
- }
- }
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_peerinfo_t *peer = NULL;
+ glusterd_peerinfo_t *found = NULL;
+ glusterd_peer_hostname_t *tmphost = NULL;
+
+ this = THIS;
+ GF_ASSERT(this != NULL);
+ priv = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, (priv != NULL), out);
+
+ GF_VALIDATE_OR_GOTO(this->name, (hoststr != NULL), out);
+
+ rcu_read_lock();
+ cds_list_for_each_entry_rcu(peer, &priv->peers, uuid_list)
+ {
+ cds_list_for_each_entry_rcu(tmphost, &peer->hostnames, hostname_list)
+ {
+ if (!strncasecmp(tmphost->hostname, hoststr, 1024)) {
+ gf_msg_debug(this->name, 0, "Friend %s found.. state: %d",
+ tmphost->hostname, peer->state.state);
+                found = peer; /* Probably needs to be
+                                 dereferenced */
+ goto unlock;
+ }
}
+ }
unlock:
- rcu_read_unlock ();
+ rcu_read_unlock();
out:
- return found;
+ return found;
}
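
The comparison above is case-insensitive (strncasecmp over at most 1024 bytes),
and, as the in-code comment notes, the returned peer is not reference-counted.
A hedged usage sketch; RCU read-side critical sections nest, so the outer lock
keeps the pointer valid after the call returns:

    rcu_read_lock();
    peer = gd_peerinfo_find_from_hostname("Node2"); /* matches "node2" too */
    if (peer)
        gf_msg_debug("glusterd", 0, "found peer %s", peer->hostname);
    rcu_read_unlock();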
@@ -685,61 +675,59 @@ out:
/* gd_peerinfo_find_from_addrinfo iterates over all the addresses saved for each
 * peer, resolves them and compares the resolved addresses to the given addrinfo.
 * Returns the matched peer if found else returns NULL
 */
glusterd_peerinfo_t *
-gd_peerinfo_find_from_addrinfo (const struct addrinfo *addr)
+gd_peerinfo_find_from_addrinfo(const struct addrinfo *addr)
{
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- glusterd_peerinfo_t *peer = NULL;
- glusterd_peerinfo_t *found = NULL;
- glusterd_peer_hostname_t *address = NULL;
- int ret = 0;
- struct addrinfo *paddr = NULL;
- struct addrinfo *tmp = NULL;
-
- this = THIS;
- GF_ASSERT (this != NULL);
- conf = this->private;
- GF_VALIDATE_OR_GOTO (this->name, (conf != NULL), out);
-
- GF_VALIDATE_OR_GOTO (this->name, (addr != NULL), out);
-
- rcu_read_lock ();
- cds_list_for_each_entry_rcu (peer, &conf->peers, uuid_list) {
- cds_list_for_each_entry_rcu (address, &peer->hostnames,
- hostname_list) {
- /* TODO: Cache the resolved addrinfos to improve
- * performance
- */
- ret = getaddrinfo (address->hostname, NULL, NULL,
- &paddr);
- if (ret) {
- /* Don't fail if getaddrinfo fails, continue
- * onto the next address
- */
- gf_msg_trace (this->name, 0,
- "getaddrinfo for %s failed (%s)",
- address->hostname, gai_strerror (ret));
- ret = 0;
- continue;
- }
-
- for (tmp = paddr; tmp != NULL; tmp = tmp->ai_next) {
- if (gf_compare_sockaddr (addr->ai_addr,
- tmp->ai_addr)) {
- found = peer; /* (de)referenced? */
- break;
- }
- }
-
- freeaddrinfo (paddr);
- if (found)
- goto unlock;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ glusterd_peerinfo_t *peer = NULL;
+ glusterd_peerinfo_t *found = NULL;
+ glusterd_peer_hostname_t *address = NULL;
+ int ret = 0;
+ struct addrinfo *paddr = NULL;
+ struct addrinfo *tmp = NULL;
+
+ this = THIS;
+ GF_ASSERT(this != NULL);
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, (conf != NULL), out);
+
+ GF_VALIDATE_OR_GOTO(this->name, (addr != NULL), out);
+
+ rcu_read_lock();
+ cds_list_for_each_entry_rcu(peer, &conf->peers, uuid_list)
+ {
+ cds_list_for_each_entry_rcu(address, &peer->hostnames, hostname_list)
+ {
+ /* TODO: Cache the resolved addrinfos to improve
+ * performance
+ */
+            ret = getaddrinfo(address->hostname, NULL, NULL, &paddr);