author | Gluster Ant <bugzilla-bot@gluster.org> | 2018-09-12 17:52:45 +0530
---|---|---
committer | Nigel Babu <nigelb@redhat.com> | 2018-09-12 17:52:45 +0530
commit | e16868dede6455cab644805af6fe1ac312775e13 (patch) |
tree | 15aebdb4fff2d87cf8a72f836816b3aa634da58d /xlators/mgmt/glusterd/src |
parent | 45a71c0548b6fd2c757aa2e7b7671a1411948894 (diff) |
Land part 2 of clang-format changes
Change-Id: Ia84cc24c8924e6d22d02ac15f611c10e26db99b4
Signed-off-by: Nigel Babu <nigelb@redhat.com>
Diffstat (limited to 'xlators/mgmt/glusterd/src')
48 files changed, 84470 insertions, 86631 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c b/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c index 69c7075..b01c259 100644 --- a/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c +++ b/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c @@ -17,193 +17,190 @@ #include "glusterd-svc-helper.h" void -glusterd_bitdsvc_build (glusterd_svc_t *svc) +glusterd_bitdsvc_build(glusterd_svc_t *svc) { - svc->manager = glusterd_bitdsvc_manager; - svc->start = glusterd_bitdsvc_start; - svc->stop = glusterd_bitdsvc_stop; + svc->manager = glusterd_bitdsvc_manager; + svc->start = glusterd_bitdsvc_start; + svc->stop = glusterd_bitdsvc_stop; } int -glusterd_bitdsvc_init (glusterd_svc_t *svc) +glusterd_bitdsvc_init(glusterd_svc_t *svc) { - return glusterd_svc_init (svc, bitd_svc_name); + return glusterd_svc_init(svc, bitd_svc_name); } static int -glusterd_bitdsvc_create_volfile () +glusterd_bitdsvc_create_volfile() { - char filepath[PATH_MAX] = {0,}; - int ret = -1; - glusterd_conf_t *conf = NULL; - xlator_t *this = NULL; - - this = THIS; - conf = this->private; - GF_ASSERT (conf); - - - glusterd_svc_build_volfile_path (bitd_svc_name, conf->workdir, - filepath, sizeof (filepath)); - - ret = glusterd_create_global_volfile (build_bitd_graph, - filepath, NULL); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_VOLFILE_CREATE_FAIL, - "Failed to create volfile"); - goto out; - } + char filepath[PATH_MAX] = { + 0, + }; + int ret = -1; + glusterd_conf_t *conf = NULL; + xlator_t *this = NULL; + + this = THIS; + conf = this->private; + GF_ASSERT(conf); + + glusterd_svc_build_volfile_path(bitd_svc_name, conf->workdir, filepath, + sizeof(filepath)); + + ret = glusterd_create_global_volfile(build_bitd_graph, filepath, NULL); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL, + "Failed to create volfile"); + goto out; + } out: - gf_msg_debug (this->name, 0, "Returning %d", ret); + gf_msg_debug(this->name, 0, "Returning %d", ret); - return ret; + return ret; } int -glusterd_bitdsvc_manager (glusterd_svc_t *svc, void *data, int flags) +glusterd_bitdsvc_manager(glusterd_svc_t *svc, void *data, int flags) { - int ret = 0; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - - if (!svc->inited) { - ret = glusterd_bitdsvc_init (svc); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_BITD_INIT_FAIL, "Failed to init " - "bitd service"); - goto out; - } else { - svc->inited = _gf_true; - gf_msg_debug (this->name, 0, "BitD service " - "initialized"); - } - } + int ret = 0; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); - if (glusterd_should_i_stop_bitd ()) { - ret = svc->stop (svc, SIGTERM); + if (!svc->inited) { + ret = glusterd_bitdsvc_init(svc); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BITD_INIT_FAIL, + "Failed to init " + "bitd service"); + goto out; } else { - ret = glusterd_bitdsvc_create_volfile (); - if (ret) - goto out; + svc->inited = _gf_true; + gf_msg_debug(this->name, 0, + "BitD service " + "initialized"); + } + } + + if (glusterd_should_i_stop_bitd()) { + ret = svc->stop(svc, SIGTERM); + } else { + ret = glusterd_bitdsvc_create_volfile(); + if (ret) + goto out; - ret = svc->stop (svc, SIGKILL); - if (ret) - goto out; + ret = svc->stop(svc, SIGKILL); + if (ret) + goto out; - ret = svc->start (svc, flags); - if (ret) - goto out; + ret = svc->start(svc, flags); + if (ret) + goto out; - ret = glusterd_conn_connect (&(svc->conn)); - if (ret) - goto out; - } + ret = glusterd_conn_connect(&(svc->conn)); + if (ret) + goto out; + } 
out: - if (ret) - gf_event (EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name); + if (ret) + gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name); - gf_msg_debug (THIS->name, 0, "Returning %d", ret); + gf_msg_debug(THIS->name, 0, "Returning %d", ret); - return ret; + return ret; } int -glusterd_bitdsvc_start (glusterd_svc_t *svc, int flags) +glusterd_bitdsvc_start(glusterd_svc_t *svc, int flags) { - int ret = -1; - dict_t *cmdict = NULL; + int ret = -1; + dict_t *cmdict = NULL; - cmdict = dict_new (); - if (!cmdict) - goto error_return; + cmdict = dict_new(); + if (!cmdict) + goto error_return; - ret = dict_set_str (cmdict, "cmdarg0", "--global-timer-wheel"); - if (ret) - goto dealloc_dict; + ret = dict_set_str(cmdict, "cmdarg0", "--global-timer-wheel"); + if (ret) + goto dealloc_dict; - ret = glusterd_svc_start (svc, flags, cmdict); + ret = glusterd_svc_start(svc, flags, cmdict); - dealloc_dict: - dict_unref (cmdict); - error_return: - return ret; +dealloc_dict: + dict_unref(cmdict); +error_return: + return ret; } int -glusterd_bitdsvc_stop (glusterd_svc_t *svc, int sig) +glusterd_bitdsvc_stop(glusterd_svc_t *svc, int sig) { - return glusterd_svc_stop (svc, sig); + return glusterd_svc_stop(svc, sig); } int -glusterd_bitdsvc_reconfigure () +glusterd_bitdsvc_reconfigure() { - int ret = -1; - xlator_t *this = NULL; - glusterd_conf_t *priv = NULL; - gf_boolean_t identical = _gf_false; - - this = THIS; - GF_VALIDATE_OR_GOTO (this->name, this, out); - - priv = this->private; - GF_VALIDATE_OR_GOTO (this->name, priv, out); - - if (glusterd_should_i_stop_bitd ()) - goto manager; - /* - * Check both OLD and NEW volfiles, if they are SAME by size - * and cksum i.e. "character-by-character". If YES, then - * NOTHING has been changed, just return. - */ - ret = glusterd_svc_check_volfile_identical (priv->bitd_svc.name, - build_bitd_graph, - &identical); - if (ret) - goto out; - if (identical) { - ret = 0; - goto out; - } - - /* - * They are not identical. Find out if the topology is changed - * OR just the volume options. If just the options which got - * changed, then inform the xlator to reconfigure the options. - */ - identical = _gf_false; /* RESET the FLAG */ - ret = glusterd_svc_check_topology_identical (priv->bitd_svc.name, - build_bitd_graph, - &identical); - if (ret) - goto out; /*not able to compare due to some corruption */ - - /* Topology is not changed, but just the options. But write the - * options to bitd volfile, so that bitd will be reconfigured. - */ - if (identical) { - ret = glusterd_bitdsvc_create_volfile (); - if (ret == 0) {/* Only if above PASSES */ - ret = glusterd_fetchspec_notify (THIS); - } - goto out; + int ret = -1; + xlator_t *this = NULL; + glusterd_conf_t *priv = NULL; + gf_boolean_t identical = _gf_false; + + this = THIS; + GF_VALIDATE_OR_GOTO(this->name, this, out); + + priv = this->private; + GF_VALIDATE_OR_GOTO(this->name, priv, out); + + if (glusterd_should_i_stop_bitd()) + goto manager; + /* + * Check both OLD and NEW volfiles, if they are SAME by size + * and cksum i.e. "character-by-character". If YES, then + * NOTHING has been changed, just return. + */ + ret = glusterd_svc_check_volfile_identical(priv->bitd_svc.name, + build_bitd_graph, &identical); + if (ret) + goto out; + if (identical) { + ret = 0; + goto out; + } + + /* + * They are not identical. Find out if the topology is changed + * OR just the volume options. If just the options which got + * changed, then inform the xlator to reconfigure the options. 
+ */ + identical = _gf_false; /* RESET the FLAG */ + ret = glusterd_svc_check_topology_identical(priv->bitd_svc.name, + build_bitd_graph, &identical); + if (ret) + goto out; /*not able to compare due to some corruption */ + + /* Topology is not changed, but just the options. But write the + * options to bitd volfile, so that bitd will be reconfigured. + */ + if (identical) { + ret = glusterd_bitdsvc_create_volfile(); + if (ret == 0) { /* Only if above PASSES */ + ret = glusterd_fetchspec_notify(THIS); } + goto out; + } manager: - /* - * bitd volfile's topology has been changed. bitd server needs - * to be RESTARTED to ACT on the changed volfile. - */ - ret = priv->bitd_svc.manager (&(priv->bitd_svc), NULL, - PROC_START_NO_WAIT); + /* + * bitd volfile's topology has been changed. bitd server needs + * to be RESTARTED to ACT on the changed volfile. + */ + ret = priv->bitd_svc.manager(&(priv->bitd_svc), NULL, PROC_START_NO_WAIT); out: - gf_msg_debug (this->name, 0, "Returning %d", ret); - return ret; + gf_msg_debug(this->name, 0, "Returning %d", ret); + return ret; } diff --git a/xlators/mgmt/glusterd/src/glusterd-bitrot.c b/xlators/mgmt/glusterd/src/glusterd-bitrot.c index 10babdb..0608bad 100644 --- a/xlators/mgmt/glusterd/src/glusterd-bitrot.c +++ b/xlators/mgmt/glusterd/src/glusterd-bitrot.c @@ -27,712 +27,711 @@ #include <dlfcn.h> const char *gd_bitrot_op_list[GF_BITROT_OPTION_TYPE_MAX] = { - [GF_BITROT_OPTION_TYPE_NONE] = "none", - [GF_BITROT_OPTION_TYPE_ENABLE] = "enable", - [GF_BITROT_OPTION_TYPE_DISABLE] = "disable", - [GF_BITROT_OPTION_TYPE_SCRUB_THROTTLE] = "scrub-throttle", - [GF_BITROT_OPTION_TYPE_SCRUB_FREQ] = "scrub-frequency", - [GF_BITROT_OPTION_TYPE_SCRUB] = "scrub", - [GF_BITROT_OPTION_TYPE_EXPIRY_TIME] = "expiry-time", + [GF_BITROT_OPTION_TYPE_NONE] = "none", + [GF_BITROT_OPTION_TYPE_ENABLE] = "enable", + [GF_BITROT_OPTION_TYPE_DISABLE] = "disable", + [GF_BITROT_OPTION_TYPE_SCRUB_THROTTLE] = "scrub-throttle", + [GF_BITROT_OPTION_TYPE_SCRUB_FREQ] = "scrub-frequency", + [GF_BITROT_OPTION_TYPE_SCRUB] = "scrub", + [GF_BITROT_OPTION_TYPE_EXPIRY_TIME] = "expiry-time", }; int -__glusterd_handle_bitrot (rpcsvc_request_t *req) +__glusterd_handle_bitrot(rpcsvc_request_t *req) { - int32_t ret = -1; - gf_cli_req cli_req = { {0,} }; - dict_t *dict = NULL; - glusterd_op_t cli_op = GD_OP_BITROT; - char *volname = NULL; - char *scrub = NULL; - int32_t type = 0; - char msg[256] = {0,}; - xlator_t *this = NULL; - glusterd_conf_t *conf = NULL; - - GF_ASSERT (req); - - this = THIS; - GF_ASSERT (this); - - conf = this->private; - GF_ASSERT (conf); - - ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + int32_t ret = -1; + gf_cli_req cli_req = {{ + 0, + }}; + dict_t *dict = NULL; + glusterd_op_t cli_op = GD_OP_BITROT; + char *volname = NULL; + char *scrub = NULL; + int32_t type = 0; + char msg[256] = { + 0, + }; + xlator_t *this = NULL; + glusterd_conf_t *conf = NULL; + + GF_ASSERT(req); + + this = THIS; + GF_ASSERT(this); + + conf = this->private; + GF_ASSERT(conf); + + ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + if (cli_req.dict.dict_len) { + /* Unserialize the dictionary */ + dict = dict_new(); + + ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len, + &dict); if (ret < 0) { - req->rpc_err = GARBAGE_ARGS; - goto out; - } - - if (cli_req.dict.dict_len) { - /* Unserialize the dictionary */ - dict = dict_new (); - - ret = dict_unserialize (cli_req.dict.dict_val, 
- cli_req.dict.dict_len, - &dict); - if (ret < 0) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_UNSERIALIZE_FAIL, "failed to " - "unserialize req-buffer to dictionary"); - snprintf (msg, sizeof (msg), "Unable to decode the " - "command"); - goto out; - } else { - dict->extra_stdfree = cli_req.dict.dict_val; - } - } - - ret = dict_get_str (dict, "volname", &volname); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, + "failed to " + "unserialize req-buffer to dictionary"); + snprintf(msg, sizeof(msg), + "Unable to decode the " + "command"); + goto out; + } else { + dict->extra_stdfree = cli_req.dict.dict_val; + } + } + + ret = dict_get_str(dict, "volname", &volname); + if (ret) { + snprintf(msg, sizeof(msg), "Unable to get volume name"); + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get volume name, " + "while handling bitrot command"); + goto out; + } + + ret = dict_get_int32(dict, "type", &type); + if (ret) { + snprintf(msg, sizeof(msg), "Unable to get type of command"); + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get type of cmd, " + "while handling bitrot command"); + goto out; + } + + if (conf->op_version < GD_OP_VERSION_3_7_0) { + snprintf(msg, sizeof(msg), + "Cannot execute command. The " + "cluster is operating at version %d. Bitrot command " + "%s is unavailable in this version", + conf->op_version, gd_bitrot_op_list[type]); + ret = -1; + goto out; + } + + if (type == GF_BITROT_CMD_SCRUB_STATUS) { + /* Backward compatibility handling for scrub status command*/ + if (conf->op_version < GD_OP_VERSION_3_7_7) { + snprintf(msg, sizeof(msg), + "Cannot execute command. " + "The cluster is operating at version %d. " + "Bitrot scrub status command unavailable in " + "this version", + conf->op_version); + ret = -1; + goto out; + } + + ret = dict_get_str(dict, "scrub-value", &scrub); if (ret) { - snprintf (msg, sizeof (msg), "Unable to get volume name"); - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to get volume name, " - "while handling bitrot command"); - goto out; + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Failed to get scrub value."); + ret = -1; + goto out; } - ret = dict_get_int32 (dict, "type", &type); - if (ret) { - snprintf (msg, sizeof (msg), "Unable to get type of command"); - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to get type of cmd, " - "while handling bitrot command"); - goto out; + if (!strncmp(scrub, "status", SLEN("status"))) { + ret = glusterd_op_begin_synctask(req, GD_OP_SCRUB_STATUS, dict); + goto out; } + } - if (conf->op_version < GD_OP_VERSION_3_7_0) { - snprintf (msg, sizeof (msg), "Cannot execute command. The " - "cluster is operating at version %d. Bitrot command " - "%s is unavailable in this version", conf->op_version, - gd_bitrot_op_list[type]); - ret = -1; - goto out; + if (type == GF_BITROT_CMD_SCRUB_ONDEMAND) { + /* Backward compatibility handling for scrub status command*/ + if (conf->op_version < GD_OP_VERSION_3_9_0) { + snprintf(msg, sizeof(msg), + "Cannot execute command. " + "The cluster is operating at version %d. " + "Bitrot scrub ondemand command unavailable in " + "this version", + conf->op_version); + ret = -1; + goto out; } - if (type == GF_BITROT_CMD_SCRUB_STATUS) { - /* Backward compatibility handling for scrub status command*/ - if (conf->op_version < GD_OP_VERSION_3_7_7) { - snprintf (msg, sizeof (msg), "Cannot execute command. 
" - "The cluster is operating at version %d. " - "Bitrot scrub status command unavailable in " - "this version", conf->op_version); - ret = -1; - goto out; - } - - ret = dict_get_str (dict, "scrub-value", &scrub); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_GET_FAILED, - "Failed to get scrub value."); - ret = -1; - goto out; - } - - if (!strncmp (scrub, "status", SLEN ("status"))) { - ret = glusterd_op_begin_synctask (req, - GD_OP_SCRUB_STATUS, - dict); - goto out; - } + ret = dict_get_str(dict, "scrub-value", &scrub); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Failed to get scrub value."); + ret = -1; + goto out; } - if (type == GF_BITROT_CMD_SCRUB_ONDEMAND) { - /* Backward compatibility handling for scrub status command*/ - if (conf->op_version < GD_OP_VERSION_3_9_0) { - snprintf (msg, sizeof (msg), "Cannot execute command. " - "The cluster is operating at version %d. " - "Bitrot scrub ondemand command unavailable in " - "this version", conf->op_version); - ret = -1; - goto out; - } - - ret = dict_get_str (dict, "scrub-value", &scrub); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_GET_FAILED, - "Failed to get scrub value."); - ret = -1; - goto out; - } - - if (!strncmp (scrub, "ondemand", SLEN ("ondemand"))) { - ret = glusterd_op_begin_synctask (req, - GD_OP_SCRUB_ONDEMAND, - dict); - goto out; - } + if (!strncmp(scrub, "ondemand", SLEN("ondemand"))) { + ret = glusterd_op_begin_synctask(req, GD_OP_SCRUB_ONDEMAND, dict); + goto out; } + } - ret = glusterd_op_begin_synctask (req, GD_OP_BITROT, dict); + ret = glusterd_op_begin_synctask(req, GD_OP_BITROT, dict); out: - if (ret) { - if (msg[0] == '\0') - snprintf (msg, sizeof (msg), "Bitrot operation failed"); - ret = glusterd_op_send_cli_response (cli_op, ret, 0, req, - dict, msg); - } + if (ret) { + if (msg[0] == '\0') + snprintf(msg, sizeof(msg), "Bitrot operation failed"); + ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, msg); + } - return ret; + return ret; } int -glusterd_handle_bitrot (rpcsvc_request_t *req) +glusterd_handle_bitrot(rpcsvc_request_t *req) { - return glusterd_big_locked_handler (req, __glusterd_handle_bitrot); + return glusterd_big_locked_handler(req, __glusterd_handle_bitrot); } static int -glusterd_bitrot_scrub_throttle (glusterd_volinfo_t *volinfo, dict_t *dict, - char *key, char **op_errstr) +glusterd_bitrot_scrub_throttle(glusterd_volinfo_t *volinfo, dict_t *dict, + char *key, char **op_errstr) { - int32_t ret = -1; - char *scrub_throttle = NULL; - char *option = NULL; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - - ret = dict_get_str (dict, "scrub-throttle-value", &scrub_throttle); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to fetch scrub-" - "throttle value"); - goto out; - } - - option = gf_strdup (scrub_throttle); - ret = dict_set_dynstr (volinfo->dict, key, option); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_SET_FAILED, "Failed to set option %s", - key); - goto out; - } - - ret = glusterd_scrubsvc_reconfigure (); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SCRUBSVC_RECONF_FAIL, - "Failed to reconfigure scrub " - "services"); - goto out; - } + int32_t ret = -1; + char *scrub_throttle = NULL; + char *option = NULL; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + + ret = dict_get_str(dict, "scrub-throttle-value", &scrub_throttle); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, 
GD_MSG_DICT_GET_FAILED, + "Unable to fetch scrub-" + "throttle value"); + goto out; + } + + option = gf_strdup(scrub_throttle); + ret = dict_set_dynstr(volinfo->dict, key, option); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Failed to set option %s", key); + goto out; + } + + ret = glusterd_scrubsvc_reconfigure(); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SCRUBSVC_RECONF_FAIL, + "Failed to reconfigure scrub " + "services"); + goto out; + } out: - return ret; + return ret; } static int -glusterd_bitrot_scrub_freq (glusterd_volinfo_t *volinfo, dict_t *dict, - char *key, char **op_errstr) +glusterd_bitrot_scrub_freq(glusterd_volinfo_t *volinfo, dict_t *dict, char *key, + char **op_errstr) { - int32_t ret = -1; - char *scrub_freq = NULL; - xlator_t *this = NULL; - char *option = NULL; - - this = THIS; - GF_ASSERT (this); - - ret = dict_get_str (dict, "scrub-frequency-value", &scrub_freq); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to fetch scrub-" - "freq value"); - goto out; - } - - option = gf_strdup (scrub_freq); - ret = dict_set_dynstr (volinfo->dict, key, option); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_SET_FAILED, "Failed to set option %s", - key); - goto out; - } - - ret = glusterd_scrubsvc_reconfigure (); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SCRUBSVC_RECONF_FAIL, - "Failed to reconfigure scrub " - "services"); - goto out; - } + int32_t ret = -1; + char *scrub_freq = NULL; + xlator_t *this = NULL; + char *option = NULL; + + this = THIS; + GF_ASSERT(this); + + ret = dict_get_str(dict, "scrub-frequency-value", &scrub_freq); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to fetch scrub-" + "freq value"); + goto out; + } + + option = gf_strdup(scrub_freq); + ret = dict_set_dynstr(volinfo->dict, key, option); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Failed to set option %s", key); + goto out; + } + + ret = glusterd_scrubsvc_reconfigure(); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SCRUBSVC_RECONF_FAIL, + "Failed to reconfigure scrub " + "services"); + goto out; + } out: - return ret; + return ret; } static int -glusterd_bitrot_scrub (glusterd_volinfo_t *volinfo, dict_t *dict, - char *key, char **op_errstr) +glusterd_bitrot_scrub(glusterd_volinfo_t *volinfo, dict_t *dict, char *key, + char **op_errstr) { - int32_t ret = -1; - char *scrub_value = NULL; - xlator_t *this = NULL; - char *option = NULL; - - this = THIS; - GF_ASSERT (this); - - ret = dict_get_str (dict, "scrub-value", &scrub_value); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_GET_FAILED, "Unable to fetch scrub" - "value"); - goto out; - } - - if (!strcmp (scrub_value, "resume")) { - option = gf_strdup ("Active"); - } else { - option = gf_strdup (scrub_value); - } - - ret = dict_set_dynstr (volinfo->dict, key, option); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_SET_FAILED, "Failed to set option %s", - key); - goto out; - } - - ret = glusterd_scrubsvc_reconfigure (); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SCRUBSVC_RECONF_FAIL, - "Failed to reconfigure scrub " - "services"); - goto out; - } + int32_t ret = -1; + char *scrub_value = NULL; + xlator_t *this = NULL; + char *option = NULL; + + this = THIS; + GF_ASSERT(this); + + ret = dict_get_str(dict, "scrub-value", &scrub_value); + if (ret) { + gf_msg(this->name, 
GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Unable to fetch scrub" + "value"); + goto out; + } + + if (!strcmp(scrub_value, "resume")) { + option = gf_strdup("Active"); + } else { + option = gf_strdup(scrub_value); + } + + ret = dict_set_dynstr(volinfo->dict, key, option); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Failed to set option %s", key); + goto out; + } + + ret = glusterd_scrubsvc_reconfigure(); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SCRUBSVC_RECONF_FAIL, + "Failed to reconfigure scrub " + "services"); + goto out; + } out: - return ret; + return ret; } static int -glusterd_bitrot_expiry_time (glusterd_volinfo_t *volinfo, dict_t *dict, - char *key, char **op_errstr) +glusterd_bitrot_expiry_time(glusterd_volinfo_t *volinfo, dict_t *dict, + char *key, char **op_errstr) { - int32_t ret = -1; - uint32_t expiry_time = 0; - xlator_t *this = NULL; - char dkey[1024] = {0,}; - - this = THIS; - GF_ASSERT (this); - - ret = dict_get_uint32 (dict, "expiry-time", &expiry_time); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to get bitrot expiry" - " timer value."); - goto out; - } - - snprintf (dkey, sizeof (dkey), "%d", expiry_time); - - ret = dict_set_dynstr_with_alloc (volinfo->dict, key, dkey); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_SET_FAILED, "Failed to set option %s", - key); - goto out; - } - - ret = glusterd_bitdsvc_reconfigure (); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_BITDSVC_RECONF_FAIL, - "Failed to reconfigure bitrot" - "services"); - goto out; - } + int32_t ret = -1; + uint32_t expiry_time = 0; + xlator_t *this = NULL; + char dkey[1024] = { + 0, + }; + + this = THIS; + GF_ASSERT(this); + + ret = dict_get_uint32(dict, "expiry-time", &expiry_time); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get bitrot expiry" + " timer value."); + goto out; + } + + snprintf(dkey, sizeof(dkey), "%d", expiry_time); + + ret = dict_set_dynstr_with_alloc(volinfo->dict, key, dkey); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Failed to set option %s", key); + goto out; + } + + ret = glusterd_bitdsvc_reconfigure(); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BITDSVC_RECONF_FAIL, + "Failed to reconfigure bitrot" + "services"); + goto out; + } out: - return ret; + return ret; } static int -glusterd_bitrot_enable (glusterd_volinfo_t *volinfo, char **op_errstr) +glusterd_bitrot_enable(glusterd_volinfo_t *volinfo, char **op_errstr) { - int32_t ret = -1; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - - GF_VALIDATE_OR_GOTO (this->name, volinfo, out); - GF_VALIDATE_OR_GOTO (this->name, op_errstr, out); - - if (glusterd_is_volume_started (volinfo) == 0) { - *op_errstr = gf_strdup ("Volume is stopped, start volume " - "to enable bitrot."); - ret = -1; - goto out; - } - - ret = glusterd_is_bitrot_enabled (volinfo); - if (ret) { - *op_errstr = gf_strdup ("Bitrot is already enabled"); - ret = -1; - goto out; - } - - ret = dict_set_dynstr_with_alloc (volinfo->dict, VKEY_FEATURES_BITROT, - "on"); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_SET_FAILED, "dict set failed"); - goto out; - } - - /*Once bitrot is enable scrubber should be in Active state*/ - ret = dict_set_dynstr_with_alloc (volinfo->dict, "features.scrub", - "Active"); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_SET_FAILED, "Failed to set 
option " - "features.scrub value"); - goto out; - } - - ret = 0; + int32_t ret = -1; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + + GF_VALIDATE_OR_GOTO(this->name, volinfo, out); + GF_VALIDATE_OR_GOTO(this->name, op_errstr, out); + + if (glusterd_is_volume_started(volinfo) == 0) { + *op_errstr = gf_strdup( + "Volume is stopped, start volume " + "to enable bitrot."); + ret = -1; + goto out; + } + + ret = glusterd_is_bitrot_enabled(volinfo); + if (ret) { + *op_errstr = gf_strdup("Bitrot is already enabled"); + ret = -1; + goto out; + } + + ret = dict_set_dynstr_with_alloc(volinfo->dict, VKEY_FEATURES_BITROT, "on"); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "dict set failed"); + goto out; + } + + /*Once bitrot is enable scrubber should be in Active state*/ + ret = dict_set_dynstr_with_alloc(volinfo->dict, "features.scrub", "Active"); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Failed to set option " + "features.scrub value"); + goto out; + } + + ret = 0; out: - if (ret && op_errstr && !*op_errstr) - gf_asprintf (op_errstr, "Enabling bitrot on volume %s has been " - "unsuccessful", volinfo->volname); - return ret; + if (ret && op_errstr && !*op_errstr) + gf_asprintf(op_errstr, + "Enabling bitrot on volume %s has been " + "unsuccessful", + volinfo->volname); + return ret; } static int -glusterd_bitrot_disable (glusterd_volinfo_t *volinfo, char **op_errstr) +glusterd_bitrot_disable(glusterd_volinfo_t *volinfo, char **op_errstr) { - int32_t ret = -1; - xlator_t *this = NULL; - - this = THIS; - GF_VALIDATE_OR_GOTO ("glusterd", this, out); - - GF_VALIDATE_OR_GOTO (this->name, volinfo, out); - GF_VALIDATE_OR_GOTO (this->name, op_errstr, out); - - ret = dict_set_dynstr_with_alloc (volinfo->dict, VKEY_FEATURES_BITROT, - "off"); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_SET_FAILED, "dict set failed"); - goto out; - } - - /*Once bitrot disabled scrubber should be Inactive state*/ - ret = dict_set_dynstr_with_alloc (volinfo->dict, "features.scrub", - "Inactive"); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_SET_FAILED, "Failed to set " - "features.scrub value"); - goto out; - } - - ret = 0; + int32_t ret = -1; + xlator_t *this = NULL; + + this = THIS; + GF_VALIDATE_OR_GOTO("glusterd", this, out); + + GF_VALIDATE_OR_GOTO(this->name, volinfo, out); + GF_VALIDATE_OR_GOTO(this->name, op_errstr, out); + + ret = dict_set_dynstr_with_alloc(volinfo->dict, VKEY_FEATURES_BITROT, + "off"); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "dict set failed"); + goto out; + } + + /*Once bitrot disabled scrubber should be Inactive state*/ + ret = dict_set_dynstr_with_alloc(volinfo->dict, "features.scrub", + "Inactive"); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Failed to set " + "features.scrub value"); + goto out; + } + + ret = 0; out: - if (ret && op_errstr && !*op_errstr) - gf_asprintf (op_errstr, "Disabling bitrot on volume %s has " - "been unsuccessful", volinfo->volname); - return ret; + if (ret && op_errstr && !*op_errstr) + gf_asprintf(op_errstr, + "Disabling bitrot on volume %s has " + "been unsuccessful", + volinfo->volname); + return ret; } gf_boolean_t -glusterd_should_i_stop_bitd () +glusterd_should_i_stop_bitd() { - glusterd_conf_t *conf = THIS->private; - glusterd_volinfo_t *volinfo = NULL; - gf_boolean_t stopped = _gf_true; - glusterd_brickinfo_t *brickinfo = NULL; - xlator_t *this = 
NULL; - - this = THIS; - GF_ASSERT (this); - - cds_list_for_each_entry (volinfo, &conf->volumes, vol_list) { - if (!glusterd_is_bitrot_enabled (volinfo)) - continue; - else if (volinfo->status != GLUSTERD_STATUS_STARTED) - continue; - else { - cds_list_for_each_entry (brickinfo, &volinfo->bricks, - brick_list) { - if (!glusterd_is_local_brick (this, volinfo, - brickinfo)) - continue; - stopped = _gf_false; - return stopped; - } - - /* Before stopping bitrot/scrubber daemon check - * other volume also whether respective volume - * host a brick from this node or not.*/ - continue; - } - } - - return stopped; + glusterd_conf_t *conf = THIS->private; + glusterd_volinfo_t *volinfo = NULL; + gf_boolean_t stopped = _gf_true; + glusterd_brickinfo_t *brickinfo = NULL; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + + cds_list_for_each_entry(volinfo, &conf->volumes, vol_list) + { + if (!glusterd_is_bitrot_enabled(volinfo)) + continue; + else if (volinfo->status != GLUSTERD_STATUS_STARTED) + continue; + else { + cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list) + { + if (!glusterd_is_local_brick(this, volinfo, brickinfo)) + continue; + stopped = _gf_false; + return stopped; + } + + /* Before stopping bitrot/scrubber daemon check + * other volume also whether respective volume + * host a brick from this node or not.*/ + continue; + } + } + + return stopped; } static int -glusterd_manage_bitrot (int opcode) +glusterd_manage_bitrot(int opcode) { - int ret = -1; - xlator_t *this = NULL; - glusterd_conf_t *priv = NULL; + int ret = -1; + xlator_t *this = NULL; + glusterd_conf_t *priv = NULL; - this = THIS; - GF_ASSERT (this); + this = THIS; + GF_ASSERT(this); - priv = this->private; - GF_ASSERT (priv); + priv = this->private; + GF_ASSERT(priv); - switch (opcode) { + switch (opcode) { case GF_BITROT_OPTION_TYPE_ENABLE: case GF_BITROT_OPTION_TYPE_DISABLE: - ret = priv->bitd_svc.manager (&(priv->bitd_svc), - NULL, PROC_START_NO_WAIT); - if (ret) - break; - ret = priv->scrub_svc.manager (&(priv->scrub_svc), NULL, - PROC_START_NO_WAIT); + ret = priv->bitd_svc.manager(&(priv->bitd_svc), NULL, + PROC_START_NO_WAIT); + if (ret) break; + ret = priv->scrub_svc.manager(&(priv->scrub_svc), NULL, + PROC_START_NO_WAIT); + break; default: - ret = 0; - break; - } - - return ret; + ret = 0; + break; + } + return ret; } int -glusterd_op_bitrot (dict_t *dict, char **op_errstr, dict_t *rsp_dict) +glusterd_op_bitrot(dict_t *dict, char **op_errstr, dict_t *rsp_dict) { - glusterd_volinfo_t *volinfo = NULL; - int32_t ret = -1; - char *volname = NULL; - int type = -1; - glusterd_conf_t *priv = NULL; - xlator_t *this = NULL; - - GF_ASSERT (dict); - GF_ASSERT (op_errstr); - - this = THIS; - GF_ASSERT (this); - priv = this->private; - GF_ASSERT (priv); - - ret = dict_get_str (dict, "volname", &volname); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to get volume name"); - goto out; - } - - ret = glusterd_volinfo_find (volname, &volinfo); - if (ret) { - gf_asprintf (op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname); - goto out; - } - - ret = dict_get_int32 (dict, "type", &type); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to get type from " - "dict"); - goto out; - } - - switch (type) { + glusterd_volinfo_t *volinfo = NULL; + int32_t ret = -1; + char *volname = NULL; + int type = -1; + glusterd_conf_t *priv = NULL; + xlator_t *this = NULL; + + GF_ASSERT(dict); + GF_ASSERT(op_errstr); + + this = THIS; + GF_ASSERT(this); + 
priv = this->private; + GF_ASSERT(priv); + + ret = dict_get_str(dict, "volname", &volname); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get volume name"); + goto out; + } + + ret = glusterd_volinfo_find(volname, &volinfo); + if (ret) { + gf_asprintf(op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname); + goto out; + } + + ret = dict_get_int32(dict, "type", &type); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get type from " + "dict"); + goto out; + } + + switch (type) { case GF_BITROT_OPTION_TYPE_ENABLE: - ret = glusterd_bitrot_enable (volinfo, op_errstr); - if (ret < 0) - goto out; - break; + ret = glusterd_bitrot_enable(volinfo, op_errstr); + if (ret < 0) + goto out; + break; case GF_BITROT_OPTION_TYPE_DISABLE: - ret = glusterd_bitrot_disable (volinfo, op_errstr); - if (ret < 0) - goto out; + ret = glusterd_bitrot_disable(volinfo, op_errstr); + if (ret < 0) + goto out; - break; + break; case GF_BITROT_OPTION_TYPE_SCRUB_THROTTLE: - ret = glusterd_bitrot_scrub_throttle (volinfo, dict, - "features.scrub-throttle", - op_errstr); - if (ret) - goto out; - break; + ret = glusterd_bitrot_scrub_throttle( + volinfo, dict, "features.scrub-throttle", op_errstr); + if (ret) + goto out; + break; case GF_BITROT_OPTION_TYPE_SCRUB_FREQ: - ret = glusterd_bitrot_scrub_freq (volinfo, dict, - "features.scrub-freq", - op_errstr); - if (ret) - goto out; - break; + ret = glusterd_bitrot_scrub_freq(volinfo, dict, + "features.scrub-freq", op_errstr); + if (ret) + goto out; + break; case GF_BITROT_OPTION_TYPE_SCRUB: - ret = glusterd_bitrot_scrub (volinfo, dict, "features.scrub", - op_errstr); - if (ret) - goto out; - break; + ret = glusterd_bitrot_scrub(volinfo, dict, "features.scrub", + op_errstr); + if (ret) + goto out; + break; case GF_BITROT_OPTION_TYPE_EXPIRY_TIME: - ret = glusterd_bitrot_expiry_time (volinfo, dict, - "features.expiry-time", - op_errstr); - if (ret) - goto out; + ret = glusterd_bitrot_expiry_time( + volinfo, dict, "features.expiry-time", op_errstr); + if (ret) + goto out; case GF_BITROT_CMD_SCRUB_STATUS: case GF_BITROT_CMD_SCRUB_ONDEMAND: - break; + break; default: - gf_asprintf (op_errstr, "Bitrot command failed. Invalid " - "opcode"); - ret = -1; - goto out; - } - - ret = glusterd_manage_bitrot (type); - if (ret) - goto out; - - ret = glusterd_create_volfiles_and_notify_services (volinfo); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_VOLFILE_CREATE_FAIL, "Unable to re-create " - "volfiles"); - ret = -1; - goto out; - } - - ret = glusterd_store_volinfo (volinfo, - GLUSTERD_VOLINFO_VER_AC_INCREMENT); - if (ret) { - gf_msg_debug (this->name, 0, "Failed to store volinfo for " - "bitrot"); - goto out; - } + gf_asprintf(op_errstr, + "Bitrot command failed. 
Invalid " + "opcode"); + ret = -1; + goto out; + } + + ret = glusterd_manage_bitrot(type); + if (ret) + goto out; + + ret = glusterd_create_volfiles_and_notify_services(volinfo); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL, + "Unable to re-create " + "volfiles"); + ret = -1; + goto out; + } + + ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT); + if (ret) { + gf_msg_debug(this->name, 0, + "Failed to store volinfo for " + "bitrot"); + goto out; + } out: - return ret; + return ret; } int -glusterd_op_stage_bitrot (dict_t *dict, char **op_errstr, dict_t *rsp_dict) +glusterd_op_stage_bitrot(dict_t *dict, char **op_errstr, dict_t *rsp_dict) { - int ret = 0; - char *volname = NULL; - char *scrub_cmd = NULL; - char *scrub_cmd_from_dict = NULL; - char msg[2048] = {0,}; - int type = 0; - xlator_t *this = NULL; - glusterd_conf_t *priv = NULL; - glusterd_volinfo_t *volinfo = NULL; - - this = THIS; - GF_ASSERT (this); - priv = this->private; - GF_ASSERT (priv); - - GF_ASSERT (dict); - GF_ASSERT (op_errstr); - - ret = dict_get_str (dict, "volname", &volname); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to get volume name"); - goto out; - } - - ret = glusterd_volinfo_find (volname, &volinfo); - if (ret) { - gf_asprintf (op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname); - goto out; - } - - if (!glusterd_is_volume_started (volinfo)) { - *op_errstr = gf_strdup ("Volume is stopped, start volume " - "before executing bit rot command."); + int ret = 0; + char *volname = NULL; + char *scrub_cmd = NULL; + char *scrub_cmd_from_dict = NULL; + char msg[2048] = { + 0, + }; + int type = 0; + xlator_t *this = NULL; + glusterd_conf_t *priv = NULL; + glusterd_volinfo_t *volinfo = NULL; + + this = THIS; + GF_ASSERT(this); + priv = this->private; + GF_ASSERT(priv); + + GF_ASSERT(dict); + GF_ASSERT(op_errstr); + + ret = dict_get_str(dict, "volname", &volname); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get volume name"); + goto out; + } + + ret = glusterd_volinfo_find(volname, &volinfo); + if (ret) { + gf_asprintf(op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname); + goto out; + } + + if (!glusterd_is_volume_started(volinfo)) { + *op_errstr = gf_strdup( + "Volume is stopped, start volume " + "before executing bit rot command."); + ret = -1; + goto out; + } + + ret = dict_get_int32(dict, "type", &type); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get type for " + "operation"); + + *op_errstr = gf_strdup( + "Staging stage failed for bitrot " + "operation."); + goto out; + } + + if ((GF_BITROT_OPTION_TYPE_ENABLE != type) && + (glusterd_is_bitrot_enabled(volinfo) == 0)) { + ret = -1; + gf_asprintf(op_errstr, "Bitrot is not enabled on volume %s", volname); + goto out; + } + + if ((GF_BITROT_OPTION_TYPE_SCRUB == type)) { + ret = dict_get_str(volinfo->dict, "features.scrub", + &scrub_cmd_from_dict); + if (!ret) { + ret = dict_get_str(dict, "scrub-value", &scrub_cmd); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to " + "get scrub-value"); + *op_errstr = gf_strdup( + "Staging failed for " + "bitrot operation. " + "Please check log file" + " for more details."); + goto out; + } + /* If scrubber is resume then value of scrubber will be + * "Active" in the dictionary. 
*/ + if (!strcmp(scrub_cmd_from_dict, scrub_cmd) || + (!strncmp("Active", scrub_cmd_from_dict, SLEN("Active")) && + !strncmp("resume", scrub_cmd, SLEN("resume")))) { + snprintf(msg, sizeof(msg), + "Scrub is already" + " %sd for volume %s", + scrub_cmd, volinfo->volname); + *op_errstr = gf_strdup(msg); ret = -1; goto out; + } } + ret = 0; + } - ret = dict_get_int32 (dict, "type", &type); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to get type for " - "operation"); - - *op_errstr = gf_strdup ("Staging stage failed for bitrot " - "operation."); - goto out; - } - - - if ((GF_BITROT_OPTION_TYPE_ENABLE != type) && - (glusterd_is_bitrot_enabled (volinfo) == 0)) { - ret = -1; - gf_asprintf (op_errstr, "Bitrot is not enabled on volume %s", - volname); - goto out; - } - - if ((GF_BITROT_OPTION_TYPE_SCRUB == type)) { - ret = dict_get_str (volinfo->dict, "features.scrub", - &scrub_cmd_from_dict); - if (!ret) { - ret = dict_get_str (dict, "scrub-value", &scrub_cmd); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to " - "get scrub-value"); - *op_errstr = gf_strdup ("Staging failed for " - "bitrot operation. " - "Please check log file" - " for more details."); - goto out; - } - /* If scrubber is resume then value of scrubber will be - * "Active" in the dictionary. */ - if (!strcmp (scrub_cmd_from_dict, scrub_cmd) || - (!strncmp ("Active", scrub_cmd_from_dict, - SLEN ("Active")) && !strncmp ("resume", - scrub_cmd, SLEN ("resume")))) { - snprintf (msg, sizeof (msg), "Scrub is already" - " %sd for volume %s", scrub_cmd, - volinfo->volname); - *op_errstr = gf_strdup (msg); - ret = -1; - goto out; - } - } - ret = 0; - } - - out: - if (ret && op_errstr && *op_errstr) - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_OP_STAGE_BITROT_FAIL, "%s", *op_errstr); - gf_msg_debug (this->name, 0, "Returning %d", ret); +out: + if (ret && op_errstr && *op_errstr) + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_STAGE_BITROT_FAIL, "%s", + *op_errstr); + gf_msg_debug(this->name, 0, "Returning %d", ret); - return ret; + return ret; } diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c index 73dcfaa..d4bc8b2 100644 --- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c +++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c @@ -28,31 +28,32 @@ /* misc */ gf_boolean_t -glusterd_is_tiering_supported (char *op_errstr) +glusterd_is_tiering_supported(char *op_errstr) { - xlator_t *this = NULL; - glusterd_conf_t *conf = NULL; - gf_boolean_t supported = _gf_false; + xlator_t *this = NULL; + glusterd_conf_t *conf = NULL; + gf_boolean_t supported = _gf_false; - this = THIS; - GF_VALIDATE_OR_GOTO ("glusterd", this, out); + this = THIS; + GF_VALIDATE_OR_GOTO("glusterd", this, out); - conf = this->private; - GF_VALIDATE_OR_GOTO (this->name, conf, out); + conf = this->private; + GF_VALIDATE_OR_GOTO(this->name, conf, out); - if (conf->op_version < GD_OP_VERSION_3_7_0) - goto out; + if (conf->op_version < GD_OP_VERSION_3_7_0) + goto out; - supported = _gf_true; + supported = _gf_true; out: - if (!supported && op_errstr != NULL && conf) - sprintf (op_errstr, "Tier operation failed. The cluster is " - "operating at version %d. Tiering" - " is unavailable in this version.", - conf->op_version); - - return supported; + if (!supported && op_errstr != NULL && conf) + sprintf(op_errstr, + "Tier operation failed. The cluster is " + "operating at version %d. 
Tiering" + " is unavailable in this version.", + conf->op_version); + + return supported; } /* In this function, we decide, based on the 'count' of the brick, @@ -60,2394 +61,2369 @@ out: how many of the given bricks are added. other argument are self- descriptive. */ int -add_brick_at_right_order (glusterd_brickinfo_t *brickinfo, - glusterd_volinfo_t *volinfo, int count, - int32_t stripe_cnt, int32_t replica_cnt) +add_brick_at_right_order(glusterd_brickinfo_t *brickinfo, + glusterd_volinfo_t *volinfo, int count, + int32_t stripe_cnt, int32_t replica_cnt) { - int idx = 0; - int i = 0; - int sub_cnt = 0; - glusterd_brickinfo_t *brick = NULL; - - /* The complexity of the function is in deciding at which index - to add new brick. Even though it can be defined with a complex - single formula for all volume, it is separated out to make it - more readable */ - if (stripe_cnt) { - /* common formula when 'stripe_count' is set */ - /* idx = ((count / ((stripe_cnt * volinfo->replica_count) - - volinfo->dist_leaf_count)) * volinfo->dist_leaf_count) + - (count + volinfo->dist_leaf_count); - */ - - sub_cnt = volinfo->dist_leaf_count; - - idx = ((count / ((stripe_cnt * volinfo->replica_count) - - sub_cnt)) * sub_cnt) + - (count + sub_cnt); - - goto insert_brick; - } - - /* replica count is set */ - /* common formula when 'replica_count' is set */ - /* idx = ((count / (replica_cnt - existing_replica_count)) * - existing_replica_count) + - (count + existing_replica_count); + int idx = 0; + int i = 0; + int sub_cnt = 0; + glusterd_brickinfo_t *brick = NULL; + + /* The complexity of the function is in deciding at which index + to add new brick. Even though it can be defined with a complex + single formula for all volume, it is separated out to make it + more readable */ + if (stripe_cnt) { + /* common formula when 'stripe_count' is set */ + /* idx = ((count / ((stripe_cnt * volinfo->replica_count) - + volinfo->dist_leaf_count)) * volinfo->dist_leaf_count) + + (count + volinfo->dist_leaf_count); */ - sub_cnt = volinfo->replica_count; - idx = (count / (replica_cnt - sub_cnt) * sub_cnt) + - (count + sub_cnt); + sub_cnt = volinfo->dist_leaf_count; -insert_brick: - i = 0; - cds_list_for_each_entry (brick, &volinfo->bricks, brick_list) { - i++; - if (i < idx) - continue; - gf_msg_debug (THIS->name, 0, "brick:%s index=%d, count=%d", - brick->path, idx, count); - - cds_list_add (&brickinfo->brick_list, &brick->brick_list); - break; - } + idx = ((count / ((stripe_cnt * volinfo->replica_count) - sub_cnt)) * + sub_cnt) + + (count + sub_cnt); - return 0; -} + goto insert_brick; + } + /* replica count is set */ + /* common formula when 'replica_count' is set */ + /* idx = ((count / (replica_cnt - existing_replica_count)) * + existing_replica_count) + + (count + existing_replica_count); + */ + + sub_cnt = volinfo->replica_count; + idx = (count / (replica_cnt - sub_cnt) * sub_cnt) + (count + sub_cnt); + +insert_brick: + i = 0; + cds_list_for_each_entry(brick, &volinfo->bricks, brick_list) + { + i++; + if (i < idx) + continue; + gf_msg_debug(THIS->name, 0, "brick:%s index=%d, count=%d", brick->path, + idx, count); + + cds_list_add(&brickinfo->brick_list, &brick->brick_list); + break; + } + + return 0; +} static int -gd_addbr_validate_stripe_count (glusterd_volinfo_t *volinfo, int stripe_count, - int total_bricks, int *type, char *err_str, - size_t err_len) +gd_addbr_validate_stripe_count(glusterd_volinfo_t *volinfo, int stripe_count, + int total_bricks, int *type, char *err_str, + size_t err_len) { - int ret = -1; + int 
ret = -1; - switch (volinfo->type) { + switch (volinfo->type) { case GF_CLUSTER_TYPE_NONE: - if ((volinfo->brick_count * stripe_count) == total_bricks) { - /* Change the volume type */ - *type = GF_CLUSTER_TYPE_STRIPE; - gf_msg (THIS->name, GF_LOG_INFO, 0, - GD_MSG_VOL_TYPE_CHANGING_INFO, - "Changing the type of volume %s from " - "'distribute' to 'stripe'", volinfo->volname); - ret = 0; - goto out; - } else { - snprintf (err_str, err_len, "Incorrect number of " - "bricks (%d) supplied for stripe count (%d).", - (total_bricks - volinfo->brick_count), - stripe_count); - gf_msg (THIS->name, GF_LOG_ERROR, EINVAL, - GD_MSG_INVALID_ENTRY, "%s", err_str); - goto out; - } - break; + if ((volinfo->brick_count * stripe_count) == total_bricks) { + /* Change the volume type */ + *type = GF_CLUSTER_TYPE_STRIPE; + gf_msg(THIS->name, GF_LOG_INFO, 0, + GD_MSG_VOL_TYPE_CHANGING_INFO, + "Changing the type of volume %s from " + "'distribute' to 'stripe'", + volinfo->volname); + ret = 0; + goto out; + } else { + snprintf(err_str, err_len, + "Incorrect number of " + "bricks (%d) supplied for stripe count (%d).", + (total_bricks - volinfo->brick_count), stripe_count); + gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, + "%s", err_str); + goto out; + } + break; case GF_CLUSTER_TYPE_REPLICATE: - if (!(total_bricks % (volinfo->replica_count * stripe_count))) { - /* Change the volume type */ - *type = GF_CLUSTER_TYPE_STRIPE_REPLICATE; - gf_msg (THIS->name, GF_LOG_INFO, 0, - GD_MSG_VOL_TYPE_CHANGING_INFO, - "Changing the type of volume %s from " - "'replicate' to 'replicate-stripe'", - volinfo->volname); - ret = 0; - goto out; - } else { - snprintf (err_str, err_len, "Incorrect number of " - "bricks (%d) supplied for changing volume's " - "stripe count to %d, need at least %d bricks", - (total_bricks - volinfo->brick_count), - stripe_count, - (volinfo->replica_count * stripe_count)); - gf_msg (THIS->name, GF_LOG_ERROR, EINVAL, - GD_MSG_INVALID_ENTRY, "%s", err_str); - goto out; - } - break; + if (!(total_bricks % (volinfo->replica_count * stripe_count))) { + /* Change the volume type */ + *type = GF_CLUSTER_TYPE_STRIPE_REPLICATE; + gf_msg(THIS->name, GF_LOG_INFO, 0, + GD_MSG_VOL_TYPE_CHANGING_INFO, + "Changing the type of volume %s from " + "'replicate' to 'replicate-stripe'", + volinfo->volname); + ret = 0; + goto out; + } else { + snprintf(err_str, err_len, + "Incorrect number of " + "bricks (%d) supplied for changing volume's " + "stripe count to %d, need at least %d bricks", + (total_bricks - volinfo->brick_count), stripe_count, + (volinfo->replica_count * stripe_count)); + gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, + "%s", err_str); + goto out; + } + break; case GF_CLUSTER_TYPE_STRIPE: case GF_CLUSTER_TYPE_STRIPE_REPLICATE: - if (stripe_count < volinfo->stripe_count) { - snprintf (err_str, err_len, - "Incorrect stripe count (%d) supplied. 
" - "Volume already has stripe count (%d)", - stripe_count, volinfo->stripe_count); - gf_msg (THIS->name, GF_LOG_ERROR, EINVAL, - GD_MSG_INVALID_ENTRY, "%s", err_str); - goto out; - } - if (stripe_count == volinfo->stripe_count) { - if (!(total_bricks % volinfo->dist_leaf_count)) { - /* its same as the one which exists */ - ret = 1; - goto out; - } - } - if (stripe_count > volinfo->stripe_count) { - /* We have to make sure before and after 'add-brick', - the number or subvolumes for distribute will remain - same, when stripe count is given */ - if ((volinfo->brick_count * (stripe_count * - volinfo->replica_count)) == - (total_bricks * volinfo->dist_leaf_count)) { - /* Change the dist_leaf_count */ - gf_msg (THIS->name, GF_LOG_INFO, 0, - GD_MSG_STRIPE_COUNT_CHANGE_INFO, - "Changing the stripe count of " - "volume %s from %d to %d", - volinfo->volname, - volinfo->stripe_count, stripe_count); - ret = 0; - goto out; - } - } - break; + if (stripe_count < volinfo->stripe_count) { + snprintf(err_str, err_len, + "Incorrect stripe count (%d) supplied. " + "Volume already has stripe count (%d)", + stripe_count, volinfo->stripe_count); + gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, + "%s", err_str); + goto out; + } + if (stripe_count == volinfo->stripe_count) { + if (!(total_bricks % volinfo->dist_leaf_count)) { + /* its same as the one which exists */ + ret = 1; + goto out; + } + } + if (stripe_count > volinfo->stripe_count) { + /* We have to make sure before and after 'add-brick', + the number or subvolumes for distribute will remain + same, when stripe count is given */ + if ((volinfo->brick_count * + (stripe_count * volinfo->replica_count)) == + (total_bricks * volinfo->dist_leaf_count)) { + /* Change the dist_leaf_count */ + gf_msg(THIS->name, GF_LOG_INFO, 0, + GD_MSG_STRIPE_COUNT_CHANGE_INFO, + "Changing the stripe count of " + "volume %s from %d to %d", + volinfo->volname, volinfo->stripe_count, + stripe_count); + ret = 0; + goto out; + } + } + break; case GF_CLUSTER_TYPE_DISPERSE: - snprintf (err_str, err_len, "Volume %s cannot be converted " - "from dispersed to striped-" - "dispersed", volinfo->volname); - gf_msg(THIS->name, GF_LOG_ERROR, EPERM, - GD_MSG_OP_NOT_PERMITTED, "%s", err_str); - goto out; - } + snprintf(err_str, err_len, + "Volume %s cannot be converted " + "from dispersed to striped-" + "dispersed", + volinfo->volname); + gf_msg(THIS->name, GF_LOG_ERROR, EPERM, GD_MSG_OP_NOT_PERMITTED, + "%s", err_str); + goto out; + } out: - return ret; + return ret; } static int -gd_addbr_validate_replica_count (glusterd_volinfo_t *volinfo, int replica_count, - int arbiter_count, int total_bricks, int *type, - char *err_str, int err_len) +gd_addbr_validate_replica_count(glusterd_volinfo_t *volinfo, int replica_count, + int arbiter_count, int total_bricks, int *type, + char *err_str, int err_len) { - int ret = -1; + int ret = -1; - /* replica count is set */ - switch (volinfo->type) { + /* replica count is set */ + switch (volinfo->type) { case GF_CLUSTER_TYPE_NONE: - if ((volinfo->brick_count * replica_count) == total_bricks) { - /* Change the volume type */ - *type = GF_CLUSTER_TYPE_REPLICATE; - gf_msg (THIS->name, GF_LOG_INFO, 0, - GD_MSG_VOL_TYPE_CHANGING_INFO, - "Changing the type of volume %s from " - "'distribute' to 'replica'", volinfo->volname); - ret = 0; - goto out; + if ((volinfo->brick_count * replica_count) == total_bricks) { + /* Change the volume type */ + *type = GF_CLUSTER_TYPE_REPLICATE; + gf_msg(THIS->name, GF_LOG_INFO, 0, + GD_MSG_VOL_TYPE_CHANGING_INFO, + 
"Changing the type of volume %s from " + "'distribute' to 'replica'", + volinfo->volname); + ret = 0; + goto out; - } else { - snprintf (err_str, err_len, "Incorrect number of " - "bricks (%d) supplied for replica count (%d).", - (total_bricks - volinfo->brick_count), - replica_count); - gf_msg (THIS->name, GF_LOG_ERROR, EINVAL, - GD_MSG_INVALID_ENTRY, "%s", err_str); - goto out; - } - break; + } else { + snprintf(err_str, err_len, + "Incorrect number of " + "bricks (%d) supplied for replica count (%d).", + (total_bricks - volinfo->brick_count), replica_count); + gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, + "%s", err_str); + goto out; + } + break; case GF_CLUSTER_TYPE_STRIPE: - if (!(total_bricks % (volinfo->dist_leaf_count * replica_count))) { - /* Change the volume type */ - *type = GF_CLUSTER_TYPE_STRIPE_REPLICATE; - gf_msg (THIS->name, GF_LOG_INFO, 0, - GD_MSG_VOL_TYPE_CHANGING_INFO, - "Changing the type of volume %s from " - "'stripe' to 'replicate-stripe'", - volinfo->volname); - ret = 0; - goto out; - } else { - snprintf (err_str, err_len, "Incorrect number of " - "bricks (%d) supplied for changing volume's " - "replica count to %d, need at least %d " - "bricks", - (total_bricks - volinfo->brick_count), - replica_count, (volinfo->dist_leaf_count * - replica_count)); - gf_msg (THIS->name, GF_LOG_ERROR, EINVAL, - GD_MSG_INVALID_ENTRY, "%s", err_str); - goto out; - } - break; + if (!(total_bricks % (volinfo->dist_leaf_count * replica_count))) { + /* Change the volume type */ + *type = GF_CLUSTER_TYPE_STRIPE_REPLICATE; + gf_msg(THIS->name, GF_LOG_INFO, 0, + GD_MSG_VOL_TYPE_CHANGING_INFO, + "Changing the type of volume %s from " + "'stripe' to 'replicate-stripe'", + volinfo->volname); + ret = 0; + goto out; + } else { + snprintf(err_str, err_len, + "Incorrect number of " + "bricks (%d) supplied for changing volume's " + "replica count to %d, need at least %d " + "bricks", + (total_bricks - volinfo->brick_count), replica_count, + (volinfo->dist_leaf_count * replica_count)); + gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, + "%s", err_str); + goto out; + } + break; case GF_CLUSTER_TYPE_REPLICATE: case GF_CLUSTER_TYPE_STRIPE_REPLICATE: - if (replica_count < volinfo->replica_count) { - snprintf (err_str, err_len, - "Incorrect replica count (%d) supplied. 
" - "Volume already has (%d)", - replica_count, volinfo->replica_count); - gf_msg (THIS->name, GF_LOG_ERROR, EINVAL, - GD_MSG_INVALID_ENTRY, "%s", err_str); - goto out; - } - if (replica_count == volinfo->replica_count) { - if (arbiter_count && !volinfo->arbiter_count) { - snprintf (err_str, err_len, - "Cannot convert replica 3 volume " - "to arbiter volume."); - gf_msg (THIS->name, GF_LOG_ERROR, EINVAL, - GD_MSG_INVALID_ENTRY, "%s", err_str); - goto out; - } - if (!(total_bricks % volinfo->dist_leaf_count)) { - ret = 1; - goto out; - } - } - if (replica_count > volinfo->replica_count) { - /* We have to make sure before and after 'add-brick', - the number or subvolumes for distribute will remain - same, when replica count is given */ - if ((total_bricks * volinfo->dist_leaf_count) == - (volinfo->brick_count * (replica_count * - volinfo->stripe_count))) { - /* Change the dist_leaf_count */ - gf_msg (THIS->name, GF_LOG_INFO, 0, - GD_MSG_REPLICA_COUNT_CHANGE_INFO, - "Changing the replica count of " - "volume %s from %d to %d", - volinfo->volname, volinfo->replica_count, - replica_count); - ret = 0; - goto out; - } - } - break; + if (replica_count < volinfo->replica_count) { + snprintf(err_str, err_len, + "Incorrect replica count (%d) supplied. " + "Volume already has (%d)", + replica_count, volinfo->replica_count); + gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, + "%s", err_str); + goto out; + } + if (replica_count == volinfo->replica_count) { + if (arbiter_count && !volinfo->arbiter_count) { + snprintf(err_str, err_len, + "Cannot convert replica 3 volume " + "to arbiter volume."); + gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, + GD_MSG_INVALID_ENTRY, "%s", err_str); + goto out; + } + if (!(total_bricks % volinfo->dist_leaf_count)) { + ret = 1; + goto out; + } + } + if (replica_count > volinfo->replica_count) { + /* We have to make sure before and after 'add-brick', + the number or subvolumes for distribute will remain + same, when replica count is given */ + if ((total_bricks * volinfo->dist_leaf_count) == + (volinfo->brick_count * + (replica_count * volinfo->stripe_count))) { + /* Change the dist_leaf_count */ + gf_msg(THIS->name, GF_LOG_INFO, 0, + GD_MSG_REPLICA_COUNT_CHANGE_INFO, + "Changing the replica count of " + "volume %s from %d to %d", + volinfo->volname, volinfo->replica_count, + replica_count); + ret = 0; + goto out; + } + } + break; case GF_CLUSTER_TYPE_DISPERSE: - snprintf (err_str, err_len, "Volume %s cannot be converted " - "from dispersed to replicated-" - "dispersed", volinfo->volname); - gf_msg(THIS->name, GF_LOG_ERROR, EPERM, - GD_MSG_OP_NOT_PERMITTED, "%s", err_str); - goto out; - } + snprintf(err_str, err_len, + "Volume %s cannot be converted " + "from dispersed to replicated-" + "dispersed", + volinfo->volname); + gf_msg(THIS->name, GF_LOG_ERROR, EPERM, GD_MSG_OP_NOT_PERMITTED, + "%s", err_str); + goto out; + } out: - return ret; + return ret; } static int -gd_rmbr_validate_replica_count (glusterd_volinfo_t *volinfo, - int32_t replica_count, - int32_t brick_count, char *err_str, - size_t err_len) +gd_rmbr_validate_replica_count(glusterd_volinfo_t *volinfo, + int32_t replica_count, int32_t brick_count, + char *err_str, size_t err_len) { - int ret = -1; - int replica_nodes = 0; + int ret = -1; + int replica_nodes = 0; - switch (volinfo->type) { + switch (volinfo->type) { case GF_CLUSTER_TYPE_TIER: - ret = 1; - goto out; + ret = 1; + goto out; case GF_CLUSTER_TYPE_NONE: case GF_CLUSTER_TYPE_STRIPE: case GF_CLUSTER_TYPE_DISPERSE: - snprintf (err_str, err_len, 
- "replica count (%d) option given for non replicate " - "volume %s", replica_count, volinfo->volname); - gf_msg (THIS->name, GF_LOG_WARNING, 0, - GD_MSG_VOL_NOT_REPLICA, "%s", err_str); - goto out; + snprintf(err_str, err_len, + "replica count (%d) option given for non replicate " + "volume %s", + replica_count, volinfo->volname); + gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_REPLICA, "%s", + err_str); + goto out; case GF_CLUSTER_TYPE_REPLICATE: case GF_CLUSTER_TYPE_STRIPE_REPLICATE: - /* in remove brick, you can only reduce the replica count */ - if (replica_count > volinfo->replica_count) { - snprintf (err_str, err_len, - "given replica count (%d) option is more " - "than volume %s's replica count (%d)", - replica_count, volinfo->volname, - volinfo->replica_count); - gf_msg (THIS->name, GF_LOG_WARNING, EINVAL, - GD_MSG_INVALID_ENTRY, "%s", err_str); - goto out; - } - if (replica_count == volinfo->replica_count) { - /* This means the 'replica N' option on CLI was - redundant. Check if the total number of bricks given - for removal is same as 'dist_leaf_count' */ - if (brick_count % volinfo->dist_leaf_count) { - snprintf (err_str, err_len, - "number of bricks provided (%d) is " - "not valid. need at least %d " - "(or %dxN)", brick_count, - volinfo->dist_leaf_count, - volinfo->dist_leaf_count); - gf_msg (THIS->name, GF_LOG_WARNING, EINVAL, - GD_MSG_INVALID_ENTRY, "%s", - err_str); - goto out; - } - ret = 1; - goto out; + /* in remove brick, you can only reduce the replica count */ + if (replica_count > volinfo->replica_count) { + snprintf(err_str, err_len, + "given replica count (%d) option is more " + "than volume %s's replica count (%d)", + replica_count, volinfo->volname, + volinfo->replica_count); + gf_msg(THIS->name, GF_LOG_WARNING, EINVAL, GD_MSG_INVALID_ENTRY, + "%s", err_str); + goto out; + } + if (replica_count == volinfo->replica_count) { + /* This means the 'replica N' option on CLI was + redundant. Check if the total number of bricks given + for removal is same as 'dist_leaf_count' */ + if (brick_count % volinfo->dist_leaf_count) { + snprintf(err_str, err_len, + "number of bricks provided (%d) is " + "not valid. 
need at least %d " + "(or %dxN)", + brick_count, volinfo->dist_leaf_count, + volinfo->dist_leaf_count); + gf_msg(THIS->name, GF_LOG_WARNING, EINVAL, + GD_MSG_INVALID_ENTRY, "%s", err_str); + goto out; } + ret = 1; + goto out; + } - replica_nodes = ((volinfo->brick_count / - volinfo->replica_count) * - (volinfo->replica_count - replica_count)); + replica_nodes = ((volinfo->brick_count / volinfo->replica_count) * + (volinfo->replica_count - replica_count)); - if (brick_count % replica_nodes) { - snprintf (err_str, err_len, - "need %d(xN) bricks for reducing replica " - "count of the volume from %d to %d", - replica_nodes, volinfo->replica_count, - replica_count); - goto out; - } - break; - } + if (brick_count % replica_nodes) { + snprintf(err_str, err_len, + "need %d(xN) bricks for reducing replica " + "count of the volume from %d to %d", + replica_nodes, volinfo->replica_count, replica_count); + goto out; + } + break; + } - ret = 0; + ret = 0; out: - return ret; + return ret; } /* Handler functions */ int -__glusterd_handle_add_brick (rpcsvc_request_t *req) +__glusterd_handle_add_brick(rpcsvc_request_t *req) { - int32_t ret = -1; - gf_cli_req cli_req = {{0,}}; - dict_t *dict = NULL; - char *bricks = NULL; - char *volname = NULL; - int brick_count = 0; - void *cli_rsp = NULL; - char err_str[2048] = ""; - gf_cli_rsp rsp = {0,}; - glusterd_volinfo_t *volinfo = NULL; - xlator_t *this = NULL; - int total_bricks = 0; - int32_t replica_count = 0; - int32_t arbiter_count = 0; - int32_t stripe_count = 0; - int type = 0; - glusterd_conf_t *conf = NULL; - - this = THIS; - GF_ASSERT(this); - - GF_ASSERT (req); - - conf = this->private; - GF_ASSERT (conf); - - ret = xdr_to_generic (req->msg[0], &cli_req, - (xdrproc_t)xdr_gf_cli_req); + int32_t ret = -1; + gf_cli_req cli_req = {{ + 0, + }}; + dict_t *dict = NULL; + char *bricks = NULL; + char *volname = NULL; + int brick_count = 0; + void *cli_rsp = NULL; + char err_str[2048] = ""; + gf_cli_rsp rsp = { + 0, + }; + glusterd_volinfo_t *volinfo = NULL; + xlator_t *this = NULL; + int total_bricks = 0; + int32_t replica_count = 0; + int32_t arbiter_count = 0; + int32_t stripe_count = 0; + int type = 0; + glusterd_conf_t *conf = NULL; + + this = THIS; + GF_ASSERT(this); + + GF_ASSERT(req); + + conf = this->private; + GF_ASSERT(conf); + + ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { + // failed to decode msg; + req->rpc_err = GARBAGE_ARGS; + snprintf(err_str, sizeof(err_str), "Garbage args received"); + goto out; + } + + gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_ADD_BRICK_REQ_RECVD, + "Received add brick req"); + + if (cli_req.dict.dict_len) { + /* Unserialize the dictionary */ + dict = dict_new(); + + ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len, + &dict); if (ret < 0) { - //failed to decode msg; - req->rpc_err = GARBAGE_ARGS; - snprintf (err_str, sizeof (err_str), "Garbage args received"); - goto out; - } - - gf_msg (this->name, GF_LOG_INFO, 0, - GD_MSG_ADD_BRICK_REQ_RECVD, "Received add brick req"); - - if (cli_req.dict.dict_len) { - /* Unserialize the dictionary */ - dict = dict_new (); - - ret = dict_unserialize (cli_req.dict.dict_val, - cli_req.dict.dict_len, - &dict); - if (ret < 0) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_UNSERIALIZE_FAIL, - "failed to " - "unserialize req-buffer to dictionary"); - snprintf (err_str, sizeof (err_str), "Unable to decode " - "the command"); - goto out; - } - } - - ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname); - - if 
(ret) { - snprintf (err_str, sizeof (err_str), "Unable to get volume " - "name"); - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "%s", err_str); - goto out; - } - - if (!(ret = glusterd_check_volume_exists (volname))) { - ret = -1; - snprintf (err_str, sizeof (err_str), "Volume %s does not exist", - volname); - gf_msg (this->name, GF_LOG_ERROR, EINVAL, - GD_MSG_VOL_NOT_FOUND, "%s", err_str); - goto out; - } - - ret = dict_get_int32n (dict, "count", SLEN ("count"), &brick_count); - if (ret) { - snprintf (err_str, sizeof (err_str), "Unable to get volume " - "brick count"); - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "%s", err_str); - goto out; - } - - ret = dict_get_int32n (dict, "replica-count", SLEN ("replica-count"), - &replica_count); - if (!ret) { - gf_msg (this->name, GF_LOG_INFO, errno, - GD_MSG_DICT_GET_SUCCESS, "replica-count is %d", - replica_count); - } - - ret = dict_get_int32n (dict, "arbiter-count", SLEN ("arbiter-count"), - &arbiter_count); - if (!ret) { - gf_msg (this->name, GF_LOG_INFO, errno, - GD_MSG_DICT_GET_SUCCESS, "arbiter-count is %d", - arbiter_count); - } - - ret = dict_get_int32n (dict, "stripe-count", SLEN ("stripe-count"), - &stripe_count); - if (!ret) { - gf_msg (this->name, GF_LOG_INFO, errno, - GD_MSG_DICT_GET_SUCCESS, "stripe-count is %d", - stripe_count); - } - - if (!dict_getn (dict, "force", SLEN ("force"))) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Failed to get flag"); - goto out; - } - - ret = glusterd_volinfo_find (volname, &volinfo); - if (ret) { - snprintf (err_str, sizeof (err_str), "Unable to get volinfo " - "for volume name %s", volname); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_VOLINFO_GET_FAIL, "%s", err_str); - goto out; - + gf_msg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_UNSERIALIZE_FAIL, + "failed to " + "unserialize req-buffer to dictionary"); + snprintf(err_str, sizeof(err_str), + "Unable to decode " + "the command"); + goto out; + } + } + + ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); + + if (ret) { + snprintf(err_str, sizeof(err_str), + "Unable to get volume " + "name"); + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s", + err_str); + goto out; + } + + if (!(ret = glusterd_check_volume_exists(volname))) { + ret = -1; + snprintf(err_str, sizeof(err_str), "Volume %s does not exist", volname); + gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND, "%s", + err_str); + goto out; + } + + ret = dict_get_int32n(dict, "count", SLEN("count"), &brick_count); + if (ret) { + snprintf(err_str, sizeof(err_str), + "Unable to get volume " + "brick count"); + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s", + err_str); + goto out; + } + + ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"), + &replica_count); + if (!ret) { + gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS, + "replica-count is %d", replica_count); + } + + ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"), + &arbiter_count); + if (!ret) { + gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS, + "arbiter-count is %d", arbiter_count); + } + + ret = dict_get_int32n(dict, "stripe-count", SLEN("stripe-count"), + &stripe_count); + if (!ret) { + gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS, + "stripe-count is %d", stripe_count); + } + + if (!dict_getn(dict, "force", SLEN("force"))) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Failed to get 
flag"); + goto out; + } + + ret = glusterd_volinfo_find(volname, &volinfo); + if (ret) { + snprintf(err_str, sizeof(err_str), + "Unable to get volinfo " + "for volume name %s", + volname); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, "%s", + err_str); + goto out; + } + + total_bricks = volinfo->brick_count + brick_count; + + if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) { + if (volinfo->type == GF_CLUSTER_TYPE_TIER) { + snprintf(err_str, sizeof(err_str), "Volume %s is already a tier.", + volname); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_ALREADY_TIER, "%s", + err_str); + ret = -1; + goto out; } - total_bricks = volinfo->brick_count + brick_count; - - if (dict_getn (dict, "attach-tier", SLEN ("attach-tier"))) { - if (volinfo->type == GF_CLUSTER_TYPE_TIER) { - snprintf (err_str, sizeof (err_str), - "Volume %s is already a tier.", volname); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_VOL_ALREADY_TIER, "%s", err_str); - ret = -1; - goto out; - } - - if (glusterd_is_tiering_supported(err_str) == _gf_false) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_VERSION_UNSUPPORTED, - "Tiering not supported at this version"); - ret = -1; - goto out; - } - - ret = dict_get_int32n (dict, "hot-type", SLEN ("hot-type"), &type); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, - "failed to get type from dictionary"); - goto out; - } - - goto brick_val; + if (glusterd_is_tiering_supported(err_str) == _gf_false) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED, + "Tiering not supported at this version"); + ret = -1; + goto out; } - ret = glusterd_disallow_op_for_tier (volinfo, GD_OP_ADD_BRICK, -1); + ret = dict_get_int32n(dict, "hot-type", SLEN("hot-type"), &type); if (ret) { - snprintf (err_str, sizeof (err_str), "Add-brick operation is " - "not supported on a tiered volume %s", volname); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_OP_UNSUPPORTED, "%s", err_str); - goto out; - } - - if (!stripe_count && !replica_count) { - if (volinfo->type == GF_CLUSTER_TYPE_NONE) - goto brick_val; - - if ((volinfo->brick_count < volinfo->dist_leaf_count) && - (total_bricks <= volinfo->dist_leaf_count)) - goto brick_val; - - if ((brick_count % volinfo->dist_leaf_count) != 0) { - snprintf (err_str, sizeof (err_str), "Incorrect number " - "of bricks supplied %d with count %d", - brick_count, volinfo->dist_leaf_count); - gf_msg (this->name, GF_LOG_ERROR, EINVAL, - GD_MSG_VOL_NOT_REPLICA, "%s", err_str); - ret = -1; - goto out; - } - goto brick_val; - /* done with validation.. 
below section is if stripe|replica - count is given */ - } - - /* These bricks needs to be added one per a replica or stripe volume */ - if (stripe_count) { - ret = gd_addbr_validate_stripe_count (volinfo, stripe_count, - total_bricks, &type, - err_str, - sizeof (err_str)); - if (ret == -1) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_COUNT_VALIDATE_FAILED, "%s", err_str); - goto out; - } - - /* if stripe count is same as earlier, set it back to 0 */ - if (ret == 1) - stripe_count = 0; - - ret = dict_set_int32n (dict, "stripe-count", - SLEN ("stripe-count"), stripe_count); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, - "failed to set the stripe-count in dict"); - goto out; - } - goto brick_val; - } - - ret = gd_addbr_validate_replica_count (volinfo, replica_count, - arbiter_count, total_bricks, - &type, err_str, - sizeof (err_str)); + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "failed to get type from dictionary"); + goto out; + } + + goto brick_val; + } + + ret = glusterd_disallow_op_for_tier(volinfo, GD_OP_ADD_BRICK, -1); + if (ret) { + snprintf(err_str, sizeof(err_str), + "Add-brick operation is " + "not supported on a tiered volume %s", + volname); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_UNSUPPORTED, "%s", + err_str); + goto out; + } + + if (!stripe_count && !replica_count) { + if (volinfo->type == GF_CLUSTER_TYPE_NONE) + goto brick_val; + + if ((volinfo->brick_count < volinfo->dist_leaf_count) && + (total_bricks <= volinfo->dist_leaf_count)) + goto brick_val; + + if ((brick_count % volinfo->dist_leaf_count) != 0) { + snprintf(err_str, sizeof(err_str), + "Incorrect number " + "of bricks supplied %d with count %d", + brick_count, volinfo->dist_leaf_count); + gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_REPLICA, + "%s", err_str); + ret = -1; + goto out; + } + goto brick_val; + /* done with validation.. 
below section is if stripe|replica + count is given */ + } + + /* These bricks needs to be added one per a replica or stripe volume */ + if (stripe_count) { + ret = gd_addbr_validate_stripe_count(volinfo, stripe_count, + total_bricks, &type, err_str, + sizeof(err_str)); if (ret == -1) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_COUNT_VALIDATE_FAILED, "%s", err_str); - goto out; + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COUNT_VALIDATE_FAILED, + "%s", err_str); + goto out; } - /* if replica count is same as earlier, set it back to 0 */ + /* if stripe count is same as earlier, set it back to 0 */ if (ret == 1) - replica_count = 0; + stripe_count = 0; - ret = dict_set_int32n (dict, "replica-count", - SLEN ("replica-count"), replica_count); + ret = dict_set_int32n(dict, "stripe-count", SLEN("stripe-count"), + stripe_count); if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_SET_FAILED, - "failed to set the replica-count in dict"); - goto out; - } + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "failed to set the stripe-count in dict"); + goto out; + } + goto brick_val; + } + + ret = gd_addbr_validate_replica_count(volinfo, replica_count, arbiter_count, + total_bricks, &type, err_str, + sizeof(err_str)); + if (ret == -1) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COUNT_VALIDATE_FAILED, "%s", + err_str); + goto out; + } + + /* if replica count is same as earlier, set it back to 0 */ + if (ret == 1) + replica_count = 0; + + ret = dict_set_int32n(dict, "replica-count", SLEN("replica-count"), + replica_count); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "failed to set the replica-count in dict"); + goto out; + } brick_val: - ret = dict_get_strn (dict, "bricks", SLEN ("bricks"), &bricks); + ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks); + if (ret) { + snprintf(err_str, sizeof(err_str), + "Unable to get volume " + "bricks"); + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s", + err_str); + goto out; + } + + if (type != volinfo->type) { + ret = dict_set_int32n(dict, "type", SLEN("type"), type); if (ret) { - snprintf (err_str, sizeof (err_str), "Unable to get volume " - "bricks"); - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "%s", err_str); - goto out; - } - - if (type != volinfo->type) { - ret = dict_set_int32n (dict, "type", SLEN ("type"), type); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_SET_FAILED, - "failed to set the new type in dict"); - goto out; - } - } - - if (conf->op_version <= GD_OP_VERSION_3_7_5) { - gf_msg_debug (this->name, 0, "The cluster is operating at " - "version less than or equal to %d. Falling back " - "to syncop framework.", - GD_OP_VERSION_3_7_5); - ret = glusterd_op_begin_synctask (req, GD_OP_ADD_BRICK, dict); - } else { - ret = glusterd_mgmt_v3_initiate_all_phases (req, - GD_OP_ADD_BRICK, - dict); - } + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "failed to set the new type in dict"); + goto out; + } + } + + if (conf->op_version <= GD_OP_VERSION_3_7_5) { + gf_msg_debug(this->name, 0, + "The cluster is operating at " + "version less than or equal to %d. 
Falling back " + "to syncop framework.", + GD_OP_VERSION_3_7_5); + ret = glusterd_op_begin_synctask(req, GD_OP_ADD_BRICK, dict); + } else { + ret = glusterd_mgmt_v3_initiate_all_phases(req, GD_OP_ADD_BRICK, dict); + } out: - if (ret) { - rsp.op_ret = -1; - rsp.op_errno = 0; - if (err_str[0] == '\0') - snprintf (err_str, sizeof (err_str), "Operation failed"); - rsp.op_errstr = err_str; - cli_rsp = &rsp; - glusterd_to_cli (req, cli_rsp, NULL, 0, NULL, - (xdrproc_t)xdr_gf_cli_rsp, dict); - ret = 0; //sent error to cli, prevent second reply - } + if (ret) { + rsp.op_ret = -1; + rsp.op_errno = 0; + if (err_str[0] == '\0') + snprintf(err_str, sizeof(err_str), "Operation failed"); + rsp.op_errstr = err_str; + cli_rsp = &rsp; + glusterd_to_cli(req, cli_rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp, + dict); + ret = 0; // sent error to cli, prevent second reply + } - free (cli_req.dict.dict_val); //its malloced by xdr + free(cli_req.dict.dict_val); // its malloced by xdr - return ret; + return ret; } int -glusterd_handle_add_brick (rpcsvc_request_t *req) +glusterd_handle_add_brick(rpcsvc_request_t *req) { - return glusterd_big_locked_handler (req, __glusterd_handle_add_brick); + return glusterd_big_locked_handler(req, __glusterd_handle_add_brick); } static int -subvol_matcher_init (int **subvols, int count) +subvol_matcher_init(int **subvols, int count) { - int ret = -1; + int ret = -1; - *subvols = GF_CALLOC (count, sizeof(int), gf_gld_mt_int); - if (*subvols) - ret = 0; + *subvols = GF_CALLOC(count, sizeof(int), gf_gld_mt_int); + if (*subvols) + ret = 0; - return ret; + return ret; } static void -subvol_matcher_update (int *subvols, glusterd_volinfo_t *volinfo, - glusterd_brickinfo_t *brickinfo) +subvol_matcher_update(int *subvols, glusterd_volinfo_t *volinfo, + glusterd_brickinfo_t *brickinfo) { - glusterd_brickinfo_t *tmp = NULL; - int32_t sub_volume = 0; - int pos = 0; - - cds_list_for_each_entry (tmp, &volinfo->bricks, brick_list) { - - if (strcmp (tmp->hostname, brickinfo->hostname) || - strcmp (tmp->path, brickinfo->path)) { - pos++; - continue; - } - gf_msg_debug (THIS->name, 0, LOGSTR_FOUND_BRICK, - brickinfo->hostname, brickinfo->path, - volinfo->volname); - sub_volume = (pos / volinfo->dist_leaf_count); - subvols[sub_volume]++; - break; - } - + glusterd_brickinfo_t *tmp = NULL; + int32_t sub_volume = 0; + int pos = 0; + + cds_list_for_each_entry(tmp, &volinfo->bricks, brick_list) + { + if (strcmp(tmp->hostname, brickinfo->hostname) || + strcmp(tmp->path, brickinfo->path)) { + pos++; + continue; + } + gf_msg_debug(THIS->name, 0, LOGSTR_FOUND_BRICK, brickinfo->hostname, + brickinfo->path, volinfo->volname); + sub_volume = (pos / volinfo->dist_leaf_count); + subvols[sub_volume]++; + break; + } } static int -subvol_matcher_verify (int *subvols, glusterd_volinfo_t *volinfo, char *err_str, - size_t err_len, char *vol_type, int replica_count) +subvol_matcher_verify(int *subvols, glusterd_volinfo_t *volinfo, char *err_str, + size_t err_len, char *vol_type, int replica_count) { - int i = 0; - int ret = 0; - int count = volinfo->replica_count-replica_count; - - if (replica_count) { - for (i = 0; i < volinfo->subvol_count; i++) { - if (subvols[i] != count) { - ret = -1; - snprintf (err_str, err_len, "Remove exactly %d" - " brick(s) from each subvolume.", count); - break; - } - } - return ret; - } + int i = 0; + int ret = 0; + int count = volinfo->replica_count - replica_count; - do { + if (replica_count) { + for (i = 0; i < volinfo->subvol_count; i++) { + if (subvols[i] != count) { + ret = -1; + 
snprintf(err_str, err_len, + "Remove exactly %d" + " brick(s) from each subvolume.", + count); + break; + } + } + return ret; + } - if (subvols[i] % volinfo->dist_leaf_count == 0) { - continue; - } else { - ret = -1; - snprintf (err_str, err_len, - "Bricks not from same subvol for %s", vol_type); - break; - } - } while (++i < volinfo->subvol_count); + do { + if (subvols[i] % volinfo->dist_leaf_count == 0) { + continue; + } else { + ret = -1; + snprintf(err_str, err_len, "Bricks not from same subvol for %s", + vol_type); + break; + } + } while (++i < volinfo->subvol_count); - return ret; + return ret; } static void -subvol_matcher_destroy (int *subvols) +subvol_matcher_destroy(int *subvols) { - GF_FREE (subvols); + GF_FREE(subvols); } int glusterd_set_detach_bricks(dict_t *dict, glusterd_volinfo_t *volinfo) { - char key[64] = ""; - char value[2048] = ""; /* hostname + path */ - int brick_num = 0; - int hot_brick_num = 0; - glusterd_brickinfo_t *brickinfo; - int ret = 0; - int32_t len = 0; - - /* cold tier bricks at tail of list so use reverse iteration */ - cds_list_for_each_entry_reverse (brickinfo, &volinfo->bricks, - brick_list) { - brick_num++; - if (brick_num > volinfo->tier_info.cold_brick_count) { - hot_brick_num++; - sprintf (key, "brick%d", hot_brick_num); - len = snprintf (value, sizeof(value), "%s:%s", - brickinfo->hostname, - brickinfo->path); - if ((len < 0) || (len >= sizeof(value))) { - return -1; - } + char key[64] = ""; + char value[2048] = ""; /* hostname + path */ + int brick_num = 0; + int hot_brick_num = 0; + glusterd_brickinfo_t *brickinfo; + int ret = 0; + int32_t len = 0; + + /* cold tier bricks at tail of list so use reverse iteration */ + cds_list_for_each_entry_reverse(brickinfo, &volinfo->bricks, brick_list) + { + brick_num++; + if (brick_num > volinfo->tier_info.cold_brick_count) { + hot_brick_num++; + sprintf(key, "brick%d", hot_brick_num); + len = snprintf(value, sizeof(value), "%s:%s", brickinfo->hostname, + brickinfo->path); + if ((len < 0) || (len >= sizeof(value))) { + return -1; + } - ret = dict_set_str (dict, key, strdup(value)); - if (ret) - break; - } + ret = dict_set_str(dict, key, strdup(value)); + if (ret) + break; } + } - ret = dict_set_int32n (dict, "count", SLEN ("count"), hot_brick_num); - if (ret) - return -1; + ret = dict_set_int32n(dict, "count", SLEN("count"), hot_brick_num); + if (ret) + return -1; - return hot_brick_num; + return hot_brick_num; } static int -glusterd_remove_brick_validate_arbiters (glusterd_volinfo_t *volinfo, - int32_t count, int32_t replica_count, - glusterd_brickinfo_t **brickinfo_list, - char *err_str, size_t err_len) +glusterd_remove_brick_validate_arbiters(glusterd_volinfo_t *volinfo, + int32_t count, int32_t replica_count, + glusterd_brickinfo_t **brickinfo_list, + char *err_str, size_t err_len) { - int i = 0; - int ret = 0; - glusterd_brickinfo_t *brickinfo = NULL; - glusterd_brickinfo_t *last = NULL; - char *arbiter_array = NULL; - - if ((volinfo->type != GF_CLUSTER_TYPE_REPLICATE) && - (volinfo->type != GF_CLUSTER_TYPE_STRIPE_REPLICATE)) - goto out; - - if (!replica_count || !volinfo->arbiter_count) + int i = 0; + int ret = 0; + glusterd_brickinfo_t *brickinfo = NULL; + glusterd_brickinfo_t *last = NULL; + char *arbiter_array = NULL; + + if ((volinfo->type != GF_CLUSTER_TYPE_REPLICATE) && + (volinfo->type != GF_CLUSTER_TYPE_STRIPE_REPLICATE)) + goto out; + + if (!replica_count || !volinfo->arbiter_count) + goto out; + + if (replica_count == 2) { + /* If it is an arbiter to replica 2 conversion, only permit + * 
removal of the arbiter brick.*/ + for (i = 0; i < count; i++) { + brickinfo = brickinfo_list[i]; + last = get_last_brick_of_brick_group(volinfo, brickinfo); + if (last != brickinfo) { + snprintf(err_str, err_len, + "Remove arbiter " + "brick(s) only when converting from " + "arbiter to replica 2 subvolume."); + ret = -1; goto out; - - if (replica_count == 2) { - /* If it is an arbiter to replica 2 conversion, only permit - * removal of the arbiter brick.*/ - for (i = 0; i < count; i++) { - brickinfo = brickinfo_list[i]; - last = get_last_brick_of_brick_group (volinfo, - brickinfo); - if (last != brickinfo) { - snprintf (err_str, err_len, "Remove arbiter " - "brick(s) only when converting from " - "arbiter to replica 2 subvolume."); - ret = -1; - goto out; - } - } - } else if (replica_count == 1) { - /* If it is an arbiter to plain distribute conversion, in every - * replica subvol, the arbiter has to be one of the bricks that - * are removed. */ - arbiter_array = GF_CALLOC (volinfo->subvol_count, - sizeof (*arbiter_array), - gf_common_mt_char); - if (!arbiter_array) - return -1; - for (i = 0; i < count; i++) { - brickinfo = brickinfo_list[i]; - last = get_last_brick_of_brick_group (volinfo, - brickinfo); - if (last == brickinfo) - arbiter_array[brickinfo->group] = 1; - } - for (i = 0; i < volinfo->subvol_count; i++) - if (!arbiter_array[i]) { - snprintf (err_str, err_len, "Removed bricks " - "must contain arbiter when converting" - " to plain distrubute."); - ret = -1; - break; - } - GF_FREE (arbiter_array); - } + } + } + } else if (replica_count == 1) { + /* If it is an arbiter to plain distribute conversion, in every + * replica subvol, the arbiter has to be one of the bricks that + * are removed. */ + arbiter_array = GF_CALLOC(volinfo->subvol_count, sizeof(*arbiter_array), + gf_common_mt_char); + if (!arbiter_array) + return -1; + for (i = 0; i < count; i++) { + brickinfo = brickinfo_list[i]; + last = get_last_brick_of_brick_group(volinfo, brickinfo); + if (last == brickinfo) + arbiter_array[brickinfo->group] = 1; + } + for (i = 0; i < volinfo->subvol_count; i++) + if (!arbiter_array[i]) { + snprintf(err_str, err_len, + "Removed bricks " + "must contain arbiter when converting" + " to plain distrubute."); + ret = -1; + break; + } + GF_FREE(arbiter_array); + } out: - return ret; + return ret; } int -__glusterd_handle_remove_brick (rpcsvc_request_t *req) +__glusterd_handle_remove_brick(rpcsvc_request_t *req) { - int32_t ret = -1; - gf_cli_req cli_req = {{0,}}; - dict_t *dict = NULL; - int32_t count = 0; - char *brick = NULL; - char key[64] = ""; - int keylen; - int i = 1; - glusterd_volinfo_t *volinfo = NULL; - glusterd_brickinfo_t *brickinfo = NULL; - glusterd_brickinfo_t **brickinfo_list = NULL; - int *subvols = NULL; - char err_str[2048] = ""; - gf_cli_rsp rsp = {0,}; - void *cli_rsp = NULL; - char vol_type[256] = ""; - int32_t replica_count = 0; - char *volname = 0; - xlator_t *this = NULL; - int cmd = -1; - - GF_ASSERT (req); - this = THIS; - GF_ASSERT (this); - - ret = xdr_to_generic (req->msg[0], &cli_req, - (xdrproc_t)xdr_gf_cli_req); + int32_t ret = -1; + gf_cli_req cli_req = {{ + 0, + }}; + dict_t *dict = NULL; + int32_t count = 0; + char *brick = NULL; + char key[64] = ""; + int keylen; + int i = 1; + glusterd_volinfo_t *volinfo = NULL; + glusterd_brickinfo_t *brickinfo = NULL; + glusterd_brickinfo_t **brickinfo_list = NULL; + int *subvols = NULL; + char err_str[2048] = ""; + gf_cli_rsp rsp = { + 0, + }; + void *cli_rsp = NULL; + char vol_type[256] = ""; + int32_t replica_count 
= 0; + char *volname = 0; + xlator_t *this = NULL; + int cmd = -1; + + GF_ASSERT(req); + this = THIS; + GF_ASSERT(this); + + ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { + // failed to decode msg; + req->rpc_err = GARBAGE_ARGS; + snprintf(err_str, sizeof(err_str), "Received garbage args"); + goto out; + } + + gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_REM_BRICK_REQ_RECVD, + "Received rem brick req"); + + if (cli_req.dict.dict_len) { + /* Unserialize the dictionary */ + dict = dict_new(); + + ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len, + &dict); if (ret < 0) { - //failed to decode msg; - req->rpc_err = GARBAGE_ARGS; - snprintf (err_str, sizeof (err_str), "Received garbage args"); - goto out; - } - - gf_msg (this->name, GF_LOG_INFO, 0, - GD_MSG_REM_BRICK_REQ_RECVD, - "Received rem brick req"); - - if (cli_req.dict.dict_len) { - /* Unserialize the dictionary */ - dict = dict_new (); - - ret = dict_unserialize (cli_req.dict.dict_val, - cli_req.dict.dict_len, - &dict); - if (ret < 0) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_UNSERIALIZE_FAIL, - "failed to " - "unserialize req-buffer to dictionary"); - snprintf (err_str, sizeof (err_str), "Unable to decode " - "the command"); - goto out; - } - } - - ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname); - if (ret) { - snprintf (err_str, sizeof (err_str), "Unable to get volume " - "name"); - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "%s", err_str); - goto out; - } - - ret = dict_get_int32n (dict, "count", SLEN ("count"), &count); - if (ret) { - snprintf (err_str, sizeof (err_str), "Unable to get brick " - "count"); - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "%s", err_str); - goto out; - } - - ret = glusterd_volinfo_find (volname, &volinfo); - if (ret) { - snprintf (err_str, sizeof (err_str),"Volume %s does not exist", - volname); - gf_msg (this->name, GF_LOG_ERROR, EINVAL, - GD_MSG_VOL_NOT_FOUND, "%s", err_str); - goto out; - } - - if ((volinfo->type == GF_CLUSTER_TYPE_TIER) && - (glusterd_is_tiering_supported(err_str) == _gf_false)) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_VERSION_UNSUPPORTED, - "Tiering not supported at this version"); - ret = -1; - goto out; - } - - ret = dict_get_int32n (dict, "command", SLEN ("command"), &cmd); - if (ret) { - snprintf (err_str, sizeof (err_str), "Unable to get cmd " - "ccommand"); - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "%s", err_str); - goto out; + gf_msg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_UNSERIALIZE_FAIL, + "failed to " + "unserialize req-buffer to dictionary"); + snprintf(err_str, sizeof(err_str), + "Unable to decode " + "the command"); + goto out; + } + } + + ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); + if (ret) { + snprintf(err_str, sizeof(err_str), + "Unable to get volume " + "name"); + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s", + err_str); + goto out; + } + + ret = dict_get_int32n(dict, "count", SLEN("count"), &count); + if (ret) { + snprintf(err_str, sizeof(err_str), + "Unable to get brick " + "count"); + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s", + err_str); + goto out; + } + + ret = glusterd_volinfo_find(volname, &volinfo); + if (ret) { + snprintf(err_str, sizeof(err_str), "Volume %s does not exist", volname); + gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND, "%s", + err_str); + goto out; + } + + if 
((volinfo->type == GF_CLUSTER_TYPE_TIER) && + (glusterd_is_tiering_supported(err_str) == _gf_false)) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED, + "Tiering not supported at this version"); + ret = -1; + goto out; + } + + ret = dict_get_int32n(dict, "command", SLEN("command"), &cmd); + if (ret) { + snprintf(err_str, sizeof(err_str), + "Unable to get cmd " + "ccommand"); + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s", + err_str); + goto out; + } + + ret = glusterd_disallow_op_for_tier(volinfo, GD_OP_REMOVE_BRICK, cmd); + if (ret) { + snprintf(err_str, sizeof(err_str), + "Removing brick from a Tier volume is not allowed"); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_UNSUPPORTED, "%s", + err_str); + goto out; + } + + ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"), + &replica_count); + if (!ret) { + gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_FAILED, + "request to change replica-count to %d", replica_count); + ret = gd_rmbr_validate_replica_count(volinfo, replica_count, count, + err_str, sizeof(err_str)); + if (ret < 0) { + /* logging and error msg are done in above function + itself */ + goto out; } - - ret = glusterd_disallow_op_for_tier (volinfo, GD_OP_REMOVE_BRICK, cmd); + dict_deln(dict, "replica-count", SLEN("replica-count")); if (ret) { - snprintf (err_str, sizeof (err_str), - "Removing brick from a Tier volume is not allowed"); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_OP_UNSUPPORTED, "%s", err_str); - goto out; - } - - ret = dict_get_int32n (dict, "replica-count", SLEN ("replica-count"), - &replica_count); - if (!ret) { - gf_msg (this->name, GF_LOG_INFO, errno, - GD_MSG_DICT_GET_FAILED, - "request to change replica-count to %d", replica_count); - ret = gd_rmbr_validate_replica_count (volinfo, replica_count, - count, err_str, - sizeof (err_str)); - if (ret < 0) { - /* logging and error msg are done in above function - itself */ - goto out; - } - dict_deln (dict, "replica-count", SLEN ("replica-count")); - if (ret) { - replica_count = 0; - } else { - ret = dict_set_int32n (dict, "replica-count", - SLEN ("replica-count"), - replica_count); - if (ret) { - gf_msg (this->name, GF_LOG_WARNING, errno, - GD_MSG_DICT_SET_FAILED, - "failed to set the replica_count " - "in dict"); - goto out; - } - } - } - - /* 'vol_type' is used for giving the meaning full error msg for user */ - if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) { - strcpy (vol_type, "replica"); - } else if (volinfo->type == GF_CLUSTER_TYPE_STRIPE) { - strcpy (vol_type, "stripe"); - } else if (volinfo->type == GF_CLUSTER_TYPE_STRIPE_REPLICATE) { - strcpy (vol_type, "stripe-replicate"); - } else if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) { - strcpy (vol_type, "disperse"); + replica_count = 0; } else { - strcpy (vol_type, "distribute"); - } + ret = dict_set_int32n(dict, "replica-count", SLEN("replica-count"), + replica_count); + if (ret) { + gf_msg(this->name, GF_LOG_WARNING, errno, + GD_MSG_DICT_SET_FAILED, + "failed to set the replica_count " + "in dict"); + goto out; + } + } + } + + /* 'vol_type' is used for giving the meaning full error msg for user */ + if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) { + strcpy(vol_type, "replica"); + } else if (volinfo->type == GF_CLUSTER_TYPE_STRIPE) { + strcpy(vol_type, "stripe"); + } else if (volinfo->type == GF_CLUSTER_TYPE_STRIPE_REPLICATE) { + strcpy(vol_type, "stripe-replicate"); + } else if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) { + strcpy(vol_type, "disperse"); + } else { + 
strcpy(vol_type, "distribute"); + } + + /* Do not allow remove-brick if the volume is a stripe volume*/ + if ((volinfo->type == GF_CLUSTER_TYPE_STRIPE) && + (volinfo->brick_count == volinfo->stripe_count)) { + snprintf(err_str, sizeof(err_str), + "Removing brick from a stripe volume is not allowed"); + gf_msg(this->name, GF_LOG_ERROR, EPERM, GD_MSG_OP_NOT_PERMITTED, "%s", + err_str); + ret = -1; + goto out; + } + + if (!replica_count && (volinfo->type == GF_CLUSTER_TYPE_STRIPE_REPLICATE) && + (volinfo->brick_count == volinfo->dist_leaf_count)) { + snprintf(err_str, sizeof(err_str), + "Removing bricks from stripe-replicate" + " configuration is not allowed without reducing " + "replica or stripe count explicitly."); + gf_msg(this->name, GF_LOG_ERROR, EPERM, GD_MSG_OP_NOT_PERMITTED_AC_REQD, + "%s", err_str); + ret = -1; + goto out; + } + + if (!replica_count && (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) && + (volinfo->brick_count == volinfo->dist_leaf_count)) { + snprintf(err_str, sizeof(err_str), + "Removing bricks from replicate configuration " + "is not allowed without reducing replica count " + "explicitly."); + gf_msg(this->name, GF_LOG_ERROR, EPERM, GD_MSG_OP_NOT_PERMITTED_AC_REQD, + "%s", err_str); + ret = -1; + goto out; + } + + /* Do not allow remove-brick if the bricks given is less than + the replica count or stripe count */ + if (!replica_count && (volinfo->type != GF_CLUSTER_TYPE_NONE) && + (volinfo->type != GF_CLUSTER_TYPE_TIER)) { + if (volinfo->dist_leaf_count && (count % volinfo->dist_leaf_count)) { + snprintf(err_str, sizeof(err_str), + "Remove brick " + "incorrect brick count of %d for %s %d", + count, vol_type, volinfo->dist_leaf_count); + gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s", + err_str); + ret = -1; + goto out; + } + } + + /* subvol match is not required for tiered volume*/ + if ((volinfo->type != GF_CLUSTER_TYPE_NONE) && + (volinfo->type != GF_CLUSTER_TYPE_TIER) && + (volinfo->subvol_count > 1)) { + ret = subvol_matcher_init(&subvols, volinfo->subvol_count); + if (ret) + goto out; + } - /* Do not allow remove-brick if the volume is a stripe volume*/ - if ((volinfo->type == GF_CLUSTER_TYPE_STRIPE) && - (volinfo->brick_count == volinfo->stripe_count)) { - snprintf (err_str, sizeof (err_str), - "Removing brick from a stripe volume is not allowed"); - gf_msg (this->name, GF_LOG_ERROR, EPERM, - GD_MSG_OP_NOT_PERMITTED, "%s", err_str); - ret = -1; - goto out; - } - - if (!replica_count && - (volinfo->type == GF_CLUSTER_TYPE_STRIPE_REPLICATE) && - (volinfo->brick_count == volinfo->dist_leaf_count)) { - snprintf (err_str, sizeof(err_str), - "Removing bricks from stripe-replicate" - " configuration is not allowed without reducing " - "replica or stripe count explicitly."); - gf_msg (this->name, GF_LOG_ERROR, EPERM, - GD_MSG_OP_NOT_PERMITTED_AC_REQD, "%s", err_str); - ret = -1; - goto out; - } + if (volinfo->type == GF_CLUSTER_TYPE_TIER) + count = glusterd_set_detach_bricks(dict, volinfo); - if (!replica_count && - (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) && - (volinfo->brick_count == volinfo->dist_leaf_count)) { - snprintf (err_str, sizeof (err_str), - "Removing bricks from replicate configuration " - "is not allowed without reducing replica count " - "explicitly."); - gf_msg (this->name, GF_LOG_ERROR, EPERM, - GD_MSG_OP_NOT_PERMITTED_AC_REQD, "%s", err_str); - ret = -1; - goto out; - } - - /* Do not allow remove-brick if the bricks given is less than - the replica count or stripe count */ - if (!replica_count && (volinfo->type != 
GF_CLUSTER_TYPE_NONE) && - (volinfo->type != GF_CLUSTER_TYPE_TIER)) { - if (volinfo->dist_leaf_count && - (count % volinfo->dist_leaf_count)) { - snprintf (err_str, sizeof (err_str), "Remove brick " - "incorrect brick count of %d for %s %d", - count, vol_type, volinfo->dist_leaf_count); - gf_msg (this->name, GF_LOG_ERROR, EINVAL, - GD_MSG_INVALID_ENTRY, "%s", err_str); - ret = -1; - goto out; - } - } + brickinfo_list = GF_CALLOC(count, sizeof(*brickinfo_list), + gf_common_mt_pointer); + if (!brickinfo_list) { + ret = -1; + goto out; + } - /* subvol match is not required for tiered volume*/ - if ((volinfo->type != GF_CLUSTER_TYPE_NONE) && - (volinfo->type != GF_CLUSTER_TYPE_TIER) && - (volinfo->subvol_count > 1)) { - ret = subvol_matcher_init (&subvols, volinfo->subvol_count); - if (ret) - goto out; + while (i <= count) { + keylen = snprintf(key, sizeof(key), "brick%d", i); + ret = dict_get_strn(dict, key, keylen, &brick); + if (ret) { + snprintf(err_str, sizeof(err_str), "Unable to get %s", key); + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "%s", err_str); + goto out; } + gf_msg_debug(this->name, 0, + "Remove brick count %d brick:" + " %s", + i, brick); - if (volinfo->type == GF_CLUSTER_TYPE_TIER) - count = glusterd_set_detach_bricks(dict, volinfo); - - brickinfo_list = GF_CALLOC (count, sizeof (*brickinfo_list), - gf_common_mt_pointer); - if (!brickinfo_list) { - ret = -1; - goto out; - } + ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo, + _gf_false); - while ( i <= count) { - keylen = snprintf (key, sizeof (key), "brick%d", i); - ret = dict_get_strn (dict, key, keylen, &brick); - if (ret) { - snprintf (err_str, sizeof (err_str), "Unable to get %s", - key); - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "%s", err_str); - goto out; - } - gf_msg_debug (this->name, 0, "Remove brick count %d brick:" - " %s", i, brick); - - ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, - &brickinfo, - _gf_false); - - if (ret) { - snprintf (err_str, sizeof (err_str), "Incorrect brick " - "%s for volume %s", brick, volname); - gf_msg (this->name, GF_LOG_ERROR, EINVAL, - GD_MSG_BRICK_NOT_FOUND, "%s", err_str); - goto out; - } - brickinfo_list[i-1] = brickinfo; - - i++; - if ((volinfo->type == GF_CLUSTER_TYPE_NONE) || - (volinfo->brick_count <= volinfo->dist_leaf_count)) - continue; - - /* Find which subvolume the brick belongs to. - * subvol match is not required for tiered volume - * - */ - if (volinfo->type != GF_CLUSTER_TYPE_TIER) - subvol_matcher_update (subvols, volinfo, brickinfo); - } - - /* Check if the bricks belong to the same subvolumes.*/ - /* subvol match is not required for tiered volume*/ - if ((volinfo->type != GF_CLUSTER_TYPE_NONE) && - (volinfo->type != GF_CLUSTER_TYPE_TIER) && - (volinfo->subvol_count > 1)) { - ret = subvol_matcher_verify (subvols, volinfo, - err_str, sizeof(err_str), - vol_type, replica_count); - if (ret) - goto out; - } - - ret = glusterd_remove_brick_validate_arbiters (volinfo, count, - replica_count, - brickinfo_list, - err_str, - sizeof (err_str)); + if (ret) { + snprintf(err_str, sizeof(err_str), + "Incorrect brick " + "%s for volume %s", + brick, volname); + gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_BRICK_NOT_FOUND, + "%s", err_str); + goto out; + } + brickinfo_list[i - 1] = brickinfo; + + i++; + if ((volinfo->type == GF_CLUSTER_TYPE_NONE) || + (volinfo->brick_count <= volinfo->dist_leaf_count)) + continue; + + /* Find which subvolume the brick belongs to. 
+ * subvol match is not required for tiered volume + * + */ + if (volinfo->type != GF_CLUSTER_TYPE_TIER) + subvol_matcher_update(subvols, volinfo, brickinfo); + } + + /* Check if the bricks belong to the same subvolumes.*/ + /* subvol match is not required for tiered volume*/ + if ((volinfo->type != GF_CLUSTER_TYPE_NONE) && + (volinfo->type != GF_CLUSTER_TYPE_TIER) && + (volinfo->subvol_count > 1)) { + ret = subvol_matcher_verify(subvols, volinfo, err_str, sizeof(err_str), + vol_type, replica_count); if (ret) - goto out; + goto out; + } - ret = glusterd_op_begin_synctask (req, GD_OP_REMOVE_BRICK, dict); + ret = glusterd_remove_brick_validate_arbiters(volinfo, count, replica_count, + brickinfo_list, err_str, + sizeof(err_str)); + if (ret) + goto out; -out: - if (ret) { - rsp.op_ret = -1; - rsp.op_errno = 0; - if (err_str[0] == '\0') - snprintf (err_str, sizeof (err_str), - "Operation failed"); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_GLUSTERD_OP_FAILED, "%s", err_str); - rsp.op_errstr = err_str; - cli_rsp = &rsp; - glusterd_to_cli (req, cli_rsp, NULL, 0, NULL, - (xdrproc_t)xdr_gf_cli_rsp, dict); + ret = glusterd_op_begin_synctask(req, GD_OP_REMOVE_BRICK, dict); - ret = 0; //sent error to cli, prevent second reply +out: + if (ret) { + rsp.op_ret = -1; + rsp.op_errno = 0; + if (err_str[0] == '\0') + snprintf(err_str, sizeof(err_str), "Operation failed"); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_OP_FAILED, "%s", + err_str); + rsp.op_errstr = err_str; + cli_rsp = &rsp; + glusterd_to_cli(req, cli_rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp, + dict); - } + ret = 0; // sent error to cli, prevent second reply + } - if (brickinfo_list) - GF_FREE (brickinfo_list); - subvol_matcher_destroy (subvols); - free (cli_req.dict.dict_val); //its malloced by xdr + if (brickinfo_list) + GF_FREE(brickinfo_list); + subvol_matcher_destroy(subvols); + free(cli_req.dict.dict_val); // its malloced by xdr - return ret; + return ret; } int -glusterd_handle_remove_brick (rpcsvc_request_t *req) +glusterd_handle_remove_brick(rpcsvc_request_t *req) { - return glusterd_big_locked_handler (req, - __glusterd_handle_remove_brick); + return glusterd_big_locked_handler(req, __glusterd_handle_remove_brick); } static int -_glusterd_restart_gsync_session (dict_t *this, char *key, - data_t *value, void *data) +_glusterd_restart_gsync_session(dict_t *this, char *key, data_t *value, + void *data) { - char *slave = NULL; - char *slave_buf = NULL; - char *path_list = NULL; - char *slave_vol = NULL; - char *slave_host = NULL; - char *slave_url = NULL; - char *conf_path = NULL; - char **errmsg = NULL; - int ret = -1; - glusterd_gsync_status_temp_t *param = NULL; - gf_boolean_t is_running = _gf_false; - - param = (glusterd_gsync_status_temp_t *)data; - - GF_ASSERT (param); - GF_ASSERT (param->volinfo); - - slave = strchr(value->data, ':'); - if (slave) { - slave++; - slave_buf = gf_strdup (slave); - if (!slave_buf) { - gf_msg ("glusterd", GF_LOG_ERROR, ENOMEM, - GD_MSG_NO_MEMORY, - "Failed to gf_strdup"); - ret = -1; - goto out; - } - } - else - return 0; - - ret = dict_set_dynstrn (param->rsp_dict, "slave", SLEN ("slave"), - slave_buf); - if (ret) { - gf_msg ("glusterd", GF_LOG_ERROR, errno, - GD_MSG_DICT_SET_FAILED, - "Unable to store slave"); - if (slave_buf) - GF_FREE(slave_buf); - goto out; - } - - ret = glusterd_get_slave_details_confpath (param->volinfo, - param->rsp_dict, &slave_url, - &slave_host, &slave_vol, - &conf_path, errmsg); - if (ret) { - if (*errmsg) - gf_msg ("glusterd", GF_LOG_ERROR, 0, - 
GD_MSG_SLAVE_CONFPATH_DETAILS_FETCH_FAIL, - "%s", *errmsg); - else - gf_msg ("glusterd", GF_LOG_ERROR, 0, - GD_MSG_SLAVE_CONFPATH_DETAILS_FETCH_FAIL, - "Unable to fetch slave or confpath details."); - goto out; - } - - /* In cases that gsyncd is not running, we will not invoke it - * because of add-brick. */ - ret = glusterd_check_gsync_running_local (param->volinfo->volname, - slave, conf_path, - &is_running); - if (ret) { - gf_msg ("glusterd", GF_LOG_ERROR, 0, - GD_MSG_GSYNC_VALIDATION_FAIL, "gsync running validation failed."); - goto out; - } - if (_gf_false == is_running) { - gf_msg_debug ("glusterd", 0, "gsync session for %s and %s is" - " not running on this node. Hence not restarting.", - param->volinfo->volname, slave); - ret = 0; - goto out; - } + char *slave = NULL; + char *slave_buf = NULL; + char *path_list = NULL; + char *slave_vol = NULL; + char *slave_host = NULL; + char *slave_url = NULL; + char *conf_path = NULL; + char **errmsg = NULL; + int ret = -1; + glusterd_gsync_status_temp_t *param = NULL; + gf_boolean_t is_running = _gf_false; + + param = (glusterd_gsync_status_temp_t *)data; + + GF_ASSERT(param); + GF_ASSERT(param->volinfo); + + slave = strchr(value->data, ':'); + if (slave) { + slave++; + slave_buf = gf_strdup(slave); + if (!slave_buf) { + gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY, + "Failed to gf_strdup"); + ret = -1; + goto out; + } + } else + return 0; - ret = glusterd_get_local_brickpaths (param->volinfo, &path_list); - if (!path_list) { - gf_msg_debug ("glusterd", 0, "This node not being part of" - " volume should not be running gsyncd. Hence" - " no gsyncd process to restart."); - ret = 0; - goto out; - } + ret = dict_set_dynstrn(param->rsp_dict, "slave", SLEN("slave"), slave_buf); + if (ret) { + gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Unable to store slave"); + if (slave_buf) + GF_FREE(slave_buf); + goto out; + } + + ret = glusterd_get_slave_details_confpath(param->volinfo, param->rsp_dict, + &slave_url, &slave_host, + &slave_vol, &conf_path, errmsg); + if (ret) { + if (*errmsg) + gf_msg("glusterd", GF_LOG_ERROR, 0, + GD_MSG_SLAVE_CONFPATH_DETAILS_FETCH_FAIL, "%s", *errmsg); + else + gf_msg("glusterd", GF_LOG_ERROR, 0, + GD_MSG_SLAVE_CONFPATH_DETAILS_FETCH_FAIL, + "Unable to fetch slave or confpath details."); + goto out; + } + + /* In cases that gsyncd is not running, we will not invoke it + * because of add-brick. */ + ret = glusterd_check_gsync_running_local(param->volinfo->volname, slave, + conf_path, &is_running); + if (ret) { + gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_GSYNC_VALIDATION_FAIL, + "gsync running validation failed."); + goto out; + } + if (_gf_false == is_running) { + gf_msg_debug("glusterd", 0, + "gsync session for %s and %s is" + " not running on this node. Hence not restarting.", + param->volinfo->volname, slave); + ret = 0; + goto out; + } + + ret = glusterd_get_local_brickpaths(param->volinfo, &path_list); + if (!path_list) { + gf_msg_debug("glusterd", 0, + "This node not being part of" + " volume should not be running gsyncd. 
Hence" + " no gsyncd process to restart."); + ret = 0; + goto out; + } - ret = glusterd_check_restart_gsync_session (param->volinfo, slave, - param->rsp_dict, path_list, - conf_path, 0); - if (ret) - gf_msg ("glusterd", GF_LOG_ERROR, 0, - GD_MSG_GSYNC_RESTART_FAIL, - "Unable to restart gsync session."); + ret = glusterd_check_restart_gsync_session( + param->volinfo, slave, param->rsp_dict, path_list, conf_path, 0); + if (ret) + gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_GSYNC_RESTART_FAIL, + "Unable to restart gsync session."); out: - gf_msg_debug ("glusterd", 0, "Returning %d.", ret); - return ret; + gf_msg_debug("glusterd", 0, "Returning %d.", ret); + return ret; } /* op-sm */ int -glusterd_op_perform_add_bricks (glusterd_volinfo_t *volinfo, int32_t count, - char *bricks, dict_t *dict) +glusterd_op_perform_add_bricks(glusterd_volinfo_t *volinfo, int32_t count, + char *bricks, dict_t *dict) { - char *brick = NULL; - int32_t i = 1; - char *brick_list = NULL; - char *free_ptr1 = NULL; - char *free_ptr2 = NULL; - char *saveptr = NULL; - int32_t ret = -1; - int32_t stripe_count = 0; - int32_t replica_count = 0; - int32_t arbiter_count = 0; - int32_t type = 0; - glusterd_brickinfo_t *brickinfo = NULL; - glusterd_gsync_status_temp_t param = {0, }; - gf_boolean_t restart_needed = 0; - int caps = 0; - int brickid = 0; - char key[64] = ""; - char *brick_mount_dir = NULL; - xlator_t *this = NULL; - glusterd_conf_t *conf = NULL; - gf_boolean_t is_valid_add_brick = _gf_false; - struct statvfs brickstat = {0,}; - - this = THIS; - GF_ASSERT (this); - GF_ASSERT (volinfo); - - conf = this->private; - GF_ASSERT (conf); - - if (bricks) { - brick_list = gf_strdup (bricks); - free_ptr1 = brick_list; - } - - if (count) - brick = strtok_r (brick_list+1, " \n", &saveptr); - - if (dict) { - ret = dict_get_int32n (dict, "stripe-count", - SLEN ("stripe-count"), &stripe_count); - if (!ret) - gf_msg (THIS->name, GF_LOG_INFO, errno, - GD_MSG_DICT_GET_SUCCESS, - "stripe-count is set %d", stripe_count); - - ret = dict_get_int32n (dict, "replica-count", - SLEN ("replica-count"), &replica_count); - if (!ret) - gf_msg (THIS->name, GF_LOG_INFO, errno, - GD_MSG_DICT_GET_SUCCESS, - "replica-count is set %d", replica_count); - ret = dict_get_int32n (dict, "arbiter-count", - SLEN ("arbiter-count"), &arbiter_count); - if (!ret) - gf_msg (THIS->name, GF_LOG_INFO, errno, - GD_MSG_DICT_GET_SUCCESS, - "arbiter-count is set %d", arbiter_count); - ret = dict_get_int32n (dict, "type", SLEN ("type"), &type); - if (!ret) - gf_msg (THIS->name, GF_LOG_INFO, errno, - GD_MSG_DICT_GET_SUCCESS, - "type is set %d, need to change it", type); - } - - brickid = glusterd_get_next_available_brickid (volinfo); - if (brickid < 0) - goto out; - while ( i <= count) { - ret = glusterd_brickinfo_new_from_brick (brick, &brickinfo, - _gf_true, NULL); - if (ret) - goto out; - - GLUSTERD_ASSIGN_BRICKID_TO_BRICKINFO (brickinfo, volinfo, - brickid++); - - /* A bricks mount dir is required only by snapshots which were - * introduced in gluster-3.6.0 - */ - if (conf->op_version >= GD_OP_VERSION_3_6_0) { - brick_mount_dir = NULL; - - snprintf (key, sizeof(key), "brick%d.mount_dir", i); - ret = dict_get_str (dict, key, &brick_mount_dir); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, - "%s not present", key); - goto out; - } - strncpy (brickinfo->mount_dir, brick_mount_dir, - sizeof(brickinfo->mount_dir)); - } + char *brick = NULL; + int32_t i = 1; + char *brick_list = NULL; + char *free_ptr1 = NULL; + char *free_ptr2 = NULL; + 
char *saveptr = NULL; + int32_t ret = -1; + int32_t stripe_count = 0; + int32_t replica_count = 0; + int32_t arbiter_count = 0; + int32_t type = 0; + glusterd_brickinfo_t *brickinfo = NULL; + glusterd_gsync_status_temp_t param = { + 0, + }; + gf_boolean_t restart_needed = 0; + int caps = 0; + int brickid = 0; + char key[64] = ""; + char *brick_mount_dir = NULL; + xlator_t *this = NULL; + glusterd_conf_t *conf = NULL; + gf_boolean_t is_valid_add_brick = _gf_false; + struct statvfs brickstat = { + 0, + }; + + this = THIS; + GF_ASSERT(this); + GF_ASSERT(volinfo); + + conf = this->private; + GF_ASSERT(conf); + + if (bricks) { + brick_list = gf_strdup(bricks); + free_ptr1 = brick_list; + } + + if (count) + brick = strtok_r(brick_list + 1, " \n", &saveptr); + + if (dict) { + ret = dict_get_int32n(dict, "stripe-count", SLEN("stripe-count"), + &stripe_count); + if (!ret) + gf_msg(THIS->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS, + "stripe-count is set %d", stripe_count); - ret = glusterd_resolve_brick (brickinfo); - if (ret) - goto out; - - if (!gf_uuid_compare (brickinfo->uuid, MY_UUID)) { - ret = sys_statvfs (brickinfo->path, &brickstat); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_STATVFS_FAILED, - "Failed to fetch disk utilization " - "from the brick (%s:%s). Please check the health of " - "the brick. Error code was %s", - brickinfo->hostname, brickinfo->path, - strerror (errno)); - - goto out; - } - brickinfo->statfs_fsid = brickstat.f_fsid; - } - /* hot tier bricks are added to head of brick list */ - if (dict_getn (dict, "attach-tier", SLEN ("attach-tier"))) { - cds_list_add (&brickinfo->brick_list, &volinfo->bricks); - } else if (stripe_count || replica_count) { - add_brick_at_right_order (brickinfo, volinfo, (i - 1), - stripe_count, replica_count); - } else { - cds_list_add_tail (&brickinfo->brick_list, - &volinfo->bricks); - } - brick = strtok_r (NULL, " \n", &saveptr); - i++; - volinfo->brick_count++; + ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"), + &replica_count); + if (!ret) + gf_msg(THIS->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS, + "replica-count is set %d", replica_count); + ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"), + &arbiter_count); + if (!ret) + gf_msg(THIS->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS, + "arbiter-count is set %d", arbiter_count); + ret = dict_get_int32n(dict, "type", SLEN("type"), &type); + if (!ret) + gf_msg(THIS->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS, + "type is set %d, need to change it", type); + } + + brickid = glusterd_get_next_available_brickid(volinfo); + if (brickid < 0) + goto out; + while (i <= count) { + ret = glusterd_brickinfo_new_from_brick(brick, &brickinfo, _gf_true, + NULL); + if (ret) + goto out; - } + GLUSTERD_ASSIGN_BRICKID_TO_BRICKINFO(brickinfo, volinfo, brickid++); - /* Gets changed only if the options are given in add-brick cli */ - if (type) - volinfo->type = type; - /* performance.client-io-threads is turned on by default, - * however this has adverse effects on replicate volumes due to - * replication design issues, till that get addressed - * performance.client-io-threads option is turned off for all - * replicate volumes if not already explicitly enabled. 
+ /* A bricks mount dir is required only by snapshots which were + * introduced in gluster-3.6.0 */ - if (type && glusterd_is_volume_replicate (volinfo) && - conf->op_version >= GD_OP_VERSION_3_12_2) { - ret = dict_set_nstrn (volinfo->dict, - "performance.client-io-threads", - SLEN ("performance.client-io-threads"), - "off", SLEN ("off")); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_SET_FAILED, "Failed to set " - "performance.client-io-threads to off"); - goto out; - } - } + if (conf->op_version >= GD_OP_VERSION_3_6_0) { + brick_mount_dir = NULL; - if (replica_count) { - volinfo->replica_count = replica_count; - } - if (arbiter_count) { - volinfo->arbiter_count = arbiter_count; - } - if (stripe_count) { - volinfo->stripe_count = stripe_count; + snprintf(key, sizeof(key), "brick%d.mount_dir", i); + ret = dict_get_str(dict, key, &brick_mount_dir); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "%s not present", key); + goto out; + } + strncpy(brickinfo->mount_dir, brick_mount_dir, + sizeof(brickinfo->mount_dir)); } - volinfo->dist_leaf_count = glusterd_get_dist_leaf_count (volinfo); - - /* backward compatibility */ - volinfo->sub_count = ((volinfo->dist_leaf_count == 1) ? 0: - volinfo->dist_leaf_count); - - volinfo->subvol_count = (volinfo->brick_count / - volinfo->dist_leaf_count); - ret = 0; - if (GLUSTERD_STATUS_STARTED != volinfo->status) - goto generate_volfiles; - - ret = generate_brick_volfiles (volinfo); + ret = glusterd_resolve_brick(brickinfo); if (ret) - goto out; - - brick_list = gf_strdup (bricks); - free_ptr2 = brick_list; - i = 1; - - if (count) - brick = strtok_r (brick_list+1, " \n", &saveptr); + goto out; + + if (!gf_uuid_compare(brickinfo->uuid, MY_UUID)) { + ret = sys_statvfs(brickinfo->path, &brickstat); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_STATVFS_FAILED, + "Failed to fetch disk utilization " + "from the brick (%s:%s). Please check the health of " + "the brick. Error code was %s", + brickinfo->hostname, brickinfo->path, strerror(errno)); + + goto out; + } + brickinfo->statfs_fsid = brickstat.f_fsid; + } + /* hot tier bricks are added to head of brick list */ + if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) { + cds_list_add(&brickinfo->brick_list, &volinfo->bricks); + } else if (stripe_count || replica_count) { + add_brick_at_right_order(brickinfo, volinfo, (i - 1), stripe_count, + replica_count); + } else { + cds_list_add_tail(&brickinfo->brick_list, &volinfo->bricks); + } + brick = strtok_r(NULL, " \n", &saveptr); + i++; + volinfo->brick_count++; + } + + /* Gets changed only if the options are given in add-brick cli */ + if (type) + volinfo->type = type; + /* performance.client-io-threads is turned on by default, + * however this has adverse effects on replicate volumes due to + * replication design issues, till that get addressed + * performance.client-io-threads option is turned off for all + * replicate volumes if not already explicitly enabled. 
+ */ + if (type && glusterd_is_volume_replicate(volinfo) && + conf->op_version >= GD_OP_VERSION_3_12_2) { + ret = dict_set_nstrn(volinfo->dict, "performance.client-io-threads", + SLEN("performance.client-io-threads"), "off", + SLEN("off")); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Failed to set " + "performance.client-io-threads to off"); + goto out; + } + } + + if (replica_count) { + volinfo->replica_count = replica_count; + } + if (arbiter_count) { + volinfo->arbiter_count = arbiter_count; + } + if (stripe_count) { + volinfo->stripe_count = stripe_count; + } + volinfo->dist_leaf_count = glusterd_get_dist_leaf_count(volinfo); + + /* backward compatibility */ + volinfo->sub_count = ((volinfo->dist_leaf_count == 1) + ? 0 + : volinfo->dist_leaf_count); + + volinfo->subvol_count = (volinfo->brick_count / volinfo->dist_leaf_count); + + ret = 0; + if (GLUSTERD_STATUS_STARTED != volinfo->status) + goto generate_volfiles; + + ret = generate_brick_volfiles(volinfo); + if (ret) + goto out; + + brick_list = gf_strdup(bricks); + free_ptr2 = brick_list; + i = 1; + + if (count) + brick = strtok_r(brick_list + 1, " \n", &saveptr); #ifdef HAVE_BD_XLATOR - if (brickinfo->vg[0]) - caps = CAPS_BD | CAPS_THIN | - CAPS_OFFLOAD_COPY | CAPS_OFFLOAD_SNAPSHOT; + if (brickinfo->vg[0]) + caps = CAPS_BD | CAPS_THIN | CAPS_OFFLOAD_COPY | CAPS_OFFLOAD_SNAPSHOT; #endif - /* This check needs to be added to distinguish between - * attach-tier commands and add-brick commands. - * When a tier is attached, adding is done via add-brick - * and setting of pending xattrs shouldn't be done for - * attach-tiers as they are virtually new volumes. - */ - if (glusterd_is_volume_replicate (volinfo)) { - if (replica_count && - !dict_getn (dict, "attach-tier", SLEN ("attach-tier")) && - conf->op_version >= GD_OP_VERSION_3_7_10) { - is_valid_add_brick = _gf_true; - ret = generate_dummy_client_volfiles (volinfo); - if (ret) { - gf_msg (THIS->name, GF_LOG_ERROR, 0, - GD_MSG_VOLFILE_CREATE_FAIL, - "Failed to create volfile."); - goto out; - } - } - } - - while (i <= count) { - ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo, - &brickinfo, - _gf_true); - if (ret) - goto out; + /* This check needs to be added to distinguish between + * attach-tier commands and add-brick commands. + * When a tier is attached, adding is done via add-brick + * and setting of pending xattrs shouldn't be done for + * attach-tiers as they are virtually new volumes. 
+ */ + if (glusterd_is_volume_replicate(volinfo)) { + if (replica_count && + !dict_getn(dict, "attach-tier", SLEN("attach-tier")) && + conf->op_version >= GD_OP_VERSION_3_7_10) { + is_valid_add_brick = _gf_true; + ret = generate_dummy_client_volfiles(volinfo); + if (ret) { + gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL, + "Failed to create volfile."); + goto out; + } + } + } + + while (i <= count) { + ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo, + _gf_true); + if (ret) + goto out; #ifdef HAVE_BD_XLATOR - char msg[1024] = ""; - /* Check for VG/thin pool if its BD volume */ - if (brickinfo->vg[0]) { - ret = glusterd_is_valid_vg (brickinfo, 0, msg); - if (ret) { - gf_msg (THIS->name, GF_LOG_CRITICAL, 0, - GD_MSG_INVALID_VG, "%s", msg); - goto out; - } - /* if anyone of the brick does not have thin support, - disable it for entire volume */ - caps &= brickinfo->caps; - } else - caps = 0; + char msg[1024] = ""; + /* Check for VG/thin pool if its BD volume */ + if (brickinfo->vg[0]) { + ret = glusterd_is_valid_vg(brickinfo, 0, msg); + if (ret) { + gf_msg(THIS->name, GF_LOG_CRITICAL, 0, GD_MSG_INVALID_VG, "%s", + msg); + goto out; + } + /* if anyone of the brick does not have thin support, + disable it for entire volume */ + caps &= brickinfo->caps; + } else + caps = 0; #endif - if (gf_uuid_is_null (brickinfo->uuid)) { - ret = glusterd_resolve_brick (brickinfo); - if (ret) { - gf_msg ("glusterd", GF_LOG_ERROR, 0, - GD_MSG_RESOLVE_BRICK_FAIL, - FMTSTR_RESOLVE_BRICK, - brickinfo->hostname, brickinfo->path); - goto out; - } - } - - /* if the volume is a replicate volume, do: */ - if (is_valid_add_brick) { - if (!gf_uuid_compare (brickinfo->uuid, MY_UUID)) { - ret = glusterd_handle_replicate_brick_ops ( - volinfo, brickinfo, - GD_OP_ADD_BRICK); - if (ret < 0) - goto out; - } - } - ret = glusterd_brick_start (volinfo, brickinfo, - _gf_true, _gf_false); - if (ret) - goto out; - i++; - brick = strtok_r (NULL, " \n", &saveptr); - - /* Check if the brick is added in this node, and set - * the restart_needed flag. */ - if ((!gf_uuid_compare (brickinfo->uuid, MY_UUID)) && - !restart_needed) { - restart_needed = 1; - gf_msg_debug ("glusterd", 0, - "Restart gsyncd session, if it's already " - "running."); - } + if (gf_uuid_is_null(brickinfo->uuid)) { + ret = glusterd_resolve_brick(brickinfo); + if (ret) { + gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_RESOLVE_BRICK_FAIL, + FMTSTR_RESOLVE_BRICK, brickinfo->hostname, + brickinfo->path); + goto out; + } } - /* If the restart_needed flag is set, restart gsyncd sessions for that - * particular master with all the slaves. */ - if (restart_needed) { - param.rsp_dict = dict; - param.volinfo = volinfo; - dict_foreach (volinfo->gsync_slaves, - _glusterd_restart_gsync_session, ¶m); + /* if the volume is a replicate volume, do: */ + if (is_valid_add_brick) { + if (!gf_uuid_compare(brickinfo->uuid, MY_UUID)) { + ret = glusterd_handle_replicate_brick_ops(volinfo, brickinfo, + GD_OP_ADD_BRICK); + if (ret < 0) + goto out; + } } - volinfo->caps = caps; + ret = glusterd_brick_start(volinfo, brickinfo, _gf_true, _gf_false); + if (ret) + goto out; + i++; + brick = strtok_r(NULL, " \n", &saveptr); + + /* Check if the brick is added in this node, and set + * the restart_needed flag. 
*/ + if ((!gf_uuid_compare(brickinfo->uuid, MY_UUID)) && !restart_needed) { + restart_needed = 1; + gf_msg_debug("glusterd", 0, + "Restart gsyncd session, if it's already " + "running."); + } + } + + /* If the restart_needed flag is set, restart gsyncd sessions for that + * particular master with all the slaves. */ + if (restart_needed) { + param.rsp_dict = dict; + param.volinfo = volinfo; + dict_foreach(volinfo->gsync_slaves, _glusterd_restart_gsync_session, + ¶m); + } + volinfo->caps = caps; generate_volfiles: - if (conf->op_version <= GD_OP_VERSION_3_7_5) { - ret = glusterd_create_volfiles_and_notify_services (volinfo); - } else { - /* - * The cluster is operating at version greater than - * gluster-3.7.5. So no need to sent volfile fetch - * request in commit phase, the same will be done - * in post validate phase with v3 framework. - */ - } + if (conf->op_version <= GD_OP_VERSION_3_7_5) { + ret = glusterd_create_volfiles_and_notify_services(volinfo); + } else { + /* + * The cluster is operating at version greater than + * gluster-3.7.5. So no need to sent volfile fetch + * request in commit phase, the same will be done + * in post validate phase with v3 framework. + */ + } out: - GF_FREE (free_ptr1); - GF_FREE (free_ptr2); + GF_FREE(free_ptr1); + GF_FREE(free_ptr2); - gf_msg_debug ("glusterd", 0, "Returning %d", ret); - return ret; + gf_msg_debug("glusterd", 0, "Returning %d", ret); + return ret; } - int -glusterd_op_perform_remove_brick (glusterd_volinfo_t *volinfo, char *brick, - int force, int *need_migrate) +glusterd_op_perform_remove_brick(glusterd_volinfo_t *volinfo, char *brick, + int force, int *need_migrate) { - glusterd_brickinfo_t *brickinfo = NULL; - int32_t ret = -1; - glusterd_conf_t *priv = NULL; + glusterd_brickinfo_t *brickinfo = NULL; + int32_t ret = -1; + glusterd_conf_t *priv = NULL; - GF_ASSERT (volinfo); - GF_ASSERT (brick); + GF_ASSERT(volinfo); + GF_ASSERT(brick); - priv = THIS->private; - GF_ASSERT (priv); + priv = THIS->private; + GF_ASSERT(priv); - ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo, - &brickinfo, - _gf_false); - if (ret) - goto out; + ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo, + _gf_false); + if (ret) + goto out; - ret = glusterd_resolve_brick (brickinfo); - if (ret) - goto out; + ret = glusterd_resolve_brick(brickinfo); + if (ret) + goto out; - glusterd_volinfo_reset_defrag_stats (volinfo); + glusterd_volinfo_reset_defrag_stats(volinfo); - if (!gf_uuid_compare (brickinfo->uuid, MY_UUID)) { - /* Only if the brick is in this glusterd, do the rebalance */ - if (need_migrate) - *need_migrate = 1; - } + if (!gf_uuid_compare(brickinfo->uuid, MY_UUID)) { + /* Only if the brick is in this glusterd, do the rebalance */ + if (need_migrate) + *need_migrate = 1; + } - if (force) { - ret = glusterd_brick_stop (volinfo, brickinfo, - _gf_true); - if (ret) { - gf_msg (THIS->name, GF_LOG_ERROR, 0, - GD_MSG_BRICK_STOP_FAIL, "Unable to stop " - "glusterfs, ret: %d", ret); - } - goto out; + if (force) { + ret = glusterd_brick_stop(volinfo, brickinfo, _gf_true); + if (ret) { + gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_STOP_FAIL, + "Unable to stop " + "glusterfs, ret: %d", + ret); } + goto out; + } - brickinfo->decommissioned = 1; - ret = 0; + brickinfo->decommissioned = 1; + ret = 0; out: - gf_msg_debug ("glusterd", 0, "Returning %d", ret); - return ret; + gf_msg_debug("glusterd", 0, "Returning %d", ret); + return ret; } int -glusterd_op_stage_add_brick (dict_t *dict, char **op_errstr, dict_t *rsp_dict) 
+glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict) { - int ret = 0; - char *volname = NULL; - int count = 0; - int replica_count = 0; - int arbiter_count = 0; - int i = 0; - int32_t local_brick_count = 0; - char *bricks = NULL; - char *brick_list = NULL; - char *saveptr = NULL; - char *free_ptr = NULL; - char *brick = NULL; - glusterd_brickinfo_t *brickinfo = NULL; - glusterd_volinfo_t *volinfo = NULL; - xlator_t *this = NULL; - char msg[4096] = ""; - char key[64] = ""; - gf_boolean_t brick_alloc = _gf_false; - char *all_bricks = NULL; - char *str_ret = NULL; - gf_boolean_t is_force = _gf_false; - glusterd_conf_t *conf = NULL; - int32_t len = 0; - - this = THIS; - GF_ASSERT (this); - conf = this->private; - GF_ASSERT (conf); - - ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname); - if (ret) { - gf_msg (THIS->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, - "Unable to get volume name"); - goto out; - } - - ret = glusterd_volinfo_find (volname, &volinfo); + int ret = 0; + char *volname = NULL; + int count = 0; + int replica_count = 0; + int arbiter_count = 0; + int i = 0; + int32_t local_brick_count = 0; + char *bricks = NULL; + char *brick_list = NULL; + char *saveptr = NULL; + char *free_ptr = NULL; + char *brick = NULL; + glusterd_brickinfo_t *brickinfo = NULL; + glusterd_volinfo_t *volinfo = NULL; + xlator_t *this = NULL; + char msg[4096] = ""; + char key[64] = ""; + gf_boolean_t brick_alloc = _gf_false; + char *all_bricks = NULL; + char *str_ret = NULL; + gf_boolean_t is_force = _gf_false; + glusterd_conf_t *conf = NULL; + int32_t len = 0; + + this = THIS; + GF_ASSERT(this); + conf = this->private; + GF_ASSERT(conf); + + ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); + if (ret) { + gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get volume name"); + goto out; + } + + ret = glusterd_volinfo_find(volname, &volinfo); + if (ret) { + gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, + "Unable to find volume: %s", volname); + goto out; + } + + ret = glusterd_validate_volume_id(dict, volinfo); + if (ret) + goto out; + + ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"), + &replica_count); + if (ret) { + gf_msg_debug(THIS->name, 0, "Unable to get replica count"); + } + + ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"), + &arbiter_count); + if (ret) { + gf_msg_debug(THIS->name, 0, "No arbiter count present in the dict"); + } + + if (replica_count > 0) { + ret = op_version_check(this, GD_OP_VER_PERSISTENT_AFR_XATTRS, msg, + sizeof(msg)); if (ret) { - gf_msg (THIS->name, GF_LOG_ERROR, 0, - GD_MSG_VOL_NOT_FOUND, - "Unable to find volume: %s", volname); - goto out; + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERSION_MISMATCH, + "%s", msg); + *op_errstr = gf_strdup(msg); + goto out; } + } - ret = glusterd_validate_volume_id (dict, volinfo); - if (ret) - goto out; - - ret = dict_get_int32n (dict, "replica-count", SLEN ("replica-count"), - &replica_count); - if (ret) { - gf_msg_debug (THIS->name, 0, - "Unable to get replica count"); - } - - ret = dict_get_int32n (dict, "arbiter-count", - SLEN ("arbiter-count"), &arbiter_count); - if (ret) { - gf_msg_debug (THIS->name, 0, - "No arbiter count present in the dict"); - } - - if (replica_count > 0) { - ret = op_version_check (this, GD_OP_VER_PERSISTENT_AFR_XATTRS, - msg, sizeof(msg)); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_OP_VERSION_MISMATCH, "%s", msg); - *op_errstr = gf_strdup (msg); - goto 
out; - } - } - - if (glusterd_is_volume_replicate (volinfo)) { - /* Do not allow add-brick for stopped volumes when replica-count - * is being increased. - */ - if (conf->op_version >= GD_OP_VERSION_3_7_10 && - !dict_getn (dict, "attach-tier", SLEN ("attach-tier")) && - replica_count && - GLUSTERD_STATUS_STOPPED == volinfo->status) { - ret = -1; - snprintf (msg, sizeof (msg), " Volume must not be in" - " stopped state when replica-count needs to " - " be increased."); - gf_msg (THIS->name, GF_LOG_ERROR, 0, - GD_MSG_BRICK_ADD_FAIL, "%s", msg); - *op_errstr = gf_strdup (msg); - goto out; - } - /* op-version check for replica 2 to arbiter conversion. If we - * don't have this check, an older peer added as arbiter brick - * will not have the arbiter xlator in its volfile. */ - if ((conf->op_version < GD_OP_VERSION_3_8_0) && - (arbiter_count == 1) && (replica_count == 3)) { - ret = -1; - snprintf (msg, sizeof (msg), "Cluster op-version must " - "be >= 30800 to add arbiter brick to a " - "replica 2 volume."); - gf_msg (THIS->name, GF_LOG_ERROR, 0, - GD_MSG_BRICK_ADD_FAIL, "%s", msg); - *op_errstr = gf_strdup (msg); - goto out; - } - /* Do not allow increasing replica count for arbiter volumes. */ - if (replica_count && volinfo->arbiter_count) { - ret = -1; - snprintf (msg, sizeof (msg), "Increasing replica count " - "for arbiter volumes is not supported."); - gf_msg (THIS->name, GF_LOG_ERROR, 0, - GD_MSG_BRICK_ADD_FAIL, "%s", msg); - *op_errstr = gf_strdup (msg); - goto out; - } - } - - is_force = dict_get_str_boolean (dict, "force", _gf_false); - - if (volinfo->replica_count < replica_count && !is_force) { - cds_list_for_each_entry (brickinfo, &volinfo->bricks, - brick_list) { - if (gf_uuid_compare (brickinfo->uuid, MY_UUID)) - continue; - if (brickinfo->status == GF_BRICK_STOPPED) { - ret = -1; - len = snprintf (msg, sizeof (msg), "Brick %s " - "is down, changing replica " - "count needs all the bricks " - "to be up to avoid data loss", - brickinfo->path); - if (len < 0) { - strcpy(msg, "<error>"); - } - gf_msg (THIS->name, GF_LOG_ERROR, 0, - GD_MSG_BRICK_ADD_FAIL, "%s", msg); - *op_errstr = gf_strdup (msg); - goto out; - } - } - } - - if (conf->op_version > GD_OP_VERSION_3_7_5 && - is_origin_glusterd (dict)) { - ret = glusterd_validate_quorum (this, GD_OP_ADD_BRICK, dict, - op_errstr); - if (ret) { - gf_msg (this->name, GF_LOG_CRITICAL, 0, - GD_MSG_SERVER_QUORUM_NOT_MET, - "Server quorum not met. Rejecting operation."); - goto out; - } - } else { - /* Case 1: conf->op_version <= GD_OP_VERSION_3_7_5 - * in this case the add-brick is running - * syncop framework that will do a quorum - * check by default - * Case 2: We don't need to do quorum check on every - * node, only originator glusterd need to - * check for quorum - * So nothing need to be done in else - */ - } - - if (glusterd_is_defrag_on(volinfo)) { - snprintf (msg, sizeof(msg), "Volume name %s rebalance is in " - "progress. Please retry after completion", volname); - gf_msg (THIS->name, GF_LOG_ERROR, 0, - GD_MSG_OIP_RETRY_LATER, "%s", msg); - *op_errstr = gf_strdup (msg); + if (glusterd_is_volume_replicate(volinfo)) { + /* Do not allow add-brick for stopped volumes when replica-count + * is being increased. 
+ */ + if (conf->op_version >= GD_OP_VERSION_3_7_10 && + !dict_getn(dict, "attach-tier", SLEN("attach-tier")) && + replica_count && GLUSTERD_STATUS_STOPPED == volinfo->status) { + ret = -1; + snprintf(msg, sizeof(msg), + " Volume must not be in" + " stopped state when replica-count needs to " + " be increased."); + gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s", + msg); + *op_errstr = gf_strdup(msg); + goto out; + } + /* op-version check for replica 2 to arbiter conversion. If we + * don't have this check, an older peer added as arbiter brick + * will not have the arbiter xlator in its volfile. */ + if ((conf->op_version < GD_OP_VERSION_3_8_0) && (arbiter_count == 1) && + (replica_count == 3)) { + ret = -1; + snprintf(msg, sizeof(msg), + "Cluster op-version must " + "be >= 30800 to add arbiter brick to a " + "replica 2 volume."); + gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s", + msg); + *op_errstr = gf_strdup(msg); + goto out; + } + /* Do not allow increasing replica count for arbiter volumes. */ + if (replica_count && volinfo->arbiter_count) { + ret = -1; + snprintf(msg, sizeof(msg), + "Increasing replica count " + "for arbiter volumes is not supported."); + gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s", + msg); + *op_errstr = gf_strdup(msg); + goto out; + } + } + + is_force = dict_get_str_boolean(dict, "force", _gf_false); + + if (volinfo->replica_count < replica_count && !is_force) { + cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list) + { + if (gf_uuid_compare(brickinfo->uuid, MY_UUID)) + continue; + if (brickinfo->status == GF_BRICK_STOPPED) { ret = -1; - goto out; - } - - if (dict_getn (dict, "attach-tier", SLEN ("attach-tier"))) { - - /* - * This check is needed because of add/remove brick - * is not supported on a tiered volume. So once a tier - * is attached we cannot commit or stop the remove-brick - * task. Please change this comment once we start supporting - * add/remove brick on a tiered volume. - */ - if (!gd_is_remove_brick_committed (volinfo)) { - - snprintf (msg, sizeof (msg), "An earlier remove-brick " - "task exists for volume %s. Either commit it" - " or stop it before attaching a tier.", - volinfo->volname); - gf_msg (THIS->name, GF_LOG_ERROR, 0, - GD_MSG_OLD_REMOVE_BRICK_EXISTS, "%s", msg); - *op_errstr = gf_strdup (msg); - ret = -1; - goto out; + len = snprintf(msg, sizeof(msg), + "Brick %s " + "is down, changing replica " + "count needs all the bricks " + "to be up to avoid data loss", + brickinfo->path); + if (len < 0) { + strcpy(msg, "<error>"); } + gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s", + msg); + *op_errstr = gf_strdup(msg); + goto out; + } } + } - ret = dict_get_int32n (dict, "count", SLEN ("count"), &count); + if (conf->op_version > GD_OP_VERSION_3_7_5 && is_origin_glusterd(dict)) { + ret = glusterd_validate_quorum(this, GD_OP_ADD_BRICK, dict, op_errstr); if (ret) { - gf_msg ("glusterd", GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to get count"); - goto out; - } + gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_SERVER_QUORUM_NOT_MET, + "Server quorum not met. 
Rejecting operation."); + goto out; + } + } else { + /* Case 1: conf->op_version <= GD_OP_VERSION_3_7_5 + * in this case the add-brick is running + * syncop framework that will do a quorum + * check by default + * Case 2: We don't need to do quorum check on every + * node, only originator glusterd need to + * check for quorum + * So nothing need to be done in else + */ + } + + if (glusterd_is_defrag_on(volinfo)) { + snprintf(msg, sizeof(msg), + "Volume name %s rebalance is in " + "progress. Please retry after completion", + volname); + gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_OIP_RETRY_LATER, "%s", msg); + *op_errstr = gf_strdup(msg); + ret = -1; + goto out; + } - ret = dict_get_strn (dict, "bricks", SLEN ("bricks"), &bricks); + if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) { + /* + * This check is needed because of add/remove brick + * is not supported on a tiered volume. So once a tier + * is attached we cannot commit or stop the remove-brick + * task. Please change this comment once we start supporting + * add/remove brick on a tiered volume. + */ + if (!gd_is_remove_brick_committed(volinfo)) { + snprintf(msg, sizeof(msg), + "An earlier remove-brick " + "task exists for volume %s. Either commit it" + " or stop it before attaching a tier.", + volinfo->volname); + gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_OLD_REMOVE_BRICK_EXISTS, + "%s", msg); + *op_errstr = gf_strdup(msg); + ret = -1; + goto out; + } + } + + ret = dict_get_int32n(dict, "count", SLEN("count"), &count); + if (ret) { + gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get count"); + goto out; + } + + ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks); + if (ret) { + gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get bricks"); + goto out; + } + + if (bricks) { + brick_list = gf_strdup(bricks); + all_bricks = gf_strdup(bricks); + free_ptr = brick_list; + } + + if (count) + brick = strtok_r(brick_list + 1, " \n", &saveptr); + + while (i < count) { + if (!glusterd_store_is_valid_brickpath(volname, brick) || + !glusterd_is_valid_volfpath(volname, brick)) { + snprintf(msg, sizeof(msg), + "brick path %s is " + "too long", + brick); + gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRKPATH_TOO_LONG, "%s", + msg); + *op_errstr = gf_strdup(msg); + + ret = -1; + goto out; + } + + ret = glusterd_brickinfo_new_from_brick(brick, &brickinfo, _gf_true, + NULL); if (ret) { - gf_msg (THIS->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to get bricks"); - goto out; + gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_NOT_FOUND, + "Add-brick: Unable" + " to get brickinfo"); + goto out; } + brick_alloc = _gf_true; - if (bricks) { - brick_list = gf_strdup (bricks); - all_bricks = gf_strdup (bricks); - free_ptr = brick_list; + ret = glusterd_new_brick_validate(brick, brickinfo, msg, sizeof(msg), + NULL); + if (ret) { + *op_errstr = gf_strdup(msg); + ret = -1; + goto out; } - if (count) - brick = strtok_r (brick_list+1, " \n", &saveptr); - - - while ( i < count) { - if (!glusterd_store_is_valid_brickpath (volname, brick) || - !glusterd_is_valid_volfpath (volname, brick)) { - snprintf (msg, sizeof (msg), "brick path %s is " - "too long", brick); - gf_msg (THIS->name, GF_LOG_ERROR, 0, - GD_MSG_BRKPATH_TOO_LONG, "%s", msg); - *op_errstr = gf_strdup (msg); - - ret = -1; - goto out; - - } - - ret = glusterd_brickinfo_new_from_brick (brick, &brickinfo, - _gf_true, NULL); - if (ret) { - gf_msg (THIS->name, GF_LOG_ERROR, 0, - GD_MSG_BRICK_NOT_FOUND, - "Add-brick: 
Unable" - " to get brickinfo"); - goto out; - } - brick_alloc = _gf_true; - - ret = glusterd_new_brick_validate (brick, brickinfo, msg, - sizeof (msg), NULL); + if (!gf_uuid_compare(brickinfo->uuid, MY_UUID)) { +#ifdef HAVE_BD_XLATOR + if (brickinfo->vg[0]) { + ret = glusterd_is_valid_vg(brickinfo, 1, msg); if (ret) { - *op_errstr = gf_strdup (msg); - ret = -1; - goto out; + gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_VG, + "%s", msg); + *op_errstr = gf_strdup(msg); + goto out; } - - if (!gf_uuid_compare (brickinfo->uuid, MY_UUID)) { -#ifdef HAVE_BD_XLATOR - if (brickinfo->vg[0]) { - ret = glusterd_is_valid_vg (brickinfo, 1, msg); - if (ret) { - gf_msg (THIS->name, GF_LOG_ERROR, EINVAL, - GD_MSG_INVALID_VG, "%s", - msg); - *op_errstr = gf_strdup (msg); - goto out; - } - } + } #endif - ret = glusterd_validate_and_create_brickpath (brickinfo, - volinfo->volume_id, - op_errstr, is_force, - _gf_false); - if (ret) - goto out; - - /* A bricks mount dir is required only by snapshots which were - * introduced in gluster-3.6.0 - */ - if (conf->op_version >= GD_OP_VERSION_3_6_0) { - ret = glusterd_get_brick_mount_dir - (brickinfo->path, brickinfo->hostname, - brickinfo->mount_dir); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_BRICK_MOUNTDIR_GET_FAIL, - "Failed to get brick mount_dir"); - goto out; - } - - snprintf (key, sizeof(key), "brick%d.mount_dir", - i + 1); - ret = dict_set_dynstr_with_alloc - (rsp_dict, key, brickinfo->mount_dir); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_SET_FAILED, - "Failed to set %s", key); - goto out; - } - } - - local_brick_count = i + 1; - } - - glusterd_brickinfo_delete (brickinfo); - brick_alloc = _gf_false; - brickinfo = NULL; - brick = strtok_r (NULL, " \n", &saveptr); - i++; - } - - ret = dict_set_int32n (rsp_dict, "brick_count", SLEN ("brick_count"), - local_brick_count); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_SET_FAILED, - "Failed to set local_brick_count"); + ret = glusterd_validate_and_create_brickpath( + brickinfo, volinfo->volume_id, op_errstr, is_force, _gf_false); + if (ret) goto out; - } -out: - GF_FREE (free_ptr); - if (brick_alloc && brickinfo) - glusterd_brickinfo_delete (brickinfo); - GF_FREE (str_ret); - GF_FREE (all_bricks); - - gf_msg_debug (THIS->name, 0, "Returning %d", ret); - - return ret; -} - -int -glusterd_remove_brick_validate_bricks (gf1_op_commands cmd, int32_t brick_count, - dict_t *dict, - glusterd_volinfo_t *volinfo, - char **errstr, - gf_cli_defrag_type cmd_defrag) -{ - char *brick = NULL; - char msg[2048] = ""; - char key[64] = ""; - int keylen; - glusterd_brickinfo_t *brickinfo = NULL; - glusterd_peerinfo_t *peerinfo = NULL; - int i = 0; - int ret = -1; - char pidfile[PATH_MAX+1] = {0,}; - glusterd_conf_t *priv = THIS->private; - int pid = -1; - - /* Check whether all the nodes of the bricks to be removed are - * up, if not fail the operation */ - for (i = 1; i <= brick_count; i++) { - keylen = snprintf (key, sizeof (key), "brick%d", i); - ret = dict_get_strn (dict, key, keylen, &brick); + /* A bricks mount dir is required only by snapshots which were + * introduced in gluster-3.6.0 + */ + if (conf->op_version >= GD_OP_VERSION_3_6_0) { + ret = glusterd_get_brick_mount_dir( + brickinfo->path, brickinfo->hostname, brickinfo->mount_dir); if (ret) { - snprintf (msg, sizeof (msg), - "Unable to get %s", key); - *errstr = gf_strdup (msg); - goto out; + gf_msg(this->name, GF_LOG_ERROR, 0, + GD_MSG_BRICK_MOUNTDIR_GET_FAIL, + "Failed to get brick 
mount_dir"); + goto out; } - ret = - glusterd_volume_brickinfo_get_by_brick(brick, volinfo, - &brickinfo, - _gf_false); + snprintf(key, sizeof(key), "brick%d.mount_dir", i + 1); + ret = dict_set_dynstr_with_alloc(rsp_dict, key, + brickinfo->mount_dir); if (ret) { - snprintf (msg, sizeof (msg), "Incorrect brick " - "%s for volume %s", brick, volinfo->volname); - *errstr = gf_strdup (msg); - goto out; + gf_msg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_SET_FAILED, "Failed to set %s", key); + goto out; } - /* Do not allow commit if the bricks are not decommissioned - * if its a remove brick commit or detach-tier commit - */ - if (!brickinfo->decommissioned) { - if (cmd == GF_OP_CMD_COMMIT) { - snprintf (msg, sizeof (msg), "Brick %s " - "is not decommissioned. " - "Use start or force option", brick); - *errstr = gf_strdup (msg); - ret = -1; - goto out; - } + } - if (cmd == GF_OP_CMD_DETACH_COMMIT || - cmd_defrag == GF_DEFRAG_CMD_DETACH_COMMIT) { - snprintf (msg, sizeof (msg), "Bricks in Hot " - "tier are not decommissioned yet. Use " - "gluster volume tier <VOLNAME> " - "detach start to start the decommission process"); - *errstr = gf_strdup (msg); - ret = -1; - goto out; - } - } else { - if ((cmd == GF_OP_CMD_DETACH_COMMIT || - (cmd_defrag == GF_DEFRAG_CMD_DETACH_COMMIT)) && - (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_STARTED)) { - snprintf (msg, sizeof (msg), "Bricks in Hot " - "tier are not decommissioned yet. Wait for " - "the detach to complete using gluster volume " - "tier <VOLNAME> status."); - *errstr = gf_strdup (msg); - ret = -1; - goto out; - } - } + local_brick_count = i + 1; + } - if (glusterd_is_local_brick (THIS, volinfo, brickinfo)) { - switch (cmd) { - case GF_OP_CMD_START: - case GF_OP_CMD_DETACH_START: - goto check; - case GF_OP_CMD_NONE: - default: - break; - } + glusterd_brickinfo_delete(brickinfo); + brick_alloc = _gf_false; + brickinfo = NULL; + brick = strtok_r(NULL, " \n", &saveptr); + i++; + } - switch (cmd_defrag) { - case GF_DEFRAG_CMD_DETACH_START: - break; - case GF_DEFRAG_CMD_NONE: - default: - continue; - } -check: - if (brickinfo->status != GF_BRICK_STARTED) { - snprintf (msg, sizeof (msg), "Found stopped " - "brick %s. 
Use force option to " - "remove the offline brick" , brick); - *errstr = gf_strdup (msg); - ret = -1; - goto out; - } - GLUSTERD_GET_BRICK_PIDFILE (pidfile, volinfo, - brickinfo, priv); - if (!gf_is_service_running (pidfile, &pid)) { - snprintf (msg, sizeof (msg), "Found dead " - "brick %s", brick); - *errstr = gf_strdup (msg); - ret = -1; - goto out; - } else { - ret = 0; - } - continue; - } - - rcu_read_lock (); - peerinfo = glusterd_peerinfo_find_by_uuid - (brickinfo->uuid); - if (!peerinfo) { - snprintf (msg, sizeof(msg), "Host node of the " - "brick %s is not in cluster", brick); - *errstr = gf_strdup (msg); - ret = -1; - rcu_read_unlock (); - goto out; - } - if (!peerinfo->connected) { - snprintf (msg, sizeof(msg), "Host node of the " - "brick %s is down", brick); - *errstr = gf_strdup (msg); - ret = -1; - rcu_read_unlock (); - goto out; - } - rcu_read_unlock (); - } + ret = dict_set_int32n(rsp_dict, "brick_count", SLEN("brick_count"), + local_brick_count); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Failed to set local_brick_count"); + goto out; + } out: - return ret; + GF_FREE(free_ptr); + if (brick_alloc && brickinfo) + glusterd_brickinfo_delete(brickinfo); + GF_FREE(str_ret); + GF_FREE(all_bricks); + + gf_msg_debug(THIS->name, 0, "Returning %d", ret); + + return ret; } int -glusterd_op_stage_remove_brick (dict_t *dict, char **op_errstr) +glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count, + dict_t *dict, glusterd_volinfo_t *volinfo, + char **errstr, + gf_cli_defrag_type cmd_defrag) { - int ret = -1; - char *volname = NULL; - glusterd_volinfo_t *volinfo = NULL; - char *errstr = NULL; - int32_t brick_count = 0; - char msg[2048] = ""; - int32_t flag = 0; - gf1_op_commands cmd = GF_OP_CMD_NONE; - char *task_id_str = NULL; - xlator_t *this = NULL; - gsync_status_param_t param = {0,}; - - this = THIS; - GF_ASSERT (this); - - ret = op_version_check (this, GD_OP_VER_PERSISTENT_AFR_XATTRS, - msg, sizeof(msg)); + char *brick = NULL; + char msg[2048] = ""; + char key[64] = ""; + int keylen; + glusterd_brickinfo_t *brickinfo = NULL; + glusterd_peerinfo_t *peerinfo = NULL; + int i = 0; + int ret = -1; + char pidfile[PATH_MAX + 1] = { + 0, + }; + glusterd_conf_t *priv = THIS->private; + int pid = -1; + + /* Check whether all the nodes of the bricks to be removed are + * up, if not fail the operation */ + for (i = 1; i <= brick_count; i++) { + keylen = snprintf(key, sizeof(key), "brick%d", i); + ret = dict_get_strn(dict, key, keylen, &brick); if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_OP_VERSION_MISMATCH, "%s", msg); - *op_errstr = gf_strdup (msg); - goto out; + snprintf(msg, sizeof(msg), "Unable to get %s", key); + *errstr = gf_strdup(msg); + goto out; } - ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_SET_FAILED, "Unable to get volume name"); - goto out; - } - - ret = glusterd_volinfo_find (volname, &volinfo); - + ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo, + _gf_false); if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_VOL_NOT_FOUND, "Volume %s does not exist", volname); + snprintf(msg, sizeof(msg), + "Incorrect brick " + "%s for volume %s", + brick, volinfo->volname); + *errstr = gf_strdup(msg); + goto out; + } + /* Do not allow commit if the bricks are not decommissioned + * if its a remove brick commit or detach-tier commit + */ + if (!brickinfo->decommissioned) { + if (cmd == 
GF_OP_CMD_COMMIT) { + snprintf(msg, sizeof(msg), + "Brick %s " + "is not decommissioned. " + "Use start or force option", + brick); + *errstr = gf_strdup(msg); + ret = -1; goto out; - } + } - ret = glusterd_validate_volume_id (dict, volinfo); - if (ret) + if (cmd == GF_OP_CMD_DETACH_COMMIT || + cmd_defrag == GF_DEFRAG_CMD_DETACH_COMMIT) { + snprintf(msg, sizeof(msg), + "Bricks in Hot " + "tier are not decommissioned yet. Use " + "gluster volume tier <VOLNAME> " + "detach start to start the decommission process"); + *errstr = gf_strdup(msg); + ret = -1; goto out; - - ret = dict_get_int32n (dict, "command", SLEN ("command"), &flag); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, - "Unable to get brick command"); + } + } else { + if ((cmd == GF_OP_CMD_DETACH_COMMIT || + (cmd_defrag == GF_DEFRAG_CMD_DETACH_COMMIT)) && + (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_STARTED)) { + snprintf(msg, sizeof(msg), + "Bricks in Hot " + "tier are not decommissioned yet. Wait for " + "the detach to complete using gluster volume " + "tier <VOLNAME> status."); + *errstr = gf_strdup(msg); + ret = -1; goto out; - } - cmd = flag; - - ret = dict_get_int32n (dict, "count", SLEN ("count"), &brick_count); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to get brick count"); + } + } + + if (glusterd_is_local_brick(THIS, volinfo, brickinfo)) { + switch (cmd) { + case GF_OP_CMD_START: + case GF_OP_CMD_DETACH_START: + goto check; + case GF_OP_CMD_NONE: + default: + break; + } + + switch (cmd_defrag) { + case GF_DEFRAG_CMD_DETACH_START: + break; + case GF_DEFRAG_CMD_NONE: + default: + continue; + } + check: + if (brickinfo->status != GF_BRICK_STARTED) { + snprintf(msg, sizeof(msg), + "Found stopped " + "brick %s. 
Use force option to " + "remove the offline brick", + brick); + *errstr = gf_strdup(msg); + ret = -1; goto out; - } - - ret = 0; - if (volinfo->brick_count == brick_count) { - errstr = gf_strdup ("Deleting all the bricks of the " - "volume is not allowed"); + } + GLUSTERD_GET_BRICK_PIDFILE(pidfile, volinfo, brickinfo, priv); + if (!gf_is_service_running(pidfile, &pid)) { + snprintf(msg, sizeof(msg), + "Found dead " + "brick %s", + brick); + *errstr = gf_strdup(msg); ret = -1; goto out; - } + } else { + ret = 0; + } + continue; + } + + rcu_read_lock(); + peerinfo = glusterd_peerinfo_find_by_uuid(brickinfo->uuid); + if (!peerinfo) { + snprintf(msg, sizeof(msg), + "Host node of the " + "brick %s is not in cluster", + brick); + *errstr = gf_strdup(msg); + ret = -1; + rcu_read_unlock(); + goto out; + } + if (!peerinfo->connected) { + snprintf(msg, sizeof(msg), + "Host node of the " + "brick %s is down", + brick); + *errstr = gf_strdup(msg); + ret = -1; + rcu_read_unlock(); + goto out; + } + rcu_read_unlock(); + } +out: + return ret; +} + +int +glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr) +{ + int ret = -1; + char *volname = NULL; + glusterd_volinfo_t *volinfo = NULL; + char *errstr = NULL; + int32_t brick_count = 0; + char msg[2048] = ""; + int32_t flag = 0; + gf1_op_commands cmd = GF_OP_CMD_NONE; + char *task_id_str = NULL; + xlator_t *this = NULL; + gsync_status_param_t param = { + 0, + }; + + this = THIS; + GF_ASSERT(this); + + ret = op_version_check(this, GD_OP_VER_PERSISTENT_AFR_XATTRS, msg, + sizeof(msg)); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERSION_MISMATCH, "%s", + msg); + *op_errstr = gf_strdup(msg); + goto out; + } + + ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Unable to get volume name"); + goto out; + } + + ret = glusterd_volinfo_find(volname, &volinfo); + + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, + "Volume %s does not exist", volname); + goto out; + } + + ret = glusterd_validate_volume_id(dict, volinfo); + if (ret) + goto out; + + ret = dict_get_int32n(dict, "command", SLEN("command"), &flag); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get brick command"); + goto out; + } + cmd = flag; + + ret = dict_get_int32n(dict, "count", SLEN("count"), &brick_count); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get brick count"); + goto out; + } + + ret = 0; + if (volinfo->brick_count == brick_count) { + errstr = gf_strdup( + "Deleting all the bricks of the " + "volume is not allowed"); ret = -1; - switch (cmd) { + goto out; + } + + ret = -1; + switch (cmd) { case GF_OP_CMD_NONE: - errstr = gf_strdup ("no remove-brick command issued"); - goto out; + errstr = gf_strdup("no remove-brick command issued"); + goto out; case GF_OP_CMD_STATUS: - ret = 0; - goto out; + ret = 0; + goto out; case GF_OP_CMD_DETACH_START: - if (volinfo->type != GF_CLUSTER_TYPE_TIER) { - snprintf (msg, sizeof(msg), "volume %s is not a tier " - "volume", volinfo->volname); - errstr = gf_strdup (msg); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_VOL_NOT_TIER, "%s", errstr); - goto out; - } - - case GF_OP_CMD_START: - { - if ((volinfo->type == GF_CLUSTER_TYPE_REPLICATE) && - dict_getn (dict, "replica-count", - SLEN ("replica-count"))) { - snprintf (msg, sizeof(msg), "Migration of data is not " - "needed when reducing replica count. 
Use the" - " 'force' option"); - errstr = gf_strdup (msg); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_USE_THE_FORCE, "%s", errstr); - goto out; - } - - if (GLUSTERD_STATUS_STARTED != volinfo->status) { - if (volinfo->type == GF_CLUSTER_TYPE_TIER) { - snprintf (msg, sizeof (msg), "Volume %s needs " - "to be started before detach-tier " - "(you can use 'force' or 'commit' " - "to override this behavior)", - volinfo->volname); - } else { - snprintf (msg, sizeof (msg), "Volume %s needs " - "to be started before remove-brick " - "(you can use 'force' or 'commit' " - "to override this behavior)", - volinfo->volname); - } - errstr = gf_strdup (msg); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_VOL_NOT_STARTED, "%s", errstr); - goto out; - } - if (!gd_is_remove_brick_committed (volinfo)) { - snprintf (msg, sizeof (msg), "An earlier remove-brick " - "task exists for volume %s. Either commit it" - " or stop it before starting a new task.", - volinfo->volname); - errstr = gf_strdup (msg); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_OLD_REMOVE_BRICK_EXISTS, "Earlier remove-brick" - " task exists for volume %s.", - volinfo->volname); - goto out; - } - if (glusterd_is_defrag_on(volinfo)) { - errstr = gf_strdup("Rebalance is in progress. Please " - "retry after completion"); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_OIP_RETRY_LATER, "%s", errstr); - goto out; - } - - /* Check if the connected clients are all of version - * glusterfs-3.6 and higher. This is needed to prevent some data - * loss issues that could occur when older clients are connected - * when rebalance is run. - */ - ret = glusterd_check_client_op_version_support - (volname, GD_OP_VERSION_3_6_0, NULL); - if (ret) { - ret = gf_asprintf (op_errstr, "Volume %s has one or " - "more connected clients of a version" - " lower than GlusterFS-v3.6.0. " - "Starting remove-brick in this state " - "could lead to data loss.\nPlease " - "disconnect those clients before " - "attempting this command again.", - volname); - goto out; - } - - ret = glusterd_remove_brick_validate_bricks (cmd, brick_count, - dict, volinfo, - &errstr, - GF_DEFRAG_CMD_NONE); - if (ret) - goto out; - - if (is_origin_glusterd (dict)) { - ret = glusterd_generate_and_set_task_id - (dict, GF_REMOVE_BRICK_TID_KEY, - SLEN (GF_REMOVE_BRICK_TID_KEY)); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_TASKID_GEN_FAIL, - "Failed to generate task-id"); - goto out; - } + if (volinfo->type != GF_CLUSTER_TYPE_TIER) { + snprintf(msg, sizeof(msg), + "volume %s is not a tier " + "volume", + volinfo->volname); + errstr = gf_strdup(msg); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_TIER, "%s", + errstr); + goto out; + } + + case GF_OP_CMD_START: { + if ((volinfo->type == GF_CLUSTER_TYPE_REPLICATE) && + dict_getn(dict, "replica-count", SLEN("replica-count"))) { + snprintf(msg, sizeof(msg), + "Migration of data is not " + "needed when reducing replica count. 
Use the" + " 'force' option"); + errstr = gf_strdup(msg); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_USE_THE_FORCE, "%s", + errstr); + goto out; + } + + if (GLUSTERD_STATUS_STARTED != volinfo->status) { + if (volinfo->type == GF_CLUSTER_TYPE_TIER) { + snprintf(msg, sizeof(msg), + "Volume %s needs " + "to be started before detach-tier " + "(you can use 'force' or 'commit' " + "to override this behavior)", + volinfo->volname); } else { - ret = dict_get_strn (dict, GF_REMOVE_BRICK_TID_KEY, - SLEN (GF_REMOVE_BRICK_TID_KEY), - &task_id_str); - if (ret) { - gf_msg (this->name, GF_LOG_WARNING, errno, - GD_MSG_DICT_GET_FAILED, - "Missing remove-brick-id"); - ret = 0; - } + snprintf(msg, sizeof(msg), + "Volume %s needs " + "to be started before remove-brick " + "(you can use 'force' or 'commit' " + "to override this behavior)", + volinfo->volname); + } + errstr = gf_strdup(msg); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_STARTED, + "%s", errstr); + goto out; + } + if (!gd_is_remove_brick_committed(volinfo)) { + snprintf(msg, sizeof(msg), + "An earlier remove-brick " + "task exists for volume %s. Either commit it" + " or stop it before starting a new task.", + volinfo->volname); + errstr = gf_strdup(msg); + gf_msg(this->name, GF_LOG_ERROR, 0, + GD_MSG_OLD_REMOVE_BRICK_EXISTS, + "Earlier remove-brick" + " task exists for volume %s.", + volinfo->volname); + goto out; + } + if (glusterd_is_defrag_on(volinfo)) { + errstr = gf_strdup( + "Rebalance is in progress. Please " + "retry after completion"); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OIP_RETRY_LATER, + "%s", errstr); + goto out; + } + + /* Check if the connected clients are all of version + * glusterfs-3.6 and higher. This is needed to prevent some data + * loss issues that could occur when older clients are connected + * when rebalance is run. + */ + ret = glusterd_check_client_op_version_support( + volname, GD_OP_VERSION_3_6_0, NULL); + if (ret) { + ret = gf_asprintf(op_errstr, + "Volume %s has one or " + "more connected clients of a version" + " lower than GlusterFS-v3.6.0. 
" + "Starting remove-brick in this state " + "could lead to data loss.\nPlease " + "disconnect those clients before " + "attempting this command again.", + volname); + goto out; + } + + ret = glusterd_remove_brick_validate_bricks( + cmd, brick_count, dict, volinfo, &errstr, GF_DEFRAG_CMD_NONE); + if (ret) + goto out; + + if (is_origin_glusterd(dict)) { + ret = glusterd_generate_and_set_task_id( + dict, GF_REMOVE_BRICK_TID_KEY, + SLEN(GF_REMOVE_BRICK_TID_KEY)); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TASKID_GEN_FAIL, + "Failed to generate task-id"); + goto out; + } + } else { + ret = dict_get_strn(dict, GF_REMOVE_BRICK_TID_KEY, + SLEN(GF_REMOVE_BRICK_TID_KEY), + &task_id_str); + if (ret) { + gf_msg(this->name, GF_LOG_WARNING, errno, + GD_MSG_DICT_GET_FAILED, "Missing remove-brick-id"); + ret = 0; } - break; + } + break; } case GF_OP_CMD_STOP: case GF_OP_CMD_STOP_DETACH_TIER: - ret = 0; - break; + ret = 0; + break; case GF_OP_CMD_DETACH_COMMIT: - if (volinfo->type != GF_CLUSTER_TYPE_TIER) { - snprintf (msg, sizeof(msg), "volume %s is not a tier " - "volume", volinfo->volname); - errstr = gf_strdup (msg); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_VOL_NOT_TIER, "%s", errstr); - goto out; - } - if (volinfo->decommission_in_progress) { - errstr = gf_strdup ("use 'force' option as migration " - "is in progress"); - goto out; - } - if (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_FAILED) { - errstr = gf_strdup ("use 'force' option as migration " - "has failed"); - goto out; - } - - ret = glusterd_remove_brick_validate_bricks (cmd, brick_count, - dict, volinfo, - &errstr, - GF_DEFRAG_CMD_NONE); - if (ret) - goto out; - - /* If geo-rep is configured, for this volume, it should be - * stopped. - */ - param.volinfo = volinfo; - ret = glusterd_check_geo_rep_running (¶m, op_errstr); - if (ret || param.is_active) { - ret = -1; - goto out; - } - break; + if (volinfo->type != GF_CLUSTER_TYPE_TIER) { + snprintf(msg, sizeof(msg), + "volume %s is not a tier " + "volume", + volinfo->volname); + errstr = gf_strdup(msg); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_TIER, "%s", + errstr); + goto out; + } + if (volinfo->decommission_in_progress) { + errstr = gf_strdup( + "use 'force' option as migration " + "is in progress"); + goto out; + } + if (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_FAILED) { + errstr = gf_strdup( + "use 'force' option as migration " + "has failed"); + goto out; + } + + ret = glusterd_remove_brick_validate_bricks( + cmd, brick_count, dict, volinfo, &errstr, GF_DEFRAG_CMD_NONE); + if (ret) + goto out; + + /* If geo-rep is configured, for this volume, it should be + * stopped. 
+ */ + param.volinfo = volinfo; + ret = glusterd_check_geo_rep_running(¶m, op_errstr); + if (ret || param.is_active) { + ret = -1; + goto out; + } + break; case GF_OP_CMD_COMMIT: - if (volinfo->decommission_in_progress) { - errstr = gf_strdup ("use 'force' option as migration " - "is in progress"); - goto out; - } + if (volinfo->decommission_in_progress) { + errstr = gf_strdup( + "use 'force' option as migration " + "is in progress"); + goto out; + } - if (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_FAILED) { - errstr = gf_strdup ("use 'force' option as migration " - "has failed"); - goto out; - } + if (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_FAILED) { + errstr = gf_strdup( + "use 'force' option as migration " + "has failed"); + goto out; + } - ret = glusterd_remove_brick_validate_bricks (cmd, brick_count, - dict, volinfo, - &errstr, - GF_DEFRAG_CMD_NONE); - if (ret) - goto out; + ret = glusterd_remove_brick_validate_bricks( + cmd, brick_count, dict, volinfo, &errstr, GF_DEFRAG_CMD_NONE); + if (ret) + goto out; - /* If geo-rep is configured, for this volume, it should be - * stopped. - */ - param.volinfo = volinfo; - ret = glusterd_check_geo_rep_running (¶m, op_errstr); - if (ret || param.is_active) { - ret = -1; - goto out; - } + /* If geo-rep is configured, for this volume, it should be + * stopped. + */ + param.volinfo = volinfo; + ret = glusterd_check_geo_rep_running(¶m, op_errstr); + if (ret || param.is_active) { + ret = -1; + goto out; + } - break; + break; case GF_OP_CMD_DETACH_COMMIT_FORCE: - if (volinfo->type != GF_CLUSTER_TYPE_TIER) { - snprintf (msg, sizeof(msg), "volume %s is not a tier " - "volume", volinfo->volname); - errstr = gf_strdup (msg); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_VOL_NOT_TIER, "%s", errstr); - goto out; - } + if (volinfo->type != GF_CLUSTER_TYPE_TIER) { + snprintf(msg, sizeof(msg), + "volume %s is not a tier " + "volume", + volinfo->volname); + errstr = gf_strdup(msg); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_TIER, "%s", + errstr); + goto out; + } case GF_OP_CMD_COMMIT_FORCE: - break; - } - ret = 0; + break; + } + ret = 0; out: - gf_msg_debug (this->name, 0, "Returning %d", ret); - if (ret && errstr) { - if (op_errstr) - *op_errstr = errstr; - } + gf_msg_debug(this->name, 0, "Returning %d", ret); + if (ret && errstr) { + if (op_errstr) + *op_errstr = errstr; + } - return ret; + return ret; } int -glusterd_remove_brick_migrate_cbk (glusterd_volinfo_t *volinfo, - gf_defrag_status_t status) +glusterd_remove_brick_migrate_cbk(glusterd_volinfo_t *volinfo, + gf_defrag_status_t status) { - int ret = 0; + int ret = 0; -#if 0 /* TODO: enable this behavior once cluster-wide awareness comes for - defrag cbk function */ +#if 0 /* TODO: enable this behavior once cluster-wide awareness comes for \ + defrag cbk function */ glusterd_brickinfo_t *brickinfo = NULL; glusterd_brickinfo_t *tmp = NULL; @@ -2514,962 +2490,931 @@ glusterd_remove_brick_migrate_cbk (glusterd_volinfo_t *volinfo, #endif - volinfo->decommission_in_progress = 0; - return ret; + volinfo->decommission_in_progress = 0; + return ret; } static int -glusterd_op_perform_attach_tier (dict_t *dict, - glusterd_volinfo_t *volinfo, - int count, - char *bricks) +glusterd_op_perform_attach_tier(dict_t *dict, glusterd_volinfo_t *volinfo, + int count, char *bricks) { - int ret = 0; - int replica_count = 0; - int type = 0; - - /* - * Store the new (cold) tier's structure until the graph is generated. 
- * If there is a failure before the graph is generated the - * structure will revert to its original state. - */ - volinfo->tier_info.cold_dist_leaf_count = volinfo->dist_leaf_count; - volinfo->tier_info.cold_type = volinfo->type; - volinfo->tier_info.cold_brick_count = volinfo->brick_count; - volinfo->tier_info.cold_replica_count = volinfo->replica_count; - volinfo->tier_info.cold_disperse_count = volinfo->disperse_count; - volinfo->tier_info.cold_redundancy_count = volinfo->redundancy_count; - - ret = dict_get_int32n (dict, "replica-count", - SLEN ("replica-count"), &replica_count); - if (!ret) - volinfo->tier_info.hot_replica_count = replica_count; - else - volinfo->tier_info.hot_replica_count = 1; - volinfo->tier_info.hot_brick_count = count; - ret = dict_get_int32n (dict, "hot-type", SLEN ("hot-type"), &type); - volinfo->tier_info.hot_type = type; - ret = dict_set_int32n (dict, "type", SLEN ("type"), - GF_CLUSTER_TYPE_TIER); - - if (!ret) - ret = dict_set_nstrn (volinfo->dict, "features.ctr-enabled", - SLEN ("features.ctr-enabled"), - "on", SLEN ("on")); - - if (!ret) - ret = dict_set_nstrn (volinfo->dict, "cluster.tier-mode", - SLEN ("cluster.tier-mode"), - "cache", SLEN ("cache")); - - return ret; + int ret = 0; + int replica_count = 0; + int type = 0; + + /* + * Store the new (cold) tier's structure until the graph is generated. + * If there is a failure before the graph is generated the + * structure will revert to its original state. + */ + volinfo->tier_info.cold_dist_leaf_count = volinfo->dist_leaf_count; + volinfo->tier_info.cold_type = volinfo->type; + volinfo->tier_info.cold_brick_count = volinfo->brick_count; + volinfo->tier_info.cold_replica_count = volinfo->replica_count; + volinfo->tier_info.cold_disperse_count = volinfo->disperse_count; + volinfo->tier_info.cold_redundancy_count = volinfo->redundancy_count; + + ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"), + &replica_count); + if (!ret) + volinfo->tier_info.hot_replica_count = replica_count; + else + volinfo->tier_info.hot_replica_count = 1; + volinfo->tier_info.hot_brick_count = count; + ret = dict_get_int32n(dict, "hot-type", SLEN("hot-type"), &type); + volinfo->tier_info.hot_type = type; + ret = dict_set_int32n(dict, "type", SLEN("type"), GF_CLUSTER_TYPE_TIER); + + if (!ret) + ret = dict_set_nstrn(volinfo->dict, "features.ctr-enabled", + SLEN("features.ctr-enabled"), "on", SLEN("on")); + + if (!ret) + ret = dict_set_nstrn(volinfo->dict, "cluster.tier-mode", + SLEN("cluster.tier-mode"), "cache", SLEN("cache")); + + return ret; } int -glusterd_op_add_brick (dict_t *dict, char **op_errstr) +glusterd_op_add_brick(dict_t *dict, char **op_errstr) { - int ret = 0; - char *volname = NULL; - glusterd_conf_t *priv = NULL; - glusterd_volinfo_t *volinfo = NULL; - xlator_t *this = NULL; - char *bricks = NULL; - int32_t count = 0; - - this = THIS; - GF_ASSERT (this); - - priv = this->private; - GF_ASSERT (priv); - - ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname); - - if (ret) { - gf_msg ("glusterd", GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to get volume name"); - goto out; - } - - ret = glusterd_volinfo_find (volname, &volinfo); - - if (ret) { - gf_msg ("glusterd", GF_LOG_ERROR, EINVAL, - GD_MSG_VOL_NOT_FOUND, "Unable to allocate memory"); - goto out; - } - - ret = dict_get_int32n (dict, "count", SLEN ("count"), &count); - if (ret) { - gf_msg ("glusterd", GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to get count"); - goto out; - } - - - ret = dict_get_strn (dict, 
"bricks", SLEN ("bricks"), &bricks); - if (ret) { - gf_msg ("glusterd", GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to get bricks"); - goto out; - } - - if (dict_getn (dict, "attach-tier", SLEN ("attach-tier"))) { - gf_msg_debug (THIS->name, 0, "Adding tier"); - glusterd_op_perform_attach_tier (dict, volinfo, count, bricks); - } - - ret = glusterd_op_perform_add_bricks (volinfo, count, bricks, dict); - if (ret) { - gf_msg ("glusterd", GF_LOG_ERROR, 0, - GD_MSG_BRICK_ADD_FAIL, "Unable to add bricks"); - goto out; - } - if (priv->op_version <= GD_OP_VERSION_3_7_5) { - ret = glusterd_store_volinfo (volinfo, - GLUSTERD_VOLINFO_VER_AC_INCREMENT); - if (ret) - goto out; - } else { - /* - * The cluster is operating at version greater than - * gluster-3.7.5. So no need to store volfiles - * in commit phase, the same will be done - * in post validate phase with v3 framework. - */ - } + int ret = 0; + char *volname = NULL; + glusterd_conf_t *priv = NULL; + glusterd_volinfo_t *volinfo = NULL; + xlator_t *this = NULL; + char *bricks = NULL; + int32_t count = 0; + + this = THIS; + GF_ASSERT(this); + + priv = this->private; + GF_ASSERT(priv); + + ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); + + if (ret) { + gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get volume name"); + goto out; + } + + ret = glusterd_volinfo_find(volname, &volinfo); + + if (ret) { + gf_msg("glusterd", GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND, + "Unable to allocate memory"); + goto out; + } + + ret = dict_get_int32n(dict, "count", SLEN("count"), &count); + if (ret) { + gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get count"); + goto out; + } + + ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks); + if (ret) { + gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get bricks"); + goto out; + } + + if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) { + gf_msg_debug(THIS->name, 0, "Adding tier"); + glusterd_op_perform_attach_tier(dict, volinfo, count, bricks); + } + + ret = glusterd_op_perform_add_bricks(volinfo, count, bricks, dict); + if (ret) { + gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, + "Unable to add bricks"); + goto out; + } + if (priv->op_version <= GD_OP_VERSION_3_7_5) { + ret = glusterd_store_volinfo(volinfo, + GLUSTERD_VOLINFO_VER_AC_INCREMENT); + if (ret) + goto out; + } else { + /* + * The cluster is operating at version greater than + * gluster-3.7.5. So no need to store volfiles + * in commit phase, the same will be done + * in post validate phase with v3 framework. 
+ */ + } - if (GLUSTERD_STATUS_STARTED == volinfo->status) - ret = glusterd_svcs_manager (volinfo); + if (GLUSTERD_STATUS_STARTED == volinfo->status) + ret = glusterd_svcs_manager(volinfo); out: - return ret; + return ret; } int -glusterd_op_add_tier_brick (dict_t *dict, char **op_errstr) +glusterd_op_add_tier_brick(dict_t *dict, char **op_errstr) { - int ret = 0; - char *volname = NULL; - glusterd_conf_t *priv = NULL; - glusterd_volinfo_t *volinfo = NULL; - xlator_t *this = NULL; - char *bricks = NULL; - int32_t count = 0; - - this = THIS; - GF_VALIDATE_OR_GOTO ("glusterd", this, out); - - priv = this->private; - GF_VALIDATE_OR_GOTO (this->name, priv, out); - - ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname); - - if (ret) { - gf_msg ("glusterd", GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to get volume name"); - goto out; - } - - ret = glusterd_volinfo_find (volname, &volinfo); - - if (ret) { - gf_msg ("glusterd", GF_LOG_ERROR, EINVAL, - GD_MSG_VOL_NOT_FOUND, "Volume not found"); - goto out; - } - - ret = dict_get_int32n (dict, "count", SLEN ("count"), &count); - if (ret) { - gf_msg ("glusterd", GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to get count"); - goto out; - } - - - ret = dict_get_strn (dict, "bricks", SLEN ("bricks"), &bricks); - if (ret) { - gf_msg ("glusterd", GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to get bricks"); - goto out; - } - - if (dict_getn (dict, "attach-tier", SLEN ("attach-tier"))) { - gf_msg_debug (THIS->name, 0, "Adding tier"); - glusterd_op_perform_attach_tier (dict, volinfo, count, bricks); - } - - ret = glusterd_op_perform_add_bricks (volinfo, count, bricks, dict); - if (ret) { - gf_msg ("glusterd", GF_LOG_ERROR, 0, - GD_MSG_BRICK_ADD_FAIL, "Unable to add bricks"); - goto out; - } - if (priv->op_version <= GD_OP_VERSION_3_10_0) { - ret = glusterd_store_volinfo (volinfo, - GLUSTERD_VOLINFO_VER_AC_INCREMENT); - if (ret) - goto out; - } else { - /* - * The cluster is operating at version greater than - * gluster-3.10.0. So no need to store volfiles - * in commit phase, the same will be done - * in post validate phase with v3 framework. 
- */ - } + int ret = 0; + char *volname = NULL; + glusterd_conf_t *priv = NULL; + glusterd_volinfo_t *volinfo = NULL; + xlator_t *this = NULL; + char *bricks = NULL; + int32_t count = 0; + + this = THIS; + GF_VALIDATE_OR_GOTO("glusterd", this, out); + + priv = this->private; + GF_VALIDATE_OR_GOTO(this->name, priv, out); + + ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); + + if (ret) { + gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get volume name"); + goto out; + } + + ret = glusterd_volinfo_find(volname, &volinfo); + + if (ret) { + gf_msg("glusterd", GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND, + "Volume not found"); + goto out; + } + + ret = dict_get_int32n(dict, "count", SLEN("count"), &count); + if (ret) { + gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get count"); + goto out; + } + + ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks); + if (ret) { + gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get bricks"); + goto out; + } + + if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) { + gf_msg_debug(THIS->name, 0, "Adding tier"); + glusterd_op_perform_attach_tier(dict, volinfo, count, bricks); + } + + ret = glusterd_op_perform_add_bricks(volinfo, count, bricks, dict); + if (ret) { + gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, + "Unable to add bricks"); + goto out; + } + if (priv->op_version <= GD_OP_VERSION_3_10_0) { + ret = glusterd_store_volinfo(volinfo, + GLUSTERD_VOLINFO_VER_AC_INCREMENT); + if (ret) + goto out; + } else { + /* + * The cluster is operating at version greater than + * gluster-3.10.0. So no need to store volfiles + * in commit phase, the same will be done + * in post validate phase with v3 framework. 
+ */ + } - if (GLUSTERD_STATUS_STARTED == volinfo->status) - ret = glusterd_svcs_manager (volinfo); + if (GLUSTERD_STATUS_STARTED == volinfo->status) + ret = glusterd_svcs_manager(volinfo); out: - return ret; + return ret; } void -glusterd_op_perform_detach_tier (glusterd_volinfo_t *volinfo) +glusterd_op_perform_detach_tier(glusterd_volinfo_t *volinfo) { - volinfo->type = volinfo->tier_info.cold_type; - volinfo->replica_count = volinfo->tier_info.cold_replica_count; - volinfo->disperse_count = volinfo->tier_info.cold_disperse_count; - volinfo->redundancy_count = volinfo->tier_info.cold_redundancy_count; - volinfo->dist_leaf_count = volinfo->tier_info.cold_dist_leaf_count; + volinfo->type = volinfo->tier_info.cold_type; + volinfo->replica_count = volinfo->tier_info.cold_replica_count; + volinfo->disperse_count = volinfo->tier_info.cold_disperse_count; + volinfo->redundancy_count = volinfo->tier_info.cold_redundancy_count; + volinfo->dist_leaf_count = volinfo->tier_info.cold_dist_leaf_count; } int -glusterd_op_remove_brick (dict_t *dict, char **op_errstr) +glusterd_op_remove_brick(dict_t *dict, char **op_errstr) { - int ret = -1; - char *volname = NULL; - glusterd_volinfo_t *volinfo = NULL; - char *brick = NULL; - int32_t count = 0; - int32_t i = 1; - char key[64] = ""; - int keylen; - int32_t flag = 0; - int need_rebalance = 0; - int force = 0; - gf1_op_commands cmd = 0; - int32_t replica_count = 0; - glusterd_brickinfo_t *brickinfo = NULL; - glusterd_brickinfo_t *tmp = NULL; - char *task_id_str = NULL; - xlator_t *this = NULL; - dict_t *bricks_dict = NULL; - char *brick_tmpstr = NULL; - int start_remove = 0; - uint32_t commit_hash = 0; - int defrag_cmd = 0; - int detach_commit = 0; - void *tier_info = NULL; - char *cold_shd_key = NULL; - char *hot_shd_key = NULL; - int delete_key = 1; - glusterd_conf_t *conf = NULL; - - this = THIS; - GF_ASSERT (this); - conf = this->private; - GF_VALIDATE_OR_GOTO (this->name, conf, out); - - ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname); - - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_BRICK_ADD_FAIL, "Unable to get volume name"); - goto out; - } - - ret = glusterd_volinfo_find (volname, &volinfo); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, EINVAL, - GD_MSG_VOL_NOT_FOUND, "Unable to allocate memory"); - goto out; - } - - ret = dict_get_int32n (dict, "command", SLEN ("command"), &flag); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to get command"); - goto out; - } - cmd = flag; - - if ((GF_OP_CMD_START == cmd) || - (GF_OP_CMD_DETACH_START == cmd)) - start_remove = 1; - - /* Set task-id, if available, in ctx dict for operations other than - * start - */ - - if (is_origin_glusterd (dict) && (!start_remove)) { - if (!gf_uuid_is_null (volinfo->rebal.rebalance_id)) { - ret = glusterd_copy_uuid_to_dict - (volinfo->rebal.rebalance_id, dict, - GF_REMOVE_BRICK_TID_KEY, - SLEN (GF_REMOVE_BRICK_TID_KEY)); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_REMOVE_BRICK_ID_SET_FAIL, - "Failed to set remove-brick-id"); - goto out; - } - } - } - - /* Clear task-id, rebal.op and stored bricks on commmitting/stopping - * remove-brick */ - if ((!start_remove) && (cmd != GF_OP_CMD_STATUS)) { - gf_uuid_clear (volinfo->rebal.rebalance_id); - volinfo->rebal.op = GD_OP_NONE; - dict_unref (volinfo->rebal.dict); - volinfo->rebal.dict = NULL; - } - - ret = -1; - switch (cmd) { + int ret = -1; + char *volname = NULL; + glusterd_volinfo_t *volinfo = NULL; + char *brick = NULL; + int32_t 
count = 0; + int32_t i = 1; + char key[64] = ""; + int keylen; + int32_t flag = 0; + int need_rebalance = 0; + int force = 0; + gf1_op_commands cmd = 0; + int32_t replica_count = 0; + glusterd_brickinfo_t *brickinfo = NULL; + glusterd_brickinfo_t *tmp = NULL; + char *task_id_str = NULL; + xlator_t *this = NULL; + dict_t *bricks_dict = NULL; + char *brick_tmpstr = NULL; + int start_remove = 0; + uint32_t commit_hash = 0; + int defrag_cmd = 0; + int detach_commit = 0; + void *tier_info = NULL; + char *cold_shd_key = NULL; + char *hot_shd_key = NULL; + int delete_key = 1; + glusterd_conf_t *conf = NULL; + + this = THIS; + GF_ASSERT(this); + conf = this->private; + GF_VALIDATE_OR_GOTO(this->name, conf, out); + + ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); + + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, + "Unable to get volume name"); + goto out; + } + + ret = glusterd_volinfo_find(volname, &volinfo); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND, + "Unable to allocate memory"); + goto out; + } + + ret = dict_get_int32n(dict, "command", SLEN("command"), &flag); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get command"); + goto out; + } + cmd = flag; + + if ((GF_OP_CMD_START == cmd) || (GF_OP_CMD_DETACH_START == cmd)) + start_remove = 1; + + /* Set task-id, if available, in ctx dict for operations other than + * start + */ + + if (is_origin_glusterd(dict) && (!start_remove)) { + if (!gf_uuid_is_null(volinfo->rebal.rebalance_id)) { + ret = glusterd_copy_uuid_to_dict(volinfo->rebal.rebalance_id, dict, + GF_REMOVE_BRICK_TID_KEY, + SLEN(GF_REMOVE_BRICK_TID_KEY)); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, + GD_MSG_REMOVE_BRICK_ID_SET_FAIL, + "Failed to set remove-brick-id"); + goto out; + } + } + } + + /* Clear task-id, rebal.op and stored bricks on commmitting/stopping + * remove-brick */ + if ((!start_remove) && (cmd != GF_OP_CMD_STATUS)) { + gf_uuid_clear(volinfo->rebal.rebalance_id); + volinfo->rebal.op = GD_OP_NONE; + dict_unref(volinfo->rebal.dict); + volinfo->rebal.dict = NULL; + } + + ret = -1; + switch (cmd) { case GF_OP_CMD_NONE: - goto out; + goto out; case GF_OP_CMD_STATUS: - ret = 0; - goto out; + ret = 0; + goto out; case GF_OP_CMD_STOP: - case GF_OP_CMD_STOP_DETACH_TIER: - { - /* Fall back to the old volume file */ - cds_list_for_each_entry_safe (brickinfo, tmp, &volinfo->bricks, - brick_list) { - if (!brickinfo->decommissioned) - continue; - brickinfo->decommissioned = 0; - } - ret = glusterd_create_volfiles_and_notify_services (volinfo); - if (ret) { - gf_msg (this->name, GF_LOG_WARNING, 0, - GD_MSG_VOLFILE_CREATE_FAIL, - "failed to create volfiles"); - goto out; - } - - ret = glusterd_store_volinfo (volinfo, - GLUSTERD_VOLINFO_VER_AC_INCREMENT); - if (ret) { - gf_msg (this->name, GF_LOG_WARNING, 0, - GD_MSG_VOLINFO_SET_FAIL, - "failed to store volinfo"); - goto out; - } - - ret = 0; - goto out; + case GF_OP_CMD_STOP_DETACH_TIER: { + /* Fall back to the old volume file */ + cds_list_for_each_entry_safe(brickinfo, tmp, &volinfo->bricks, + brick_list) + { + if (!brickinfo->decommissioned) + continue; + brickinfo->decommissioned = 0; + } + ret = glusterd_create_volfiles_and_notify_services(volinfo); + if (ret) { + gf_msg(this->name, GF_LOG_WARNING, 0, + GD_MSG_VOLFILE_CREATE_FAIL, "failed to create volfiles"); + goto out; + } + + ret = glusterd_store_volinfo(volinfo, + GLUSTERD_VOLINFO_VER_AC_INCREMENT); + if (ret) { + gf_msg(this->name, GF_LOG_WARNING, 
0, GD_MSG_VOLINFO_SET_FAIL, + "failed to store volinfo"); + goto out; + } + + ret = 0; + goto out; } case GF_OP_CMD_DETACH_START: case GF_OP_CMD_START: - /* Reset defrag status to 'NOT STARTED' whenever a - * remove-brick/rebalance command is issued to remove - * stale information from previous run. - * Update defrag_cmd as well or it will only be done - * for nodes on which the brick to be removed exists. - */ - volinfo->rebal.defrag_cmd = cmd; - volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_NOT_STARTED; - ret = dict_get_strn (dict, GF_REMOVE_BRICK_TID_KEY, - SLEN (GF_REMOVE_BRICK_TID_KEY), - &task_id_str); - if (ret) { - gf_msg_debug (this->name, errno, - "Missing remove-brick-id"); - ret = 0; - } else { - gf_uuid_parse (task_id_str, volinfo->rebal.rebalance_id) ; - volinfo->rebal.op = GD_OP_REMOVE_BRICK; - } - force = 0; - break; + /* Reset defrag status to 'NOT STARTED' whenever a + * remove-brick/rebalance command is issued to remove + * stale information from previous run. + * Update defrag_cmd as well or it will only be done + * for nodes on which the brick to be removed exists. + */ + volinfo->rebal.defrag_cmd = cmd; + volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_NOT_STARTED; + ret = dict_get_strn(dict, GF_REMOVE_BRICK_TID_KEY, + SLEN(GF_REMOVE_BRICK_TID_KEY), &task_id_str); + if (ret) { + gf_msg_debug(this->name, errno, "Missing remove-brick-id"); + ret = 0; + } else { + gf_uuid_parse(task_id_str, volinfo->rebal.rebalance_id); + volinfo->rebal.op = GD_OP_REMOVE_BRICK; + } + force = 0; + break; case GF_OP_CMD_COMMIT: - force = 1; - break; + force = 1; + break; case GF_OP_CMD_DETACH_COMMIT: case GF_OP_CMD_DETACH_COMMIT_FORCE: - glusterd_op_perform_detach_tier (volinfo); - detach_commit = 1; - - /* Disabling ctr when detaching a tier, since - * currently tier is the only consumer of ctr. - * Revisit this code when this constraint no - * longer exist. + glusterd_op_perform_detach_tier(volinfo); + detach_commit = 1; + + /* Disabling ctr when detaching a tier, since + * currently tier is the only consumer of ctr. + * Revisit this code when this constraint no + * longer exist. + */ + dict_deln(volinfo->dict, "features.ctr-enabled", + SLEN("features.ctr-enabled")); + dict_deln(volinfo->dict, "cluster.tier-mode", + SLEN("cluster.tier-mode")); + + hot_shd_key = gd_get_shd_key(volinfo->tier_info.hot_type); + cold_shd_key = gd_get_shd_key(volinfo->tier_info.cold_type); + if (hot_shd_key) { + /* + * Since post detach, shd graph will not contain hot + * tier. So we need to clear option set for hot tier. + * For a tiered volume there can be different key + * for both hot and cold. If hot tier is shd compatible + * then we need to remove the configured value when + * detaching a tier, only if the key's are different or + * cold key is NULL. So we will set delete_key first, + * and if cold key is not null and they are equal then + * we will clear the flag. Otherwise we will delete the + * key. */ - dict_deln (volinfo->dict, "features.ctr-enabled", - SLEN ("features.ctr-enabled")); - dict_deln (volinfo->dict, "cluster.tier-mode", - SLEN ("cluster.tier-mode")); - - hot_shd_key = gd_get_shd_key (volinfo->tier_info.hot_type); - cold_shd_key = gd_get_shd_key (volinfo->tier_info.cold_type); - if (hot_shd_key) { - /* - * Since post detach, shd graph will not contain hot - * tier. So we need to clear option set for hot tier. - * For a tiered volume there can be different key - * for both hot and cold. 
If hot tier is shd compatible - * then we need to remove the configured value when - * detaching a tier, only if the key's are different or - * cold key is NULL. So we will set delete_key first, - * and if cold key is not null and they are equal then - * we will clear the flag. Otherwise we will delete the - * key. - */ - if (cold_shd_key) - delete_key = strcmp (hot_shd_key, cold_shd_key); - if (delete_key) - dict_del (volinfo->dict, hot_shd_key); - } - /* fall through */ + if (cold_shd_key) + delete_key = strcmp(hot_shd_key, cold_shd_key); + if (delete_key) + dict_del(volinfo->dict, hot_shd_key); + } + /* fall through */ case GF_OP_CMD_COMMIT_FORCE: - if (volinfo->decommission_in_progress) { - if (volinfo->rebal.defrag) { - LOCK (&volinfo->rebal.defrag->lock); - /* Fake 'rebalance-complete' so the graph change - happens right away */ - volinfo->rebal.defrag_status = - GF_DEFRAG_STATUS_COMPLETE; - - UNLOCK (&volinfo->rebal.defrag->lock); - } - /* Graph change happens in rebalance _cbk function, - no need to do anything here */ - /* TODO: '_cbk' function is not doing anything for now */ - } - - ret = 0; - force = 1; - break; + if (volinfo->decommission_in_progress) { + if (volinfo->rebal.defrag) { + LOCK(&volinfo->rebal.defrag->lock); + /* Fake 'rebalance-complete' so the graph change + happens right away */ + volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_COMPLETE; + + UNLOCK(&volinfo->rebal.defrag->lock); + } + /* Graph change happens in rebalance _cbk function, + no need to do anything here */ + /* TODO: '_cbk' function is not doing anything for now */ + } + + ret = 0; + force = 1; + break; + } + + ret = dict_get_int32n(dict, "count", SLEN("count"), &count); + if (ret) { + gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get count"); + goto out; + } + + if (volinfo->type == GF_CLUSTER_TYPE_TIER) + count = glusterd_set_detach_bricks(dict, volinfo); + + /* Save the list of bricks for later usage only on starting a + * remove-brick. Right now this is required for displaying the task + * parameters with task status in volume status. + */ + + if (start_remove) { + bricks_dict = dict_new(); + if (!bricks_dict) { + ret = -1; + goto out; + } + ret = dict_set_int32n(bricks_dict, "count", SLEN("count"), count); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Failed to save remove-brick count"); + goto out; } + } - ret = dict_get_int32n (dict, "count", SLEN ("count"), &count); + while (i <= count) { + keylen = snprintf(key, sizeof(key), "brick%d", i); + ret = dict_get_strn(dict, key, keylen, &brick); if (ret) { - gf_msg ("glusterd", GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to get count"); - goto out; + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get %s", key); + goto out; } - if (volinfo->type == GF_CLUSTER_TYPE_TIER) - count = glusterd_set_detach_bricks(dict, volinfo); - - /* Save the list of bricks for later usage only on starting a - * remove-brick. Right now this is required for displaying the task - * parameters with task status in volume status. 
- */ - if (start_remove) { - bricks_dict = dict_new (); - if (!bricks_dict) { - ret = -1; - goto out; - } - ret = dict_set_int32n (bricks_dict, "count", - SLEN ("count"), count); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_SET_FAILED, - "Failed to save remove-brick count"); - goto out; - } - } - - while ( i <= count) { - keylen = snprintf (key, sizeof(key), "brick%d", i); - ret = dict_get_strn (dict, key, keylen, &brick); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Unable to get %s", - key); - goto out; - } - - if (start_remove) { - brick_tmpstr = gf_strdup (brick); - if (!brick_tmpstr) { - ret = -1; - gf_msg (this->name, GF_LOG_ERROR, ENOMEM, - GD_MSG_NO_MEMORY, - "Failed to duplicate brick name"); - goto out; - } - ret = dict_set_dynstrn (bricks_dict, key, keylen, - brick_tmpstr); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_SET_FAILED, - "Failed to add brick to dict"); - goto out; - } - brick_tmpstr = NULL; - } - - ret = glusterd_op_perform_remove_brick (volinfo, brick, force, - &need_rebalance); - if (ret) - goto out; - i++; - } - - if (detach_commit) { - /* Clear related information from volinfo */ - tier_info = ((void *)(&volinfo->tier_info)); - memset (tier_info, 0, sizeof (volinfo->tier_info)); - } - - if (start_remove) - volinfo->rebal.dict = dict_ref (bricks_dict); - - ret = dict_get_int32n (dict, "replica-count", - SLEN ("replica-count"), &replica_count); - if (!ret) { - gf_msg (this->name, GF_LOG_INFO, errno, - GD_MSG_DICT_GET_FAILED, - "changing replica count %d to %d on volume %s", - volinfo->replica_count, replica_count, - volinfo->volname); - volinfo->replica_count = replica_count; - /* A reduction in replica count implies an arbiter volume - * earlier is now no longer one. */ - if (volinfo->arbiter_count) - volinfo->arbiter_count = 0; - volinfo->sub_count = replica_count; - volinfo->dist_leaf_count = glusterd_get_dist_leaf_count (volinfo); - - /* - * volinfo->type and sub_count have already been set for - * volumes undergoing a detach operation, they should not - * be modified here. 
- */ - if ((replica_count == 1) && (cmd != GF_OP_CMD_DETACH_COMMIT) && - (cmd != GF_OP_CMD_DETACH_COMMIT_FORCE)) { - if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) { - volinfo->type = GF_CLUSTER_TYPE_NONE; - /* backward compatibility */ - volinfo->sub_count = 0; - } else { - volinfo->type = GF_CLUSTER_TYPE_STRIPE; - /* backward compatibility */ - volinfo->sub_count = volinfo->dist_leaf_count; - } - } + brick_tmpstr = gf_strdup(brick); + if (!brick_tmpstr) { + ret = -1; + gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY, + "Failed to duplicate brick name"); + goto out; + } + ret = dict_set_dynstrn(bricks_dict, key, keylen, brick_tmpstr); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Failed to add brick to dict"); + goto out; + } + brick_tmpstr = NULL; } - volinfo->subvol_count = (volinfo->brick_count / - volinfo->dist_leaf_count); - if (!glusterd_is_volume_replicate (volinfo) && - conf->op_version >= GD_OP_VERSION_3_12_2) { - ret = dict_set_nstrn (volinfo->dict, - "performance.client-io-threads", - SLEN ("performance.client-io-threads"), - "on", SLEN ("on")); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_SET_FAILED, "Failed to set " - "performance.client-io-threads to on"); - goto out; - } - } + ret = glusterd_op_perform_remove_brick(volinfo, brick, force, + &need_rebalance); + if (ret) + goto out; + i++; + } + + if (detach_commit) { + /* Clear related information from volinfo */ + tier_info = ((void *)(&volinfo->tier_info)); + memset(tier_info, 0, sizeof(volinfo->tier_info)); + } + + if (start_remove) + volinfo->rebal.dict = dict_ref(bricks_dict); + + ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"), + &replica_count); + if (!ret) { + gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_FAILED, + "changing replica count %d to %d on volume %s", + volinfo->replica_count, replica_count, volinfo->volname); + volinfo->replica_count = replica_count; + /* A reduction in replica count implies an arbiter volume + * earlier is now no longer one. */ + if (volinfo->arbiter_count) + volinfo->arbiter_count = 0; + volinfo->sub_count = replica_count; + volinfo->dist_leaf_count = glusterd_get_dist_leaf_count(volinfo); - ret = glusterd_create_volfiles_and_notify_services (volinfo); + /* + * volinfo->type and sub_count have already been set for + * volumes undergoing a detach operation, they should not + * be modified here. 
+ */ + if ((replica_count == 1) && (cmd != GF_OP_CMD_DETACH_COMMIT) && + (cmd != GF_OP_CMD_DETACH_COMMIT_FORCE)) { + if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) { + volinfo->type = GF_CLUSTER_TYPE_NONE; + /* backward compatibility */ + volinfo->sub_count = 0; + } else { + volinfo->type = GF_CLUSTER_TYPE_STRIPE; + /* backward compatibility */ + volinfo->sub_count = volinfo->dist_leaf_count; + } + } + } + volinfo->subvol_count = (volinfo->brick_count / volinfo->dist_leaf_count); + + if (!glusterd_is_volume_replicate(volinfo) && + conf->op_version >= GD_OP_VERSION_3_12_2) { + ret = dict_set_nstrn(volinfo->dict, "performance.client-io-threads", + SLEN("performance.client-io-threads"), "on", + SLEN("on")); if (ret) { - gf_msg (this->name, GF_LOG_WARNING, 0, - GD_MSG_VOLFILE_CREATE_FAIL, "failed to create volfiles"); - goto out; - } - - ret = glusterd_store_volinfo (volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Failed to set " + "performance.client-io-threads to on"); + goto out; + } + } + + ret = glusterd_create_volfiles_and_notify_services(volinfo); + if (ret) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLFILE_CREATE_FAIL, + "failed to create volfiles"); + goto out; + } + + ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT); + if (ret) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLINFO_STORE_FAIL, + "failed to store volinfo"); + goto out; + } + + if (start_remove && volinfo->status == GLUSTERD_STATUS_STARTED) { + ret = glusterd_svcs_reconfigure(); if (ret) { - gf_msg (this->name, GF_LOG_WARNING, 0, - GD_MSG_VOLINFO_STORE_FAIL, "failed to store volinfo"); - goto out; + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_RECONF_FAIL, + "Unable to reconfigure NFS-Server"); + goto out; } + } - if (start_remove && - volinfo->status == GLUSTERD_STATUS_STARTED) { - ret = glusterd_svcs_reconfigure (); - if (ret) { - gf_msg (this->name, GF_LOG_WARNING, 0, - GD_MSG_NFS_RECONF_FAIL, - "Unable to reconfigure NFS-Server"); - goto out; - } - } - - /* Need to reset the defrag/rebalance status accordingly */ - switch (volinfo->rebal.defrag_status) { + /* Need to reset the defrag/rebalance status accordingly */ + switch (volinfo->rebal.defrag_status) { case GF_DEFRAG_STATUS_FAILED: case GF_DEFRAG_STATUS_COMPLETE: - volinfo->rebal.defrag_status = 0; + volinfo->rebal.defrag_status = 0; /* FALLTHROUGH */ default: - break; - } - if (!force && need_rebalance) { - if (dict_get_uint32(dict, "commit-hash", &commit_hash) == 0) { - volinfo->rebal.commit_hash = commit_hash; - } - /* perform the rebalance operations */ - defrag_cmd = GF_DEFRAG_CMD_START_FORCE; - if (cmd == GF_OP_CMD_DETACH_START) - defrag_cmd = GF_DEFRAG_CMD_START_DETACH_TIER; - /* - * We need to set this *before* we issue commands to the - * bricks, or else we might end up setting it after the bricks - * have responded. If we fail to send the request(s) we'll - * clear it ourselves because nobody else will. 
- */ - volinfo->decommission_in_progress = 1; - char err_str[4096] = ""; - ret = glusterd_handle_defrag_start - (volinfo, err_str, sizeof (err_str), - defrag_cmd, - glusterd_remove_brick_migrate_cbk, GD_OP_REMOVE_BRICK); + break; + } + if (!force && need_rebalance) { + if (dict_get_uint32(dict, "commit-hash", &commit_hash) == 0) { + volinfo->rebal.commit_hash = commit_hash; + } + /* perform the rebalance operations */ + defrag_cmd = GF_DEFRAG_CMD_START_FORCE; + if (cmd == GF_OP_CMD_DETACH_START) + defrag_cmd = GF_DEFRAG_CMD_START_DETACH_TIER; + /* + * We need to set this *before* we issue commands to the + * bricks, or else we might end up setting it after the bricks + * have responded. If we fail to send the request(s) we'll + * clear it ourselves because nobody else will. + */ + volinfo->decommission_in_progress = 1; + char err_str[4096] = ""; + ret = glusterd_handle_defrag_start( + volinfo, err_str, sizeof(err_str), defrag_cmd, + glusterd_remove_brick_migrate_cbk, GD_OP_REMOVE_BRICK); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_REBALANCE_START_FAIL, - "failed to start the rebalance"); - /* TBD: shouldn't we do more than print a message? */ - volinfo->decommission_in_progress = 0; - if (op_errstr) - *op_errstr = gf_strdup (err_str); - } - } else { - if (GLUSTERD_STATUS_STARTED == volinfo->status) - ret = glusterd_svcs_manager (volinfo); - } + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REBALANCE_START_FAIL, + "failed to start the rebalance"); + /* TBD: shouldn't we do more than print a message? */ + volinfo->decommission_in_progress = 0; + if (op_errstr) + *op_errstr = gf_strdup(err_str); + } + } else { + if (GLUSTERD_STATUS_STARTED == volinfo->status) + ret = glusterd_svcs_manager(volinfo); + } out: - GF_FREE (brick_tmpstr); - if (bricks_dict) - dict_unref (bricks_dict); + GF_FREE(brick_tmpstr); + if (bricks_dict) + dict_unref(bricks_dict); - return ret; + return ret; } int -glusterd_op_stage_barrier (dict_t *dict, char **op_errstr) +glusterd_op_stage_barrier(dict_t *dict, char **op_errstr) { - int ret = -1; - xlator_t *this = NULL; - char *volname = NULL; - glusterd_volinfo_t *vol = NULL; - - GF_ASSERT (dict); - this = THIS; - GF_ASSERT (this); - - ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Volname not present in " - "dict"); - goto out; - } - - ret = glusterd_volinfo_find (volname, &vol); - if (ret) { - gf_asprintf (op_errstr, "Volume %s does not exist", volname); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_VOL_NOT_FOUND, "%s", *op_errstr); - goto out; - } - - if (!glusterd_is_volume_started (vol)) { - gf_asprintf (op_errstr, "Volume %s is not started", volname); - ret = -1; - goto out; - } - - ret = dict_get_str_boolean (dict, "barrier", -1); - if (ret == -1) { - gf_asprintf (op_errstr, "Barrier op for volume %s not present " - "in dict", volname); - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "%s", *op_errstr); - goto out; - } - ret = 0; + int ret = -1; + xlator_t *this = NULL; + char *volname = NULL; + glusterd_volinfo_t *vol = NULL; + + GF_ASSERT(dict); + this = THIS; + GF_ASSERT(this); + + ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Volname not present in " + "dict"); + goto out; + } + + ret = glusterd_volinfo_find(volname, &vol); + if (ret) { + gf_asprintf(op_errstr, "Volume %s does not exist", volname); + 
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", + *op_errstr); + goto out; + } + + if (!glusterd_is_volume_started(vol)) { + gf_asprintf(op_errstr, "Volume %s is not started", volname); + ret = -1; + goto out; + } + + ret = dict_get_str_boolean(dict, "barrier", -1); + if (ret == -1) { + gf_asprintf(op_errstr, + "Barrier op for volume %s not present " + "in dict", + volname); + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s", + *op_errstr); + goto out; + } + ret = 0; out: - gf_msg_debug (this->name, 0, "Returning %d", ret); - return ret; + gf_msg_debug(this->name, 0, "Returning %d", ret); + return ret; } int -glusterd_op_barrier (dict_t *dict, char **op_errstr) +glusterd_op_barrier(dict_t *dict, char **op_errstr) { - int ret = -1; - xlator_t *this = NULL; - char *volname = NULL; - glusterd_volinfo_t *vol = NULL; - char *barrier_op = NULL; - - GF_ASSERT (dict); - this = THIS; - GF_ASSERT (this); - - ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Volname not present in " - "dict"); - goto out; - } - - ret = glusterd_volinfo_find (volname, &vol); - if (ret) { - gf_asprintf (op_errstr, "Volume %s does not exist", volname); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_VOL_NOT_FOUND, "%s", *op_errstr); - goto out; - } - - ret = dict_get_strn (dict, "barrier", SLEN ("barrier"), &barrier_op); - if (ret) { - gf_asprintf (op_errstr, "Barrier op for volume %s not present " - "in dict", volname); - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "%s", *op_errstr); - goto out; - } - - ret = dict_set_dynstr_with_alloc (vol->dict, "features.barrier", - barrier_op); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_SET_FAILED, "Failed to set barrier op in" - " volume option dict"); - goto out; - } - - gd_update_volume_op_versions (vol); - ret = glusterd_create_volfiles (vol); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_VOLFILE_CREATE_FAIL, "Failed to create volfiles"); - goto out; - } - ret = glusterd_store_volinfo (vol, GLUSTERD_VOLINFO_VER_AC_INCREMENT); + int ret = -1; + xlator_t *this = NULL; + char *volname = NULL; + glusterd_volinfo_t *vol = NULL; + char *barrier_op = NULL; + + GF_ASSERT(dict); + this = THIS; + GF_ASSERT(this); + + ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Volname not present in " + "dict"); + goto out; + } + + ret = glusterd_volinfo_find(volname, &vol); + if (ret) { + gf_asprintf(op_errstr, "Volume %s does not exist", volname); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", + *op_errstr); + goto out; + } + + ret = dict_get_strn(dict, "barrier", SLEN("barrier"), &barrier_op); + if (ret) { + gf_asprintf(op_errstr, + "Barrier op for volume %s not present " + "in dict", + volname); + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s", + *op_errstr); + goto out; + } + + ret = dict_set_dynstr_with_alloc(vol->dict, "features.barrier", barrier_op); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Failed to set barrier op in" + " volume option dict"); + goto out; + } + + gd_update_volume_op_versions(vol); + ret = glusterd_create_volfiles(vol); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL, + "Failed to create volfiles"); + goto out; + } + ret = glusterd_store_volinfo(vol, 
GLUSTERD_VOLINFO_VER_AC_INCREMENT); out: - gf_msg_debug (this->name, 0, "Returning %d", ret); - return ret; + gf_msg_debug(this->name, 0, "Returning %d", ret); + return ret; } int -__glusterd_handle_add_tier_brick (rpcsvc_request_t *req) +__glusterd_handle_add_tier_brick(rpcsvc_request_t *req) { - int32_t ret = -1; - gf_cli_req cli_req = {{0,} }; - dict_t *dict = NULL; - char *bricks = NULL; - char *volname = NULL; - int brick_count = 0; - void *cli_rsp = NULL; - char err_str[2048] = ""; - gf_cli_rsp rsp = {0,}; - glusterd_volinfo_t *volinfo = NULL; - xlator_t *this = NULL; - int32_t replica_count = 0; - int32_t arbiter_count = 0; - int type = 0; - - this = THIS; - GF_VALIDATE_OR_GOTO ("glusterd", this, out); - - GF_VALIDATE_OR_GOTO (this->name, req, out); - - ret = xdr_to_generic (req->msg[0], &cli_req, - (xdrproc_t)xdr_gf_cli_req); + int32_t ret = -1; + gf_cli_req cli_req = {{ + 0, + }}; + dict_t *dict = NULL; + char *bricks = NULL; + char *volname = NULL; + int brick_count = 0; + void *cli_rsp = NULL; + char err_str[2048] = ""; + gf_cli_rsp rsp = { + 0, + }; + glusterd_volinfo_t *volinfo = NULL; + xlator_t *this = NULL; + int32_t replica_count = 0; + int32_t arbiter_count = 0; + int type = 0; + + this = THIS; + GF_VALIDATE_OR_GOTO("glusterd", this, out); + + GF_VALIDATE_OR_GOTO(this->name, req, out); + + ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { + /*failed to decode msg*/ + req->rpc_err = GARBAGE_ARGS; + snprintf(err_str, sizeof(err_str), "Garbage args received"); + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, "%s", + err_str); + goto out; + } + + gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_ADD_BRICK_REQ_RECVD, + "Received add brick req"); + + if (cli_req.dict.dict_len) { + /* Unserialize the dictionary */ + dict = dict_new(); + + ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len, + &dict); if (ret < 0) { - /*failed to decode msg*/ - req->rpc_err = GARBAGE_ARGS; - snprintf (err_str, sizeof (err_str), "Garbage args received"); - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_GARBAGE_ARGS, "%s", err_str); - goto out; - } - - gf_msg (this->name, GF_LOG_INFO, 0, - GD_MSG_ADD_BRICK_REQ_RECVD, "Received add brick req"); - - if (cli_req.dict.dict_len) { - /* Unserialize the dictionary */ - dict = dict_new (); - - ret = dict_unserialize (cli_req.dict.dict_val, - cli_req.dict.dict_len, - &dict); - if (ret < 0) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_UNSERIALIZE_FAIL, - "failed to " - "unserialize req-buffer to dictionary"); - snprintf (err_str, sizeof (err_str), "Unable to decode " - "the command"); - goto out; - } - } - - ret = dict_get_strn (dict, "volname", SLEN ("volname"), &volname); - - if (ret) { - snprintf (err_str, sizeof (err_str), "Unable to get volume " - "name"); - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "%s", err_str); - goto out; - } - - if (!glusterd_check_volume_exists (volname)) { - snprintf (err_str, sizeof (err_str), "Volume %s does not exist", - volname); - gf_msg (this->name, GF_LOG_ERROR, EINVAL, - GD_MSG_VOL_NOT_FOUND, "%s", err_str); - ret = -1; - goto out; - } - - ret = dict_get_int32n (dict, "count", SLEN ("count"), &brick_count); - if (ret) { - snprintf (err_str, sizeof (err_str), "Unable to get volume " - "brick count"); - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "%s", err_str); - goto out; - } - - ret = dict_get_int32n (dict, "replica-count", - SLEN ("replica-count"), &replica_count); - if (!ret) { - gf_msg 
(this->name, GF_LOG_INFO, errno, - GD_MSG_DICT_GET_SUCCESS, "replica-count is %d", - replica_count); - } - - ret = dict_get_int32n (dict, "arbiter-count", - SLEN ("arbiter-count"), &arbiter_count); - if (!ret) { - gf_msg (this->name, GF_LOG_INFO, errno, - GD_MSG_DICT_GET_SUCCESS, "arbiter-count is %d", - arbiter_count); - } + gf_msg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_UNSERIALIZE_FAIL, + "failed to " + "unserialize req-buffer to dictionary"); + snprintf(err_str, sizeof(err_str), + "Unable to decode " + "the command"); + goto out; + } + } + + ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); + + if (ret) { + snprintf(err_str, sizeof(err_str), + "Unable to get volume " + "name"); + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s", + err_str); + goto out; + } + + if (!glusterd_check_volume_exists(volname)) { + snprintf(err_str, sizeof(err_str), "Volume %s does not exist", volname); + gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND, "%s", + err_str); + ret = -1; + goto out; + } + + ret = dict_get_int32n(dict, "count", SLEN("count"), &brick_count); + if (ret) { + snprintf(err_str, sizeof(err_str), + "Unable to get volume " + "brick count"); + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s", + err_str); + goto out; + } + + ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"), + &replica_count); + if (!ret) { + gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS, + "replica-count is %d", replica_count); + } + + ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"), + &arbiter_count); + if (!ret) { + gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS, + "arbiter-count is %d", arbiter_count); + } + + if (!dict_getn(dict, "force", SLEN("force"))) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Failed to get flag"); + ret = -1; + goto out; + } + + ret = glusterd_volinfo_find(volname, &volinfo); + if (ret) { + snprintf(err_str, sizeof(err_str), + "Unable to get volinfo " + "for volume name %s", + volname); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, "%s", + err_str); + goto out; + } + + if (glusterd_is_tiering_supported(err_str) == _gf_false) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED, + "Tiering not supported at this version"); + ret = -1; + goto out; + } - if (!dict_getn (dict, "force", SLEN ("force"))) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "Failed to get flag"); - ret = -1; - goto out; + if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) { + if (volinfo->type == GF_CLUSTER_TYPE_TIER) { + snprintf(err_str, sizeof(err_str), "Volume %s is already a tier.", + volname); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_ALREADY_TIER, "%s", + err_str); + ret = -1; + goto out; } - ret = glusterd_volinfo_find (volname, &volinfo); + ret = dict_get_int32n(dict, "hot-type", SLEN("hot-type"), &type); if (ret) { - snprintf (err_str, sizeof (err_str), "Unable to get volinfo " - "for volume name %s", volname); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_VOLINFO_GET_FAIL, "%s", err_str); - goto out; - - } - - if (glusterd_is_tiering_supported(err_str) == _gf_false) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_VERSION_UNSUPPORTED, - "Tiering not supported at this version"); - ret = -1; - goto out; - } - - if (dict_getn (dict, "attach-tier", SLEN ("attach-tier"))) { - if (volinfo->type == GF_CLUSTER_TYPE_TIER) { - snprintf (err_str, sizeof (err_str), - "Volume %s is already a 
tier.", volname); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_VOL_ALREADY_TIER, "%s", err_str); - ret = -1; - goto out; - } - - ret = dict_get_int32n (dict, "hot-type", SLEN ("hot-type"), - &type); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, - "failed to get type from dictionary"); - goto out; - } - - } - - ret = dict_get_strn (dict, "bricks", SLEN ("bricks"), &bricks); + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "failed to get type from dictionary"); + goto out; + } + } + + ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks); + if (ret) { + snprintf(err_str, sizeof(err_str), + "Unable to get volume " + "bricks"); + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s", + err_str); + goto out; + } + + if (type != volinfo->type) { + ret = dict_set_int32n(dict, "type", SLEN("type"), type); if (ret) { - snprintf (err_str, sizeof (err_str), "Unable to get volume " - "bricks"); - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_GET_FAILED, "%s", err_str); - goto out; + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "failed to set the new type in dict"); + goto out; } + } - if (type != volinfo->type) { - ret = dict_set_int32n (dict, "type", SLEN ("type"), type); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_DICT_SET_FAILED, - "failed to set the new type in dict"); - goto out; - } - } - - ret = glusterd_mgmt_v3_initiate_all_phases (req, - GD_OP_ADD_TIER_BRICK, - dict); + ret = glusterd_mgmt_v3_initiate_all_phases(req, GD_OP_ADD_TIER_BRICK, dict); out: - if (ret) { - rsp.op_ret = -1; - rsp.op_errno = 0; - if (err_str[0] == '\0') - snprintf (err_str, sizeof (err_str), - "Operation failed"); - rsp.op_errstr = err_str; - cli_rsp = &rsp; - glusterd_to_cli (req, cli_rsp, NULL, 0, NULL, - (xdrproc_t)xdr_gf_cli_rsp, dict); - ret = 0; /*sent error to cli, prevent second reply*/ - } + if (ret) { + rsp.op_ret = -1; + rsp.op_errno = 0; + if (err_str[0] == '\0') + snprintf(err_str, sizeof(err_str), "Operation failed"); + rsp.op_errstr = err_str; + cli_rsp = &rsp; + glusterd_to_cli(req, cli_rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp, + dict); + ret = 0; /*sent error to cli, prevent second reply*/ + } - free (cli_req.dict.dict_val); /*its malloced by xdr*/ + free(cli_req.dict.dict_val); /*its malloced by xdr*/ - return ret; + return ret; } int -glusterd_handle_add_tier_brick (rpcsvc_request_t *req) +glusterd_handle_add_tier_brick(rpcsvc_request_t *req) { - return glusterd_big_locked_handler (req, - __glusterd_handle_add_tier_brick); + return glusterd_big_locked_handler(req, __glusterd_handle_add_tier_brick); } int -glusterd_handle_attach_tier (rpcsvc_request_t *req) +glusterd_handle_attach_tier(rpcsvc_request_t *req) { - return glusterd_big_locked_handler (req, __glusterd_handle_add_brick); + return glusterd_big_locked_handler(req, __glusterd_handle_add_brick); } int -glusterd_handle_detach_tier (rpcsvc_request_t *req) +glusterd_handle_detach_tier(rpcsvc_request_t *req) { - return glusterd_big_locked_handler (req, - __glusterd_handle_remove_brick); + return glusterd_big_locked_handler(req, __glusterd_handle_remove_brick); } diff --git a/xlators/mgmt/glusterd/src/glusterd-conn-helper.c b/xlators/mgmt/glusterd/src/glusterd-conn-helper.c index bfa9d02..a7f54ec 100644 --- a/xlators/mgmt/glusterd/src/glusterd-conn-helper.c +++ b/xlators/mgmt/glusterd/src/glusterd-conn-helper.c @@ -15,7 +15,7 @@ #include <urcu/rculist.h> glusterd_svc_t * -glusterd_conn_get_svc_object 
(glusterd_conn_t *conn) +glusterd_conn_get_svc_object(glusterd_conn_t *conn) { - return cds_list_entry (conn, glusterd_svc_t, conn); + return cds_list_entry(conn, glusterd_svc_t, conn); } diff --git a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c index a2c12ed..8702507 100644 --- a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c +++ b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c @@ -17,122 +17,119 @@ #include "glusterd-messages.h" int -glusterd_conn_init (glusterd_conn_t *conn, char *sockpath, - int frame_timeout, glusterd_conn_notify_t notify) +glusterd_conn_init(glusterd_conn_t *conn, char *sockpath, int frame_timeout, + glusterd_conn_notify_t notify) { - int ret = -1; - dict_t *options = NULL; - struct rpc_clnt *rpc = NULL; - xlator_t *this = THIS; - glusterd_svc_t *svc = NULL; - - if (!this) - goto out; - - svc = glusterd_conn_get_svc_object (conn); - if (!svc) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SVC_GET_FAIL, "Failed to get the service"); - goto out; - } - - ret = rpc_transport_unix_options_build (&options, sockpath, - frame_timeout); - if (ret) - goto out; - - ret = dict_set_nstrn (options, "transport.socket.ignore-enoent", - SLEN ("transport.socket.ignore-enoent"), - "on", SLEN ("on")); - if (ret) - goto out; - - /* @options is free'd by rpc_transport when destroyed */ - rpc = rpc_clnt_new (options, this, (char *)svc->name, 16); - if (!rpc) { - ret = -1; - goto out; - } - - ret = rpc_clnt_register_notify (rpc, glusterd_conn_common_notify, - conn); - if (ret) - goto out; - - ret = snprintf (conn->sockpath, sizeof (conn->sockpath), "%s", - sockpath); - if (ret < 0) - goto out; - else - ret = 0; - - conn->frame_timeout = frame_timeout; - conn->rpc = rpc; - conn->notify = notify; + int ret = -1; + dict_t *options = NULL; + struct rpc_clnt *rpc = NULL; + xlator_t *this = THIS; + glusterd_svc_t *svc = NULL; + + if (!this) + goto out; + + svc = glusterd_conn_get_svc_object(conn); + if (!svc) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_GET_FAIL, + "Failed to get the service"); + goto out; + } + + ret = rpc_transport_unix_options_build(&options, sockpath, frame_timeout); + if (ret) + goto out; + + ret = dict_set_nstrn(options, "transport.socket.ignore-enoent", + SLEN("transport.socket.ignore-enoent"), "on", + SLEN("on")); + if (ret) + goto out; + + /* @options is free'd by rpc_transport when destroyed */ + rpc = rpc_clnt_new(options, this, (char *)svc->name, 16); + if (!rpc) { + ret = -1; + goto out; + } + + ret = rpc_clnt_register_notify(rpc, glusterd_conn_common_notify, conn); + if (ret) + goto out; + + ret = snprintf(conn->sockpath, sizeof(conn->sockpath), "%s", sockpath); + if (ret < 0) + goto out; + else + ret = 0; + + conn->frame_timeout = frame_timeout; + conn->rpc = rpc; + conn->notify = notify; out: - if (ret) { - if (rpc) { - rpc_clnt_unref (rpc); - rpc = NULL; - } + if (ret) { + if (rpc) { + rpc_clnt_unref(rpc); + rpc = NULL; } - return ret; + } + return ret; } int -glusterd_conn_term (glusterd_conn_t *conn) +glusterd_conn_term(glusterd_conn_t *conn) { - rpc_clnt_unref (conn->rpc); - return 0; + rpc_clnt_unref(conn->rpc); + return 0; } int -glusterd_conn_connect (glusterd_conn_t *conn) +glusterd_conn_connect(glusterd_conn_t *conn) { - return rpc_clnt_start (conn->rpc); + return rpc_clnt_start(conn->rpc); } int -glusterd_conn_disconnect (glusterd_conn_t *conn) +glusterd_conn_disconnect(glusterd_conn_t *conn) { - rpc_clnt_disconnect (conn->rpc); + rpc_clnt_disconnect(conn->rpc); - return 0; + return 0; } - int 
-__glusterd_conn_common_notify (struct rpc_clnt *rpc, void *mydata, - rpc_clnt_event_t event, void *data) +__glusterd_conn_common_notify(struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, void *data) { - glusterd_conn_t *conn = mydata; + glusterd_conn_t *conn = mydata; - /* Silently ignoring this error, exactly like the current - * implementation */ - if (!conn) - return 0; + /* Silently ignoring this error, exactly like the current + * implementation */ + if (!conn) + return 0; - return conn->notify (conn, event); + return conn->notify(conn, event); } int -glusterd_conn_common_notify (struct rpc_clnt *rpc, void *mydata, - rpc_clnt_event_t event, void *data) +glusterd_conn_common_notify(struct rpc_clnt *rpc, void *mydata, + rpc_clnt_event_t event, void *data) { - return glusterd_big_locked_notify - (rpc, mydata, event, data, - __glusterd_conn_common_notify); + return glusterd_big_locked_notify(rpc, mydata, event, data, + __glusterd_conn_common_notify); } int32_t -glusterd_conn_build_socket_filepath (char *rundir, uuid_t uuid, - char *socketpath, int len) +glusterd_conn_build_socket_filepath(char *rundir, uuid_t uuid, char *socketpath, + int len) { - char sockfilepath[PATH_MAX] = {0,}; + char sockfilepath[PATH_MAX] = { + 0, + }; - snprintf (sockfilepath, sizeof (sockfilepath), "%s/run-%s", - rundir, uuid_utoa (uuid)); + snprintf(sockfilepath, sizeof(sockfilepath), "%s/run-%s", rundir, + uuid_utoa(uuid)); - glusterd_set_socket_filepath (sockfilepath, socketpath, len); - return 0; + glusterd_set_socket_filepath(sockfilepath, socketpath, len); + return 0; } diff --git a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c index cf003ac..075fdf7 100644 --- a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c +++ b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c @@ -24,357 +24,353 @@ #include <signal.h> static int -dict_get_param (dict_t *dict, char *key, char **param); +dict_get_param(dict_t *dict, char *key, char **param); struct gsync_config_opt_vals_ gsync_confopt_vals[] = { - {.op_name = "change_detector", - .no_of_pos_vals = 2, - .case_sensitive = _gf_true, - .values = {"xsync", "changelog"}, - }, - {.op_name = "special_sync_mode", - .no_of_pos_vals = 2, - .case_sensitive = _gf_true, - .values = {"partial", "recover"} - }, - {.op_name = "log-level", - .no_of_pos_vals = 5, - .case_sensitive = _gf_false, - .values = {"critical", "error", "warning", "info", "debug"} - }, - {.op_name = "use-tarssh", - .no_of_pos_vals = 6, - .case_sensitive = _gf_false, - .values = {"true", "false", "0", "1", "yes", "no"} - }, - {.op_name = "ignore_deletes", - .no_of_pos_vals = 6, - .case_sensitive = _gf_false, - .values = {"true", "false", "0", "1", "yes", "no"} - }, - {.op_name = "use_meta_volume", - .no_of_pos_vals = 6, - .case_sensitive = _gf_false, - .values = {"true", "false", "0", "1", "yes", "no"} - }, - {.op_name = "use-meta-volume", - .no_of_pos_vals = 6, - .case_sensitive = _gf_false, - .values = {"true", "false", "0", "1", "yes", "no"} - }, - {.op_name = NULL, - }, + { + .op_name = "change_detector", + .no_of_pos_vals = 2, + .case_sensitive = _gf_true, + .values = {"xsync", "changelog"}, + }, + {.op_name = "special_sync_mode", + .no_of_pos_vals = 2, + .case_sensitive = _gf_true, + .values = {"partial", "recover"}}, + {.op_name = "log-level", + .no_of_pos_vals = 5, + .case_sensitive = _gf_false, + .values = {"critical", "error", "warning", "info", "debug"}}, + {.op_name = "use-tarssh", + .no_of_pos_vals = 6, + .case_sensitive = _gf_false, + .values = {"true", 
"false", "0", "1", "yes", "no"}}, + {.op_name = "ignore_deletes", + .no_of_pos_vals = 6, + .case_sensitive = _gf_false, + .values = {"true", "false", "0", "1", "yes", "no"}}, + {.op_name = "use_meta_volume", + .no_of_pos_vals = 6, + .case_sensitive = _gf_false, + .values = {"true", "false", "0", "1", "yes", "no"}}, + {.op_name = "use-meta-volume", + .no_of_pos_vals = 6, + .case_sensitive = _gf_false, + .values = {"true", "false", "0", "1", "yes", "no"}}, + { + .op_name = NULL, + }, }; -static char *gsync_reserved_opts[] = { - "gluster-command", - "pid-file", - "state-file", - "session-owner", - "state-socket-unencoded", - "socketdir", - "local-id", - "local-path", - "slave-id", - NULL -}; +static char *gsync_reserved_opts[] = {"gluster-command", + "pid-file", + "state-file", + "session-owner", + "state-socket-unencoded", + "socketdir", + "local-id", + "local-path", + "slave-id", + NULL}; -static char *gsync_no_restart_opts[] = { - "checkpoint", - "log_rsync_performance", - "log-rsync-performance", - NULL -}; +static char *gsync_no_restart_opts[] = {"checkpoint", "log_rsync_performance", + "log-rsync-performance", NULL}; int -__glusterd_handle_sys_exec (rpcsvc_request_t *req) +__glusterd_handle_sys_exec(rpcsvc_request_t *req) { - int32_t ret = 0; - dict_t *dict = NULL; - gf_cli_req cli_req = {{0},}; - glusterd_op_t cli_op = GD_OP_SYS_EXEC; - glusterd_conf_t *priv = NULL; - char *host_uuid = NULL; - char err_str[64] = {0,}; - xlator_t *this = NULL; - - GF_ASSERT (req); - - this = THIS; - GF_ASSERT (this); - priv = this->private; - GF_ASSERT (priv); - - ret = xdr_to_generic (req->msg[0], &cli_req, - (xdrproc_t)xdr_gf_cli_req); + int32_t ret = 0; + dict_t *dict = NULL; + gf_cli_req cli_req = { + {0}, + }; + glusterd_op_t cli_op = GD_OP_SYS_EXEC; + glusterd_conf_t *priv = NULL; + char *host_uuid = NULL; + char err_str[64] = { + 0, + }; + xlator_t *this = NULL; + + GF_ASSERT(req); + + this = THIS; + GF_ASSERT(this); + priv = this->private; + GF_ASSERT(priv); + + ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + if (cli_req.dict.dict_len) { + dict = dict_new(); + if (!dict) + goto out; + + ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len, + &dict); if (ret < 0) { - req->rpc_err = GARBAGE_ARGS; - goto out; + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, + "failed to " + "unserialize req-buffer to dictionary"); + snprintf(err_str, sizeof(err_str), + "Unable to decode " + "the command"); + goto out; + } else { + dict->extra_stdfree = cli_req.dict.dict_val; } - if (cli_req.dict.dict_len) { - dict = dict_new (); - if (!dict) - goto out; - - - ret = dict_unserialize (cli_req.dict.dict_val, - cli_req.dict.dict_len, - &dict); - if (ret < 0) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_UNSERIALIZE_FAIL, "failed to " - "unserialize req-buffer to dictionary"); - snprintf (err_str, sizeof (err_str), "Unable to decode " - "the command"); - goto out; - } else { - dict->extra_stdfree = cli_req.dict.dict_val; - } - - host_uuid = gf_strdup (uuid_utoa(MY_UUID)); - if (host_uuid == NULL) { - snprintf (err_str, sizeof (err_str), "Failed to get " - "the uuid of local glusterd"); - ret = -1; - goto out; - } - - ret = dict_set_dynstr (dict, "host-uuid", host_uuid); - if (ret) - goto out; + host_uuid = gf_strdup(uuid_utoa(MY_UUID)); + if (host_uuid == NULL) { + snprintf(err_str, sizeof(err_str), + "Failed to get " + "the uuid of local glusterd"); + ret = -1; + goto out; } - ret = 
glusterd_op_begin_synctask (req, cli_op, dict); + ret = dict_set_dynstr(dict, "host-uuid", host_uuid); + if (ret) + goto out; + } + + ret = glusterd_op_begin_synctask(req, cli_op, dict); out: - if (ret) { - if (err_str[0] == '\0') - snprintf (err_str, sizeof (err_str), - "Operation failed"); - ret = glusterd_op_send_cli_response (cli_op, ret, 0, req, - dict, err_str); - } - return ret; + if (ret) { + if (err_str[0] == '\0') + snprintf(err_str, sizeof(err_str), "Operation failed"); + ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str); + } + return ret; } int -__glusterd_handle_copy_file (rpcsvc_request_t *req) +__glusterd_handle_copy_file(rpcsvc_request_t *req) { - int32_t ret = 0; - dict_t *dict = NULL; - gf_cli_req cli_req = {{0},}; - glusterd_op_t cli_op = GD_OP_COPY_FILE; - glusterd_conf_t *priv = NULL; - char *host_uuid = NULL; - char err_str[64] = {0,}; - xlator_t *this = NULL; - - GF_ASSERT (req); - - this = THIS; - GF_ASSERT (this); - priv = this->private; - GF_ASSERT (priv); - - ret = xdr_to_generic (req->msg[0], &cli_req, - (xdrproc_t)xdr_gf_cli_req); + int32_t ret = 0; + dict_t *dict = NULL; + gf_cli_req cli_req = { + {0}, + }; + glusterd_op_t cli_op = GD_OP_COPY_FILE; + glusterd_conf_t *priv = NULL; + char *host_uuid = NULL; + char err_str[64] = { + 0, + }; + xlator_t *this = NULL; + + GF_ASSERT(req); + + this = THIS; + GF_ASSERT(this); + priv = this->private; + GF_ASSERT(priv); + + ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + if (cli_req.dict.dict_len) { + dict = dict_new(); + if (!dict) + goto out; + + ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len, + &dict); if (ret < 0) { - req->rpc_err = GARBAGE_ARGS; - goto out; + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, + "failed to" + "unserialize req-buffer to dictionary"); + snprintf(err_str, sizeof(err_str), + "Unable to decode " + "the command"); + goto out; + } else { + dict->extra_stdfree = cli_req.dict.dict_val; } - if (cli_req.dict.dict_len) { - dict = dict_new (); - if (!dict) - goto out; - - - ret = dict_unserialize (cli_req.dict.dict_val, - cli_req.dict.dict_len, - &dict); - if (ret < 0) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_UNSERIALIZE_FAIL, "failed to" - "unserialize req-buffer to dictionary"); - snprintf (err_str, sizeof (err_str), "Unable to decode " - "the command"); - goto out; - } else { - dict->extra_stdfree = cli_req.dict.dict_val; - } - - host_uuid = gf_strdup (uuid_utoa(MY_UUID)); - if (host_uuid == NULL) { - snprintf (err_str, sizeof (err_str), "Failed to get " - "the uuid of local glusterd"); - ret = -1; - goto out; - } - - ret = dict_set_dynstr (dict, "host-uuid", host_uuid); - if (ret) - goto out; + host_uuid = gf_strdup(uuid_utoa(MY_UUID)); + if (host_uuid == NULL) { + snprintf(err_str, sizeof(err_str), + "Failed to get " + "the uuid of local glusterd"); + ret = -1; + goto out; } - ret = glusterd_op_begin_synctask (req, cli_op, dict); + ret = dict_set_dynstr(dict, "host-uuid", host_uuid); + if (ret) + goto out; + } + + ret = glusterd_op_begin_synctask(req, cli_op, dict); out: - if (ret) { - if (err_str[0] == '\0') - snprintf (err_str, sizeof (err_str), - "Operation failed"); - ret = glusterd_op_send_cli_response (cli_op, ret, 0, req, - dict, err_str); - } - return ret; + if (ret) { + if (err_str[0] == '\0') + snprintf(err_str, sizeof(err_str), "Operation failed"); + ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, 
err_str); + } + return ret; } int -__glusterd_handle_gsync_set (rpcsvc_request_t *req) +__glusterd_handle_gsync_set(rpcsvc_request_t *req) { - int32_t ret = 0; - dict_t *dict = NULL; - gf_cli_req cli_req = {{0},}; - glusterd_op_t cli_op = GD_OP_GSYNC_SET; - char *master = NULL; - char *slave = NULL; - char operation[64] = {0,}; - int type = 0; - glusterd_conf_t *priv = NULL; - char *host_uuid = NULL; - char err_str[64] = {0,}; - xlator_t *this = NULL; - - GF_ASSERT (req); - - this = THIS; - GF_ASSERT (this); - priv = this->private; - GF_ASSERT (priv); - - ret = xdr_to_generic (req->msg[0], &cli_req, - (xdrproc_t)xdr_gf_cli_req); - if (ret < 0) { - req->rpc_err = GARBAGE_ARGS; - goto out; - } - - if (cli_req.dict.dict_len) { - dict = dict_new (); - if (!dict) - goto out; - - ret = dict_unserialize (cli_req.dict.dict_val, - cli_req.dict.dict_len, - &dict); - if (ret < 0) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_UNSERIALIZE_FAIL, "failed to " - "unserialize req-buffer to dictionary"); - snprintf (err_str, sizeof (err_str), "Unable to decode " - "the command"); - goto out; - } else { - dict->extra_stdfree = cli_req.dict.dict_val; - } - - host_uuid = gf_strdup (uuid_utoa(MY_UUID)); - if (host_uuid == NULL) { - snprintf (err_str, sizeof (err_str), "Failed to get " - "the uuid of local glusterd"); - ret = -1; - goto out; - } - ret = dict_set_dynstr (dict, "host-uuid", host_uuid); - if (ret) - goto out; - - } - - ret = dict_get_str (dict, "master", &master); + int32_t ret = 0; + dict_t *dict = NULL; + gf_cli_req cli_req = { + {0}, + }; + glusterd_op_t cli_op = GD_OP_GSYNC_SET; + char *master = NULL; + char *slave = NULL; + char operation[64] = { + 0, + }; + int type = 0; + glusterd_conf_t *priv = NULL; + char *host_uuid = NULL; + char err_str[64] = { + 0, + }; + xlator_t *this = NULL; + + GF_ASSERT(req); + + this = THIS; + GF_ASSERT(this); + priv = this->private; + GF_ASSERT(priv); + + ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); + if (ret < 0) { + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + if (cli_req.dict.dict_len) { + dict = dict_new(); + if (!dict) + goto out; + + ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len, + &dict); if (ret < 0) { - gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED, - "master not found, while handling "GEOREP" options"); - master = "(No Master)"; + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, + "failed to " + "unserialize req-buffer to dictionary"); + snprintf(err_str, sizeof(err_str), + "Unable to decode " + "the command"); + goto out; + } else { + dict->extra_stdfree = cli_req.dict.dict_val; } - ret = dict_get_str (dict, "slave", &slave); - if (ret < 0) { - gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED, - "slave not found, while handling "GEOREP" options"); - slave = "(No Slave)"; + host_uuid = gf_strdup(uuid_utoa(MY_UUID)); + if (host_uuid == NULL) { + snprintf(err_str, sizeof(err_str), + "Failed to get " + "the uuid of local glusterd"); + ret = -1; + goto out; } - - ret = dict_get_int32 (dict, "type", &type); - if (ret < 0) { - snprintf (err_str, sizeof (err_str), "Command type not found " - "while handling "GEOREP" options"); - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "%s", err_str); - goto out; - } - - switch (type) { + ret = dict_set_dynstr(dict, "host-uuid", host_uuid); + if (ret) + goto out; + } + + ret = dict_get_str(dict, "master", &master); + if (ret < 0) { + gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED, + "master 
not found, while handling " GEOREP " options"); + master = "(No Master)"; + } + + ret = dict_get_str(dict, "slave", &slave); + if (ret < 0) { + gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED, + "slave not found, while handling " GEOREP " options"); + slave = "(No Slave)"; + } + + ret = dict_get_int32(dict, "type", &type); + if (ret < 0) { + snprintf(err_str, sizeof(err_str), + "Command type not found " + "while handling " GEOREP " options"); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s", + err_str); + goto out; + } + + switch (type) { case GF_GSYNC_OPTION_TYPE_CREATE: - snprintf (operation, sizeof (operation), "create"); - cli_op = GD_OP_GSYNC_CREATE; - break; + snprintf(operation, sizeof(operation), "create"); + cli_op = GD_OP_GSYNC_CREATE; + break; case GF_GSYNC_OPTION_TYPE_START: - snprintf (operation, sizeof (operation), "start"); - break; + snprintf(operation, sizeof(operation), "start"); + break; case GF_GSYNC_OPTION_TYPE_STOP: - snprintf (operation, sizeof (operation), "stop"); - break; + snprintf(operation, sizeof(operation), "stop"); + break; case GF_GSYNC_OPTION_TYPE_PAUSE: - snprintf (operation, sizeof (operation), "pause"); - break; + snprintf(operation, sizeof(operation), "pause"); + break; case GF_GSYNC_OPTION_TYPE_RESUME: - snprintf (operation, sizeof (operation), "resume"); - break; + snprintf(operation, sizeof(operation), "resume"); + break; case GF_GSYNC_OPTION_TYPE_CONFIG: - snprintf (operation, sizeof (operation), "config"); - break; + snprintf(operation, sizeof(operation), "config"); + break; case GF_GSYNC_OPTION_TYPE_STATUS: - snprintf (operation, sizeof (operation), "status"); - break; - } + snprintf(operation, sizeof(operation), "status"); + break; + } - ret = glusterd_op_begin_synctask (req, cli_op, dict); + ret = glusterd_op_begin_synctask(req, cli_op, dict); out: - if (ret) { - if (err_str[0] == '\0') - snprintf (err_str, sizeof (err_str), - "Operation failed"); - ret = glusterd_op_send_cli_response (cli_op, ret, 0, req, - dict, err_str); - } - return ret; + if (ret) { + if (err_str[0] == '\0') + snprintf(err_str, sizeof(err_str), "Operation failed"); + ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str); + } + return ret; } int -glusterd_handle_sys_exec (rpcsvc_request_t *req) +glusterd_handle_sys_exec(rpcsvc_request_t *req) { - return glusterd_big_locked_handler (req, __glusterd_handle_sys_exec); + return glusterd_big_locked_handler(req, __glusterd_handle_sys_exec); } int -glusterd_handle_copy_file (rpcsvc_request_t *req) +glusterd_handle_copy_file(rpcsvc_request_t *req) { - return glusterd_big_locked_handler (req, __glusterd_handle_copy_file); + return glusterd_big_locked_handler(req, __glusterd_handle_copy_file); } int -glusterd_handle_gsync_set (rpcsvc_request_t *req) +glusterd_handle_gsync_set(rpcsvc_request_t *req) { - return glusterd_big_locked_handler (req, __glusterd_handle_gsync_set); + return glusterd_big_locked_handler(req, __glusterd_handle_gsync_set); } /***** @@ -384,1583 +380,1574 @@ glusterd_handle_gsync_set (rpcsvc_request_t *req) *****/ static void -glusterd_urltransform_init (runner_t *runner, const char *transname) +glusterd_urltransform_init(runner_t *runner, const char *transname) { - runinit (runner); - runner_add_arg (runner, GSYNCD_PREFIX"/gsyncd"); - runner_argprintf (runner, "--%s-url", transname); + runinit(runner); + runner_add_arg(runner, GSYNCD_PREFIX "/gsyncd"); + runner_argprintf(runner, "--%s-url", transname); } static void -glusterd_urltransform_add (runner_t *runner, const 
char *url) +glusterd_urltransform_add(runner_t *runner, const char *url) { - runner_add_arg (runner, url); + runner_add_arg(runner, url); } /* Helper routine to terminate just before slave_voluuid */ static int32_t -parse_slave_url (char *slv_url, char **slave) +parse_slave_url(char *slv_url, char **slave) { - char *tmp = NULL; - xlator_t *this = NULL; - int32_t ret = -1; - - this = THIS; - - /* slave format: - * master_node_uuid:ssh://slave_host::slave_vol:slave_voluuid */ - *slave = strchr (slv_url, ':'); - if (!(*slave)) { - goto out; - } - (*slave)++; - - /* To terminate at : before slave volume uuid */ - tmp = strstr (*slave, "::"); - if (!tmp) { - goto out; - } - tmp += 2; - tmp = strchr (tmp, ':'); - if (!tmp) - gf_msg_debug (this->name, 0, "old slave: %s!", *slave); - else - *tmp = '\0'; - - ret = 0; - gf_msg_debug (this->name, 0, "parsed slave: %s!", *slave); + char *tmp = NULL; + xlator_t *this = NULL; + int32_t ret = -1; + + this = THIS; + + /* slave format: + * master_node_uuid:ssh://slave_host::slave_vol:slave_voluuid */ + *slave = strchr(slv_url, ':'); + if (!(*slave)) { + goto out; + } + (*slave)++; + + /* To terminate at : before slave volume uuid */ + tmp = strstr(*slave, "::"); + if (!tmp) { + goto out; + } + tmp += 2; + tmp = strchr(tmp, ':'); + if (!tmp) + gf_msg_debug(this->name, 0, "old slave: %s!", *slave); + else + *tmp = '\0'; + + ret = 0; + gf_msg_debug(this->name, 0, "parsed slave: %s!", *slave); out: - return ret; + return ret; } static int -_glusterd_urltransform_add_iter (dict_t *dict, char *key, data_t *value, void *data) +_glusterd_urltransform_add_iter(dict_t *dict, char *key, data_t *value, + void *data) { - runner_t *runner = (runner_t *)data; - char slv_url[VOLINFO_SLAVE_URL_MAX] = {0}; - char *slave = NULL; - xlator_t *this = NULL; - int32_t ret = -1; - - this = THIS; - GF_VALIDATE_OR_GOTO ("glusterd", this, out); - - gf_msg_debug (this->name, 0, "value->data %s", value->data); - - if (snprintf (slv_url, sizeof(slv_url), "%s", value->data) >= - sizeof (slv_url)) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVE_VOL_PARSE_FAIL, - "Error in copying slave: %s!", value->data); - goto out; - } - - ret = parse_slave_url (slv_url, &slave); - if (ret == -1) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVE_VOL_PARSE_FAIL, - "Error in parsing slave: %s!", value->data); - goto out; - } - - runner_add_arg (runner, slave); - ret = 0; + runner_t *runner = (runner_t *)data; + char slv_url[VOLINFO_SLAVE_URL_MAX] = {0}; + char *slave = NULL; + xlator_t *this = NULL; + int32_t ret = -1; + + this = THIS; + GF_VALIDATE_OR_GOTO("glusterd", this, out); + + gf_msg_debug(this->name, 0, "value->data %s", value->data); + + if (snprintf(slv_url, sizeof(slv_url), "%s", value->data) >= + sizeof(slv_url)) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_VOL_PARSE_FAIL, + "Error in copying slave: %s!", value->data); + goto out; + } + + ret = parse_slave_url(slv_url, &slave); + if (ret == -1) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_VOL_PARSE_FAIL, + "Error in parsing slave: %s!", value->data); + goto out; + } + + runner_add_arg(runner, slave); + ret = 0; out: - return ret; + return ret; } static void -glusterd_urltransform_free (char **linearr, unsigned n) +glusterd_urltransform_free(char **linearr, unsigned n) { - int i = 0; + int i = 0; - for (; i < n; i++) - GF_FREE (linearr[i]); + for (; i < n; i++) + GF_FREE(linearr[i]); - GF_FREE (linearr); + GF_FREE(linearr); } static int -glusterd_urltransform (runner_t *runner, char ***linearrp) 
+glusterd_urltransform(runner_t *runner, char ***linearrp) { - char **linearr = NULL; - char *line = NULL; - unsigned arr_len = 32; - unsigned arr_idx = 0; - gf_boolean_t error = _gf_false; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - - linearr = GF_CALLOC (arr_len, sizeof (char *), gf_gld_mt_linearr); - if (!linearr) { + char **linearr = NULL; + char *line = NULL; + unsigned arr_len = 32; + unsigned arr_idx = 0; + gf_boolean_t error = _gf_false; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + + linearr = GF_CALLOC(arr_len, sizeof(char *), gf_gld_mt_linearr); + if (!linearr) { + error = _gf_true; + goto out; + } + + runner_redir(runner, STDOUT_FILENO, RUN_PIPE); + if (runner_start(runner) != 0) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SPAWNING_CHILD_FAILED, + "spawning child failed"); + + error = _gf_true; + goto out; + } + + arr_idx = 0; + for (;;) { + size_t len; + line = GF_MALLOC(1024, gf_gld_mt_linebuf); + if (!line) { + error = _gf_true; + goto out; + } + + if (fgets(line, 1024, runner_chio(runner, STDOUT_FILENO)) == NULL) { + GF_FREE(line); + break; + } + + len = strlen(line); + if (len == 0 || line[len - 1] != '\n') { + GF_FREE(line); + error = _gf_true; + goto out; + } + line[len - 1] = '\0'; + + if (arr_idx == arr_len) { + void *p = linearr; + arr_len <<= 1; + p = GF_REALLOC(linearr, arr_len); + if (!p) { + GF_FREE(line); error = _gf_true; goto out; + } + linearr = p; } + linearr[arr_idx] = line; - runner_redir (runner, STDOUT_FILENO, RUN_PIPE); - if (runner_start (runner) != 0) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SPAWNING_CHILD_FAILED, - "spawning child failed"); - - error = _gf_true; - goto out; - } - - arr_idx = 0; - for (;;) { - size_t len; - line = GF_MALLOC (1024, gf_gld_mt_linebuf); - if (!line) { - error = _gf_true; - goto out; - } - - if (fgets (line, 1024, runner_chio (runner, STDOUT_FILENO)) == - NULL) { - GF_FREE (line); - break; - } - - len = strlen (line); - if (len == 0 || line[len - 1] != '\n') { - GF_FREE (line); - error = _gf_true; - goto out; - } - line[len - 1] = '\0'; - - if (arr_idx == arr_len) { - void *p = linearr; - arr_len <<= 1; - p = GF_REALLOC (linearr, arr_len); - if (!p) { - GF_FREE (line); - error = _gf_true; - goto out; - } - linearr = p; - } - linearr[arr_idx] = line; - - arr_idx++; - } - - out: + arr_idx++; + } - /* XXX chpid field is not exported by run API - * but runner_end() does not abort the invoked - * process (ie. it might block in waitpid(2)) - * so we resort to a manual kill a the private field - */ - if (error && runner->chpid > 0) - kill (runner->chpid, SIGKILL); - - if (runner_end (runner) != 0) - error = _gf_true; +out: - if (error) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_READ_CHILD_DATA_FAILED, - "reading data from child failed"); - glusterd_urltransform_free (linearr, arr_idx); - return -1; - } + /* XXX chpid field is not exported by run API + * but runner_end() does not abort the invoked + * process (ie. 
it might block in waitpid(2)) + * so we resort to a manual kill a the private field + */ + if (error && runner->chpid > 0) + kill(runner->chpid, SIGKILL); + + if (runner_end(runner) != 0) + error = _gf_true; + + if (error) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_READ_CHILD_DATA_FAILED, + "reading data from child failed"); + glusterd_urltransform_free(linearr, arr_idx); + return -1; + } - *linearrp = linearr; - return arr_idx; + *linearrp = linearr; + return arr_idx; } static int -glusterd_urltransform_single (const char *url, const char *transname, - char ***linearrp) +glusterd_urltransform_single(const char *url, const char *transname, + char ***linearrp) { - runner_t runner = {0,}; + runner_t runner = { + 0, + }; - glusterd_urltransform_init (&runner, transname); - glusterd_urltransform_add (&runner, url); - return glusterd_urltransform (&runner, linearrp); + glusterd_urltransform_init(&runner, transname); + glusterd_urltransform_add(&runner, url); + return glusterd_urltransform(&runner, linearrp); } - struct dictidxmark { - unsigned isrch; - unsigned ithis; - char *ikey; + unsigned isrch; + unsigned ithis; + char *ikey; }; - struct slave_vol_config { - char old_slvhost[_POSIX_HOST_NAME_MAX+1]; - char old_slvuser[LOGIN_NAME_MAX]; - unsigned old_slvidx; - char slave_voluuid[GF_UUID_BUF_SIZE]; + char old_slvhost[_POSIX_HOST_NAME_MAX + 1]; + char old_slvuser[LOGIN_NAME_MAX]; + unsigned old_slvidx; + char slave_voluuid[GF_UUID_BUF_SIZE]; }; static int -_dict_mark_atindex (dict_t *dict, char *key, data_t *value, void *data) +_dict_mark_atindex(dict_t *dict, char *key, data_t *value, void *data) { - struct dictidxmark *dim = data; + struct dictidxmark *dim = data; - if (dim->isrch == dim->ithis) - dim->ikey = key; + if (dim->isrch == dim->ithis) + dim->ikey = key; - dim->ithis++; - return 0; + dim->ithis++; + return 0; } static char * -dict_get_by_index (dict_t *dict, unsigned i) +dict_get_by_index(dict_t *dict, unsigned i) { - struct dictidxmark dim = {0,}; + struct dictidxmark dim = { + 0, + }; - dim.isrch = i; - dict_foreach (dict, _dict_mark_atindex, &dim); + dim.isrch = i; + dict_foreach(dict, _dict_mark_atindex, &dim); - return dim.ikey; + return dim.ikey; } static int -glusterd_get_slave (glusterd_volinfo_t *vol, const char *slaveurl, char **slavekey) +glusterd_get_slave(glusterd_volinfo_t *vol, const char *slaveurl, + char **slavekey) { - runner_t runner = {0,}; - int n = 0; - int i = 0; - char **linearr = NULL; - int32_t ret = 0; - - glusterd_urltransform_init (&runner, "canonicalize"); - ret = dict_foreach (vol->gsync_slaves, _glusterd_urltransform_add_iter, - &runner); - if (ret < 0) - return -2; - - glusterd_urltransform_add (&runner, slaveurl); - - n = glusterd_urltransform (&runner, &linearr); - if (n == -1) - return -2; - - for (i = 0; i < n - 1; i++) { - if (strcmp (linearr[i], linearr[n - 1]) == 0) - break; - } - glusterd_urltransform_free (linearr, n); - - if (i < n - 1) - *slavekey = dict_get_by_index (vol->gsync_slaves, i); - else - i = -1; - - return i; + runner_t runner = { + 0, + }; + int n = 0; + int i = 0; + char **linearr = NULL; + int32_t ret = 0; + + glusterd_urltransform_init(&runner, "canonicalize"); + ret = dict_foreach(vol->gsync_slaves, _glusterd_urltransform_add_iter, + &runner); + if (ret < 0) + return -2; + + glusterd_urltransform_add(&runner, slaveurl); + + n = glusterd_urltransform(&runner, &linearr); + if (n == -1) + return -2; + + for (i = 0; i < n - 1; i++) { + if (strcmp(linearr[i], linearr[n - 1]) == 0) + break; + } + 
glusterd_urltransform_free(linearr, n); + + if (i < n - 1) + *slavekey = dict_get_by_index(vol->gsync_slaves, i); + else + i = -1; + + return i; } static int -glusterd_query_extutil_generic (char *resbuf, size_t blen, runner_t *runner, void *data, - int (*fcbk)(char *resbuf, size_t blen, FILE *fp, void *data)) +glusterd_query_extutil_generic(char *resbuf, size_t blen, runner_t *runner, + void *data, + int (*fcbk)(char *resbuf, size_t blen, FILE *fp, + void *data)) { - int ret = 0; - xlator_t *this = NULL; + int ret = 0; + xlator_t *this = NULL; - this = THIS; - GF_ASSERT (this); + this = THIS; + GF_ASSERT(this); - runner_redir (runner, STDOUT_FILENO, RUN_PIPE); - if (runner_start (runner) != 0) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SPAWNING_CHILD_FAILED, - "spawning child failed"); + runner_redir(runner, STDOUT_FILENO, RUN_PIPE); + if (runner_start(runner) != 0) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SPAWNING_CHILD_FAILED, + "spawning child failed"); - return -1; - } + return -1; + } - ret = fcbk (resbuf, blen, runner_chio (runner, STDOUT_FILENO), data); + ret = fcbk(resbuf, blen, runner_chio(runner, STDOUT_FILENO), data); - ret |= runner_end (runner); - if (ret) - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_READ_CHILD_DATA_FAILED, - "reading data from child failed"); + ret |= runner_end(runner); + if (ret) + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_READ_CHILD_DATA_FAILED, + "reading data from child failed"); - return ret ? -1 : 0; + return ret ? -1 : 0; } static int _fcbk_singleline(char *resbuf, size_t blen, FILE *fp, void *data) { - char *ptr = NULL; + char *ptr = NULL; - errno = 0; - ptr = fgets (resbuf, blen, fp); - if (ptr) { - size_t len = strlen(resbuf); - if (len && resbuf[len-1] == '\n') - resbuf[len-1] = '\0'; //strip off \n - } + errno = 0; + ptr = fgets(resbuf, blen, fp); + if (ptr) { + size_t len = strlen(resbuf); + if (len && resbuf[len - 1] == '\n') + resbuf[len - 1] = '\0'; // strip off \n + } - return errno ? -1 : 0; + return errno ? 
-1 : 0; } static int -glusterd_query_extutil (char *resbuf, runner_t *runner) +glusterd_query_extutil(char *resbuf, runner_t *runner) { - return glusterd_query_extutil_generic (resbuf, PATH_MAX, runner, NULL, - _fcbk_singleline); + return glusterd_query_extutil_generic(resbuf, PATH_MAX, runner, NULL, + _fcbk_singleline); } static int -glusterd_get_slave_voluuid (char *slave_host, char *slave_vol, char *vol_uuid) +glusterd_get_slave_voluuid(char *slave_host, char *slave_vol, char *vol_uuid) { - runner_t runner = {0,}; - glusterd_conf_t *priv = NULL; - xlator_t *this = NULL; - int ret = -1; + runner_t runner = { + 0, + }; + glusterd_conf_t *priv = NULL; + xlator_t *this = NULL; + int ret = -1; - this = THIS; - GF_VALIDATE_OR_GOTO ("glusterd", this, out); + this = THIS; + GF_VALIDATE_OR_GOTO("glusterd", this, out); - priv = this->private; - GF_VALIDATE_OR_GOTO (this->name, priv, out); + priv = this->private; + GF_VALIDATE_OR_GOTO(this->name, priv, out); - runinit (&runner); - runner_add_arg (&runner, GSYNCD_PREFIX"/gsyncd"); - runner_add_arg (&runner, "--slavevoluuid-get"); - runner_argprintf (&runner, "%s::%s", slave_host, slave_vol); + runinit(&runner); + runner_add_arg(&runner, GSYNCD_PREFIX "/gsyncd"); + runner_add_arg(&runner, "--slavevoluuid-get"); + runner_argprintf(&runner, "%s::%s", slave_host, slave_vol); - synclock_unlock (&priv->big_lock); - ret = glusterd_query_extutil (vol_uuid, &runner); - synclock_lock (&priv->big_lock); + synclock_unlock(&priv->big_lock); + ret = glusterd_query_extutil(vol_uuid, &runner); + synclock_lock(&priv->big_lock); out: - return ret; + return ret; } - static int -_fcbk_conftodict (char *resbuf, size_t blen, FILE *fp, void *data) +_fcbk_conftodict(char *resbuf, size_t blen, FILE *fp, void *data) { - char *ptr = NULL; - dict_t *dict = data; - char *v = NULL; - - for (;;) { - errno = 0; - ptr = fgets (resbuf, blen, fp); - if (!ptr) - break; - v = resbuf + strlen(resbuf) - 1; - while (isspace (*v)) - /* strip trailing space */ - *v-- = '\0'; - if (v == resbuf) - /* skip empty line */ - continue; - v = strchr (resbuf, ':'); - if (!v) - return -1; - *v++ = '\0'; - while (isspace (*v)) - v++; - v = gf_strdup (v); - if (!v) - return -1; - if (dict_set_dynstr (dict, resbuf, v) != 0) { - GF_FREE (v); - return -1; - } - } + char *ptr = NULL; + dict_t *dict = data; + char *v = NULL; - return errno ? -1 : 0; + for (;;) { + errno = 0; + ptr = fgets(resbuf, blen, fp); + if (!ptr) + break; + v = resbuf + strlen(resbuf) - 1; + while (isspace(*v)) + /* strip trailing space */ + *v-- = '\0'; + if (v == resbuf) + /* skip empty line */ + continue; + v = strchr(resbuf, ':'); + if (!v) + return -1; + *v++ = '\0'; + while (isspace(*v)) + v++; + v = gf_strdup(v); + if (!v) + return -1; + if (dict_set_dynstr(dict, resbuf, v) != 0) { + GF_FREE(v); + return -1; + } + } + + return errno ? 
-1 : 0; } static int -glusterd_gsync_get_config (char *master, char *slave, char *conf_path, dict_t *dict) +glusterd_gsync_get_config(char *master, char *slave, char *conf_path, + dict_t *dict) { - /* key + value, where value must be able to accommodate a path */ - char resbuf[256 + PATH_MAX] = {0,}; - runner_t runner = {0,}; - - runinit (&runner); - runner_add_args (&runner, GSYNCD_PREFIX"/gsyncd", "-c", NULL); - runner_argprintf (&runner, "%s", conf_path); - runner_argprintf (&runner, "--iprefix=%s", DATADIR); - runner_argprintf (&runner, ":%s", master); - runner_add_args (&runner, slave, "--config-get-all", NULL); - - return glusterd_query_extutil_generic (resbuf, sizeof (resbuf), - &runner, dict, _fcbk_conftodict); + /* key + value, where value must be able to accommodate a path */ + char resbuf[256 + PATH_MAX] = { + 0, + }; + runner_t runner = { + 0, + }; + + runinit(&runner); + runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "-c", NULL); + runner_argprintf(&runner, "%s", conf_path); + runner_argprintf(&runner, "--iprefix=%s", DATADIR); + runner_argprintf(&runner, ":%s", master); + runner_add_args(&runner, slave, "--config-get-all", NULL); + + return glusterd_query_extutil_generic(resbuf, sizeof(resbuf), &runner, dict, + _fcbk_conftodict); } static int -_fcbk_statustostruct (char *resbuf, size_t blen, FILE *fp, - void *data) +_fcbk_statustostruct(char *resbuf, size_t blen, FILE *fp, void *data) { - char *ptr = NULL; - char *v = NULL; - char *k = NULL; - gf_gsync_status_t *sts_val = NULL; - - sts_val = (gf_gsync_status_t *)data; - - for (;;) { - errno = 0; - ptr = fgets (resbuf, blen, fp); - if (!ptr) - break; - - v = resbuf + strlen(resbuf) - 1; - while (isspace (*v)) - /* strip trailing space */ - *v-- = '\0'; - if (v == resbuf) - /* skip empty line */ - continue; - v = strchr (resbuf, ':'); - if (!v) - return -1; - *v++ = '\0'; - while (isspace (*v)) - v++; - v = gf_strdup (v); - if (!v) - return -1; - - k = gf_strdup (resbuf); - if (!k) { - GF_FREE (v); - return -1; - } + char *ptr = NULL; + char *v = NULL; + char *k = NULL; + gf_gsync_status_t *sts_val = NULL; - if (strcmp (k, "worker_status") == 0) { - memcpy (sts_val->worker_status, v, - strlen(v)); - sts_val->worker_status[strlen(v)] = '\0'; - } else if (strcmp (k, "slave_node") == 0) { - memcpy (sts_val->slave_node, v, - strlen(v)); - sts_val->slave_node[strlen(v)] = '\0'; - } else if (strcmp (k, "crawl_status") == 0) { - memcpy (sts_val->crawl_status, v, - strlen(v)); - sts_val->crawl_status[strlen(v)] = '\0'; - } else if (strcmp (k, "last_synced") == 0) { - memcpy (sts_val->last_synced, v, - strlen(v)); - sts_val->last_synced[strlen(v)] = '\0'; - } else if (strcmp (k, "last_synced_utc") == 0) { - memcpy (sts_val->last_synced_utc, v, - strlen(v)); - sts_val->last_synced_utc[strlen(v)] = '\0'; - } else if (strcmp (k, "entry") == 0) { - memcpy (sts_val->entry, v, - strlen(v)); - sts_val->entry[strlen(v)] = '\0'; - } else if (strcmp (k, "data") == 0) { - memcpy (sts_val->data, v, - strlen(v)); - sts_val->data[strlen(v)] = '\0'; - } else if (strcmp (k, "meta") == 0) { - memcpy (sts_val->meta, v, - strlen(v)); - sts_val->meta[strlen(v)] = '\0'; - } else if (strcmp (k, "failures") == 0) { - memcpy (sts_val->failures, v, - strlen(v)); - sts_val->failures[strlen(v)] = '\0'; - } else if (strcmp (k, "checkpoint_time") == 0) { - memcpy (sts_val->checkpoint_time, v, - strlen(v)); - sts_val->checkpoint_time[strlen(v)] = '\0'; - } else if (strcmp (k, "checkpoint_time_utc") == 0) { - memcpy (sts_val->checkpoint_time_utc, v, - strlen(v)); 
- sts_val->checkpoint_time_utc[strlen(v)] = '\0'; - } else if (strcmp (k, "checkpoint_completed") == 0) { - memcpy (sts_val->checkpoint_completed, v, - strlen(v)); - sts_val->checkpoint_completed[strlen(v)] = '\0'; - } else if (strcmp (k, "checkpoint_completion_time") == 0) { - memcpy (sts_val->checkpoint_completion_time, v, - strlen(v)); - sts_val->checkpoint_completion_time[strlen(v)] = '\0'; - } else if (strcmp (k, "checkpoint_completion_time_utc") == 0) { - memcpy (sts_val->checkpoint_completion_time_utc, v, - strlen(v)); - sts_val->checkpoint_completion_time_utc[strlen(v)] = - '\0'; - } - GF_FREE(v); - GF_FREE(k); - } + sts_val = (gf_gsync_status_t *)data; - return errno ? -1 : 0; + for (;;) { + errno = 0; + ptr = fgets(resbuf, blen, fp); + if (!ptr) + break; + + v = resbuf + strlen(resbuf) - 1; + while (isspace(*v)) + /* strip trailing space */ + *v-- = '\0'; + if (v == resbuf) + /* skip empty line */ + continue; + v = strchr(resbuf, ':'); + if (!v) + return -1; + *v++ = '\0'; + while (isspace(*v)) + v++; + v = gf_strdup(v); + if (!v) + return -1; + + k = gf_strdup(resbuf); + if (!k) { + GF_FREE(v); + return -1; + } + + if (strcmp(k, "worker_status") == 0) { + memcpy(sts_val->worker_status, v, strlen(v)); + sts_val->worker_status[strlen(v)] = '\0'; + } else if (strcmp(k, "slave_node") == 0) { + memcpy(sts_val->slave_node, v, strlen(v)); + sts_val->slave_node[strlen(v)] = '\0'; + } else if (strcmp(k, "crawl_status") == 0) { + memcpy(sts_val->crawl_status, v, strlen(v)); + sts_val->crawl_status[strlen(v)] = '\0'; + } else if (strcmp(k, "last_synced") == 0) { + memcpy(sts_val->last_synced, v, strlen(v)); + sts_val->last_synced[strlen(v)] = '\0'; + } else if (strcmp(k, "last_synced_utc") == 0) { + memcpy(sts_val->last_synced_utc, v, strlen(v)); + sts_val->last_synced_utc[strlen(v)] = '\0'; + } else if (strcmp(k, "entry") == 0) { + memcpy(sts_val->entry, v, strlen(v)); + sts_val->entry[strlen(v)] = '\0'; + } else if (strcmp(k, "data") == 0) { + memcpy(sts_val->data, v, strlen(v)); + sts_val->data[strlen(v)] = '\0'; + } else if (strcmp(k, "meta") == 0) { + memcpy(sts_val->meta, v, strlen(v)); + sts_val->meta[strlen(v)] = '\0'; + } else if (strcmp(k, "failures") == 0) { + memcpy(sts_val->failures, v, strlen(v)); + sts_val->failures[strlen(v)] = '\0'; + } else if (strcmp(k, "checkpoint_time") == 0) { + memcpy(sts_val->checkpoint_time, v, strlen(v)); + sts_val->checkpoint_time[strlen(v)] = '\0'; + } else if (strcmp(k, "checkpoint_time_utc") == 0) { + memcpy(sts_val->checkpoint_time_utc, v, strlen(v)); + sts_val->checkpoint_time_utc[strlen(v)] = '\0'; + } else if (strcmp(k, "checkpoint_completed") == 0) { + memcpy(sts_val->checkpoint_completed, v, strlen(v)); + sts_val->checkpoint_completed[strlen(v)] = '\0'; + } else if (strcmp(k, "checkpoint_completion_time") == 0) { + memcpy(sts_val->checkpoint_completion_time, v, strlen(v)); + sts_val->checkpoint_completion_time[strlen(v)] = '\0'; + } else if (strcmp(k, "checkpoint_completion_time_utc") == 0) { + memcpy(sts_val->checkpoint_completion_time_utc, v, strlen(v)); + sts_val->checkpoint_completion_time_utc[strlen(v)] = '\0'; + } + GF_FREE(v); + GF_FREE(k); + } + + return errno ? 
-1 : 0; } - static int -glusterd_gsync_get_status (char *master, char *slave, char *conf_path, - char *brick_path, gf_gsync_status_t *sts_val) +glusterd_gsync_get_status(char *master, char *slave, char *conf_path, + char *brick_path, gf_gsync_status_t *sts_val) { - /* key + value, where value must be able to accommodate a path */ - char resbuf[256 + PATH_MAX] = {0,}; - runner_t runner = {0,}; - - runinit (&runner); - runner_add_args (&runner, GSYNCD_PREFIX"/gsyncd", "-c", NULL); - runner_argprintf (&runner, "%s", conf_path); - runner_argprintf (&runner, "--iprefix=%s", DATADIR); - runner_argprintf (&runner, ":%s", master); - runner_add_args (&runner, slave, "--status-get", NULL); - runner_add_args (&runner, "--path", brick_path, NULL); - - return glusterd_query_extutil_generic (resbuf, sizeof (resbuf), - &runner, sts_val, - _fcbk_statustostruct); + /* key + value, where value must be able to accommodate a path */ + char resbuf[256 + PATH_MAX] = { + 0, + }; + runner_t runner = { + 0, + }; + + runinit(&runner); + runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "-c", NULL); + runner_argprintf(&runner, "%s", conf_path); + runner_argprintf(&runner, "--iprefix=%s", DATADIR); + runner_argprintf(&runner, ":%s", master); + runner_add_args(&runner, slave, "--status-get", NULL); + runner_add_args(&runner, "--path", brick_path, NULL); + + return glusterd_query_extutil_generic(resbuf, sizeof(resbuf), &runner, + sts_val, _fcbk_statustostruct); } static int -glusterd_gsync_get_param_file (char *prmfile, const char *param, char *master, - char *slave, char *conf_path) +glusterd_gsync_get_param_file(char *prmfile, const char *param, char *master, + char *slave, char *conf_path) { - runner_t runner = {0,}; - - runinit (&runner); - runner_add_args (&runner, GSYNCD_PREFIX"/gsyncd", "-c", NULL); - runner_argprintf (&runner, "%s", conf_path); - runner_argprintf (&runner, "--iprefix=%s", DATADIR); - runner_argprintf (&runner, ":%s", master); - runner_add_args (&runner, slave, "--config-get", NULL); - runner_argprintf (&runner, "%s-file", param); - - return glusterd_query_extutil (prmfile, &runner); + runner_t runner = { + 0, + }; + + runinit(&runner); + runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "-c", NULL); + runner_argprintf(&runner, "%s", conf_path); + runner_argprintf(&runner, "--iprefix=%s", DATADIR); + runner_argprintf(&runner, ":%s", master); + runner_add_args(&runner, slave, "--config-get", NULL); + runner_argprintf(&runner, "%s-file", param); + + return glusterd_query_extutil(prmfile, &runner); } static int -gsyncd_getpidfile (char *master, char *slave, char *pidfile, - char *conf_path, gf_boolean_t *is_template_in_use) +gsyncd_getpidfile(char *master, char *slave, char *pidfile, char *conf_path, + gf_boolean_t *is_template_in_use) { - char temp_conf_path[PATH_MAX] = ""; - char *working_conf_path = NULL; - glusterd_conf_t *priv = NULL; - int ret = -1; - struct stat stbuf = {0,}; - xlator_t *this = NULL; - int32_t len = 0; - - this = THIS; - GF_ASSERT (this); - - GF_ASSERT (this->private); - GF_ASSERT (conf_path); - - priv = this->private; - - GF_VALIDATE_OR_GOTO ("gsync", master, out); - GF_VALIDATE_OR_GOTO ("gsync", slave, out); - - len = snprintf (temp_conf_path, sizeof(temp_conf_path), - "%s/"GSYNC_CONF_TEMPLATE, priv->workdir); - if ((len < 0) || (len >= sizeof(temp_conf_path))) { - goto out; - } - - ret = sys_lstat (conf_path, &stbuf); - if (!ret) { - gf_msg_debug (this->name, 0, "Using passed config template(%s).", - conf_path); - working_conf_path = conf_path; - } else { - gf_msg 
(this->name, GF_LOG_WARNING, ENOENT, - GD_MSG_FILE_OP_FAILED, - "Config file (%s) missing. Looking for template " - "config file (%s)", conf_path, temp_conf_path); - ret = sys_lstat (temp_conf_path, &stbuf); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, ENOENT, - GD_MSG_FILE_OP_FAILED, - "Template config file (%s) missing.", - temp_conf_path); - goto out; - } - gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_DEFAULT_TEMP_CONFIG, - "Using default config template(%s).", - temp_conf_path); - working_conf_path = temp_conf_path; - *is_template_in_use = _gf_true; + char temp_conf_path[PATH_MAX] = ""; + char *working_conf_path = NULL; + glusterd_conf_t *priv = NULL; + int ret = -1; + struct stat stbuf = { + 0, + }; + xlator_t *this = NULL; + int32_t len = 0; + + this = THIS; + GF_ASSERT(this); + + GF_ASSERT(this->private); + GF_ASSERT(conf_path); + + priv = this->private; + + GF_VALIDATE_OR_GOTO("gsync", master, out); + GF_VALIDATE_OR_GOTO("gsync", slave, out); + + len = snprintf(temp_conf_path, sizeof(temp_conf_path), + "%s/" GSYNC_CONF_TEMPLATE, priv->workdir); + if ((len < 0) || (len >= sizeof(temp_conf_path))) { + goto out; + } + + ret = sys_lstat(conf_path, &stbuf); + if (!ret) { + gf_msg_debug(this->name, 0, "Using passed config template(%s).", + conf_path); + working_conf_path = conf_path; + } else { + gf_msg(this->name, GF_LOG_WARNING, ENOENT, GD_MSG_FILE_OP_FAILED, + "Config file (%s) missing. Looking for template " + "config file (%s)", + conf_path, temp_conf_path); + ret = sys_lstat(temp_conf_path, &stbuf); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, ENOENT, GD_MSG_FILE_OP_FAILED, + "Template config file (%s) missing.", temp_conf_path); + goto out; } + gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DEFAULT_TEMP_CONFIG, + "Using default config template(%s).", temp_conf_path); + working_conf_path = temp_conf_path; + *is_template_in_use = _gf_true; + } fetch_data: - ret = glusterd_gsync_get_param_file (pidfile, "pid", master, - slave, working_conf_path); - if ((ret == -1) || strlen(pidfile) == 0) { - if (*is_template_in_use == _gf_false) { - gf_msg (this->name, GF_LOG_WARNING, 0, - GD_MSG_PIDFILE_CREATE_FAILED, - "failed to create the pidfile string. " - "Trying default config template"); - working_conf_path = temp_conf_path; - *is_template_in_use = _gf_true; - goto fetch_data; - } else { - ret = -2; - gf_msg (this->name, GF_LOG_WARNING, 0, - GD_MSG_PIDFILE_CREATE_FAILED, "failed to " - "create the pidfile string from template " - "config"); - goto out; - } + ret = glusterd_gsync_get_param_file(pidfile, "pid", master, slave, + working_conf_path); + if ((ret == -1) || strlen(pidfile) == 0) { + if (*is_template_in_use == _gf_false) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PIDFILE_CREATE_FAILED, + "failed to create the pidfile string. 
" + "Trying default config template"); + working_conf_path = temp_conf_path; + *is_template_in_use = _gf_true; + goto fetch_data; + } else { + ret = -2; + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PIDFILE_CREATE_FAILED, + "failed to " + "create the pidfile string from template " + "config"); + goto out; } + } - gf_msg_debug (this->name, 0, "pidfile = %s", pidfile); + gf_msg_debug(this->name, 0, "pidfile = %s", pidfile); - ret = open (pidfile, O_RDWR); - out: - return ret; + ret = open(pidfile, O_RDWR); +out: + return ret; } static int -gsync_status_byfd (int fd) +gsync_status_byfd(int fd) { - GF_ASSERT (fd >= -1); + GF_ASSERT(fd >= -1); - if (lockf (fd, F_TEST, 0) == -1 && - (errno == EAGAIN || errno == EACCES)) - /* gsyncd keeps the pidfile locked */ - return 0; + if (lockf(fd, F_TEST, 0) == -1 && (errno == EAGAIN || errno == EACCES)) + /* gsyncd keeps the pidfile locked */ + return 0; - return -1; + return -1; } /* status: return 0 when gsync is running * return -1 when not running */ int -gsync_status (char *master, char *slave, char *conf_path, - int *status, gf_boolean_t *is_template_in_use) +gsync_status(char *master, char *slave, char *conf_path, int *status, + gf_boolean_t *is_template_in_use) { - char pidfile[PATH_MAX] = {0,}; - int fd = -1; - - fd = gsyncd_getpidfile (master, slave, pidfile, - conf_path, is_template_in_use); - if (fd == -2) - return -1; + char pidfile[PATH_MAX] = { + 0, + }; + int fd = -1; + + fd = gsyncd_getpidfile(master, slave, pidfile, conf_path, + is_template_in_use); + if (fd == -2) + return -1; - *status = gsync_status_byfd (fd); + *status = gsync_status_byfd(fd); - sys_close (fd); + sys_close(fd); - return 0; + return 0; } - static int32_t -glusterd_gsync_volinfo_dict_set (glusterd_volinfo_t *volinfo, - char *key, char *value) +glusterd_gsync_volinfo_dict_set(glusterd_volinfo_t *volinfo, char *key, + char *value) { - int32_t ret = -1; - char *gsync_status = NULL; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - - gsync_status = gf_strdup (value); - if (!gsync_status) { - gf_msg (this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY, - "Unable to allocate memory"); - goto out; - } - - ret = dict_set_dynstr (volinfo->dict, key, gsync_status); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, - "Unable to set dict"); - goto out; - } - - ret = 0; + int32_t ret = -1; + char *gsync_status = NULL; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + + gsync_status = gf_strdup(value); + if (!gsync_status) { + gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY, + "Unable to allocate memory"); + goto out; + } + + ret = dict_set_dynstr(volinfo->dict, key, gsync_status); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Unable to set dict"); + goto out; + } + + ret = 0; out: - return ret; + return ret; } static int -glusterd_verify_gsyncd_spawn (char *master, char *slave) +glusterd_verify_gsyncd_spawn(char *master, char *slave) { - int ret = 0; - runner_t runner = {0,}; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - - runinit (&runner); - runner_add_args (&runner, GSYNCD_PREFIX"/gsyncd", - "--verify", "spawning", NULL); - runner_argprintf (&runner, ":%s", master); - runner_add_args (&runner, slave, NULL); - runner_redir (&runner, STDOUT_FILENO, RUN_PIPE); - ret = runner_start (&runner); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SPAWNING_CHILD_FAILED, - "spawning child failed"); - ret = -1; - goto out; - } - - if (runner_end (&runner) != 0) - ret = 
-1; + int ret = 0; + runner_t runner = { + 0, + }; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + + runinit(&runner); + runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "--verify", "spawning", + NULL); + runner_argprintf(&runner, ":%s", master); + runner_add_args(&runner, slave, NULL); + runner_redir(&runner, STDOUT_FILENO, RUN_PIPE); + ret = runner_start(&runner); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SPAWNING_CHILD_FAILED, + "spawning child failed"); + ret = -1; + goto out; + } + + if (runner_end(&runner) != 0) + ret = -1; out: - gf_msg_debug (this->name, 0, "returning %d", ret); - return ret; + gf_msg_debug(this->name, 0, "returning %d", ret); + return ret; } static int -gsync_verify_config_options (dict_t *dict, char **op_errstr, char *volname) +gsync_verify_config_options(dict_t *dict, char **op_errstr, char *volname) { - char **resopt = NULL; - int i = 0; - int ret = -1; - char *subop = NULL; - char *slave = NULL; - char *op_name = NULL; - char *op_value = NULL; - char *t = NULL; - char errmsg[PATH_MAX] = ""; - gf_boolean_t banned = _gf_true; - gf_boolean_t op_match = _gf_true; - gf_boolean_t val_match = _gf_true; - struct gsync_config_opt_vals_ *conf_vals = NULL; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - - if (dict_get_str (dict, "subop", &subop) != 0) { - gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, - "missing subop"); - *op_errstr = gf_strdup ("Invalid config request"); - return -1; - } - - if (dict_get_str (dict, "slave", &slave) != 0) { - gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, - GEOREP" CONFIG: no slave given"); - *op_errstr = gf_strdup ("Slave required"); - return -1; - } - - if (strcmp (subop, "get-all") == 0) - return 0; - - if (dict_get_str (dict, "op_name", &op_name) != 0) { - gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, - "option name missing"); - *op_errstr = gf_strdup ("Option name missing"); - return -1; - } - - if (runcmd (GSYNCD_PREFIX"/gsyncd", "--config-check", op_name, NULL)) { - ret = glusterd_verify_gsyncd_spawn (volname, slave); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_GSYNCD_SPAWN_FAILED, "Unable to spawn " - "gsyncd"); - return 0; - } - - gf_msg (this->name, GF_LOG_WARNING, EINVAL, - GD_MSG_INVALID_ENTRY, - "Invalid option %s", op_name); - *op_errstr = gf_strdup ("Invalid option"); + char **resopt = NULL; + int i = 0; + int ret = -1; + char *subop = NULL; + char *slave = NULL; + char *op_name = NULL; + char *op_value = NULL; + char *t = NULL; + char errmsg[PATH_MAX] = ""; + gf_boolean_t banned = _gf_true; + gf_boolean_t op_match = _gf_true; + gf_boolean_t val_match = _gf_true; + struct gsync_config_opt_vals_ *conf_vals = NULL; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + + if (dict_get_str(dict, "subop", &subop) != 0) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, + "missing subop"); + *op_errstr = gf_strdup("Invalid config request"); + return -1; + } - return -1; - } + if (dict_get_str(dict, "slave", &slave) != 0) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, + GEOREP " CONFIG: no slave given"); + *op_errstr = gf_strdup("Slave required"); + return -1; + } - if (strcmp (subop, "get") == 0) - return 0; + if (strcmp(subop, "get-all") == 0) + return 0; - t = strtail (subop, "set"); - if (!t) - t = strtail (subop, "del"); - if (!t || (t[0] && strcmp (t, "-glob") != 0)) { - gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_SUBOP_NOT_FOUND, - "unknown subop %s", subop); - 
*op_errstr = gf_strdup ("Invalid config request"); - return -1; - } + if (dict_get_str(dict, "op_name", &op_name) != 0) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, + "option name missing"); + *op_errstr = gf_strdup("Option name missing"); + return -1; + } - if (strtail (subop, "set") && - dict_get_str (dict, "op_value", &op_value) != 0) { - gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, - "missing value for set"); - *op_errstr = gf_strdup ("missing value"); + if (runcmd(GSYNCD_PREFIX "/gsyncd", "--config-check", op_name, NULL)) { + ret = glusterd_verify_gsyncd_spawn(volname, slave); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_SPAWN_FAILED, + "Unable to spawn " + "gsyncd"); + return 0; } - /* match option name against reserved options, modulo -/_ - * difference - */ - for (resopt = gsync_reserved_opts; *resopt; resopt++) { - banned = _gf_true; - for (i = 0; (*resopt)[i] && op_name[i]; i++) { - if ((*resopt)[i] == op_name[i] || - ((*resopt)[i] == '-' && op_name[i] == '_')) - continue; - banned = _gf_false; - } + gf_msg(this->name, GF_LOG_WARNING, EINVAL, GD_MSG_INVALID_ENTRY, + "Invalid option %s", op_name); + *op_errstr = gf_strdup("Invalid option"); - if (op_name[i] != '\0') - banned = _gf_false; + return -1; + } - if (banned) { - gf_msg (this->name, GF_LOG_WARNING, 0, - GD_MSG_RESERVED_OPTION, - "Reserved option %s", op_name); - *op_errstr = gf_strdup ("Reserved option"); + if (strcmp(subop, "get") == 0) + return 0; - return -1; - break; + t = strtail(subop, "set"); + if (!t) + t = strtail(subop, "del"); + if (!t || (t[0] && strcmp(t, "-glob") != 0)) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_SUBOP_NOT_FOUND, + "unknown subop %s", subop); + *op_errstr = gf_strdup("Invalid config request"); + return -1; + } + + if (strtail(subop, "set") && + dict_get_str(dict, "op_value", &op_value) != 0) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, + "missing value for set"); + *op_errstr = gf_strdup("missing value"); + } + + /* match option name against reserved options, modulo -/_ + * difference + */ + for (resopt = gsync_reserved_opts; *resopt; resopt++) { + banned = _gf_true; + for (i = 0; (*resopt)[i] && op_name[i]; i++) { + if ((*resopt)[i] == op_name[i] || + ((*resopt)[i] == '-' && op_name[i] == '_')) + continue; + banned = _gf_false; + } + + if (op_name[i] != '\0') + banned = _gf_false; + + if (banned) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_RESERVED_OPTION, + "Reserved option %s", op_name); + *op_errstr = gf_strdup("Reserved option"); + + return -1; + break; + } + } + + /* Check options in gsync_confopt_vals for invalid values */ + for (conf_vals = gsync_confopt_vals; conf_vals->op_name; conf_vals++) { + op_match = _gf_true; + for (i = 0; conf_vals->op_name[i] && op_name[i]; i++) { + if (conf_vals->op_name[i] == op_name[i] || + (conf_vals->op_name[i] == '_' && op_name[i] == '-')) + continue; + op_match = _gf_false; + } + + if (op_match) { + if (!op_value) + goto out; + val_match = _gf_false; + for (i = 0; i < conf_vals->no_of_pos_vals; i++) { + if (conf_vals->case_sensitive) { + if (!strcmp(conf_vals->values[i], op_value)) + val_match = _gf_true; + } else { + if (!strcasecmp(conf_vals->values[i], op_value)) + val_match = _gf_true; } - } + } - /* Check options in gsync_confopt_vals for invalid values */ - for (conf_vals = gsync_confopt_vals; conf_vals->op_name; conf_vals++) { - op_match = _gf_true; - for (i = 0; conf_vals->op_name[i] && op_name[i]; i++) { - if (conf_vals->op_name[i] == op_name[i] || - 
(conf_vals->op_name[i] == '_' && op_name[i] == '-')) - continue; - op_match = _gf_false; - } + if (!val_match) { + ret = snprintf(errmsg, sizeof(errmsg) - 1, + "Invalid value(%s) for" + " option %s", + op_value, op_name); + errmsg[ret] = '\0'; - if (op_match) { - if (!op_value) - goto out; - val_match = _gf_false; - for (i = 0; i < conf_vals->no_of_pos_vals; i++) { - if(conf_vals->case_sensitive){ - if (!strcmp (conf_vals->values[i], op_value)) - val_match = _gf_true; - } else { - if (!strcasecmp (conf_vals->values[i], op_value)) - val_match = _gf_true; - } - } - - if (!val_match) { - ret = snprintf (errmsg, sizeof(errmsg) - 1, - "Invalid value(%s) for" - " option %s", op_value, - op_name); - errmsg[ret] = '\0'; - - gf_msg (this->name, GF_LOG_ERROR, EINVAL, - GD_MSG_INVALID_ENTRY, "%s", errmsg); - *op_errstr = gf_strdup (errmsg); - return -1; - } - } + gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, + "%s", errmsg); + *op_errstr = gf_strdup(errmsg); + return -1; + } } + } out: - return 0; + return 0; } static int -glusterd_get_gsync_status_mst_slv (glusterd_volinfo_t *volinfo, - char *slave, char *conf_path, - dict_t *rsp_dict, char *node); +glusterd_get_gsync_status_mst_slv(glusterd_volinfo_t *volinfo, char *slave, + char *conf_path, dict_t *rsp_dict, + char *node); static int -_get_status_mst_slv (dict_t *dict, char *key, data_t *value, void *data) +_get_status_mst_slv(dict_t *dict, char *key, data_t *value, void *data) { - glusterd_gsync_status_temp_t *param = NULL; - char *slave = NULL; - char *slave_buf = NULL; - char *slave_url = NULL; - char *slave_vol = NULL; - char *slave_host = NULL; - char *errmsg = NULL; - char conf_path[PATH_MAX] = ""; - int ret = -1; - glusterd_conf_t *priv = NULL; - xlator_t *this = NULL; - char slv_url[VOLINFO_SLAVE_URL_MAX] = {0}; - - this = THIS; - GF_VALIDATE_OR_GOTO ("glusterd", this, out); - - param = (glusterd_gsync_status_temp_t *)data; - - GF_VALIDATE_OR_GOTO (this->name, param, out); - GF_VALIDATE_OR_GOTO (this->name, param->volinfo, out); - - if (this) - priv = this->private; - GF_VALIDATE_OR_GOTO (this->name, priv, out); - - if (snprintf (slv_url, sizeof(slv_url), "%s", value->data) >= - sizeof (slv_url)) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVE_VOL_PARSE_FAIL, - "Error in copying slave: %s!", value->data); - goto out; - } - - ret = parse_slave_url (slv_url, &slave); - if (ret == -1) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVE_VOL_PARSE_FAIL, - "Error in parsing slave: %s!", value->data); - goto out; - } - - ret = glusterd_get_slave_info (slave, &slave_url, - &slave_host, &slave_vol, &errmsg); - if (ret) { - if (errmsg) - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVEINFO_FETCH_ERROR, - "Unable to fetch slave details. 
Error: %s", - errmsg); - else - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVEINFO_FETCH_ERROR, - "Unable to fetch slave details."); - ret = -1; - goto out; - } - - ret = snprintf (conf_path, sizeof(conf_path) - 1, - "%s/"GEOREP"/%s_%s_%s/gsyncd.conf", - priv->workdir, param->volinfo->volname, - slave_host, slave_vol); - conf_path[ret] = '\0'; - - ret = glusterd_get_gsync_status_mst_slv(param->volinfo, - slave, conf_path, - param->rsp_dict, - param->node); + glusterd_gsync_status_temp_t *param = NULL; + char *slave = NULL; + char *slave_buf = NULL; + char *slave_url = NULL; + char *slave_vol = NULL; + char *slave_host = NULL; + char *errmsg = NULL; + char conf_path[PATH_MAX] = ""; + int ret = -1; + glusterd_conf_t *priv = NULL; + xlator_t *this = NULL; + char slv_url[VOLINFO_SLAVE_URL_MAX] = {0}; + + this = THIS; + GF_VALIDATE_OR_GOTO("glusterd", this, out); + + param = (glusterd_gsync_status_temp_t *)data; + + GF_VALIDATE_OR_GOTO(this->name, param, out); + GF_VALIDATE_OR_GOTO(this->name, param->volinfo, out); + + if (this) + priv = this->private; + GF_VALIDATE_OR_GOTO(this->name, priv, out); + + if (snprintf(slv_url, sizeof(slv_url), "%s", value->data) >= + sizeof(slv_url)) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_VOL_PARSE_FAIL, + "Error in copying slave: %s!", value->data); + goto out; + } + + ret = parse_slave_url(slv_url, &slave); + if (ret == -1) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_VOL_PARSE_FAIL, + "Error in parsing slave: %s!", value->data); + goto out; + } + + ret = glusterd_get_slave_info(slave, &slave_url, &slave_host, &slave_vol, + &errmsg); + if (ret) { + if (errmsg) + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVEINFO_FETCH_ERROR, + "Unable to fetch slave details. Error: %s", errmsg); + else + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVEINFO_FETCH_ERROR, + "Unable to fetch slave details."); + ret = -1; + goto out; + } + + ret = snprintf(conf_path, sizeof(conf_path) - 1, + "%s/" GEOREP "/%s_%s_%s/gsyncd.conf", priv->workdir, + param->volinfo->volname, slave_host, slave_vol); + conf_path[ret] = '\0'; + + ret = glusterd_get_gsync_status_mst_slv(param->volinfo, slave, conf_path, + param->rsp_dict, param->node); out: - if (errmsg) - GF_FREE (errmsg); + if (errmsg) + GF_FREE(errmsg); - if (slave_buf) - GF_FREE(slave_buf); + if (slave_buf) + GF_FREE(slave_buf); - if (slave_vol) - GF_FREE (slave_vol); + if (slave_vol) + GF_FREE(slave_vol); - if (slave_url) - GF_FREE (slave_url); + if (slave_url) + GF_FREE(slave_url); - if (slave_host) - GF_FREE (slave_host); + if (slave_host) + GF_FREE(slave_host); - gf_msg_debug (this->name, 0, "Returning %d.", ret); - return ret; + gf_msg_debug(this->name, 0, "Returning %d.", ret); + return ret; } - static int -_get_max_gsync_slave_num (dict_t *dict, char *key, data_t *value, void *data) +_get_max_gsync_slave_num(dict_t *dict, char *key, data_t *value, void *data) { - int tmp_slvnum = 0; - int *slvnum = (int *)data; + int tmp_slvnum = 0; + int *slvnum = (int *)data; - sscanf (key, "slave%d", &tmp_slvnum); - if (tmp_slvnum > *slvnum) - *slvnum = tmp_slvnum; + sscanf(key, "slave%d", &tmp_slvnum); + if (tmp_slvnum > *slvnum) + *slvnum = tmp_slvnum; - return 0; + return 0; } static int -_get_slave_idx_slave_voluuid (dict_t *dict, char *key, data_t *value, - void *data) +_get_slave_idx_slave_voluuid(dict_t *dict, char *key, data_t *value, void *data) { - char *slave_info = NULL; - xlator_t *this = NULL; - struct slave_vol_config *slave_cfg = NULL; - int i = 0; - int ret = -1; - unsigned tmp_slvnum = 0; - - this = 
THIS; - GF_VALIDATE_OR_GOTO ("glusterd", this, out); + char *slave_info = NULL; + xlator_t *this = NULL; + struct slave_vol_config *slave_cfg = NULL; + int i = 0; + int ret = -1; + unsigned tmp_slvnum = 0; - slave_cfg = data; - - if (value) - slave_info = value->data; - - if (!(slave_info) || strlen (slave_info) == 0) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_INVALID_SLAVE, - "Invalid slave in dict"); - ret = -2; - goto out; - } - - /* slave format: - * master_node_uuid:ssh://slave_host::slave_vol:slave_voluuid */ - while (i++ < 5) { - slave_info = strchr (slave_info, ':'); - if (slave_info) - slave_info++; - else { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVE_VOL_PARSE_FAIL, - "slave_info becomes NULL!"); - ret = -2; - goto out; - } - } - if (strcmp (slave_info, slave_cfg->slave_voluuid) == 0) { - gf_msg_debug (this->name, 0, "Same slave volume " - "already present %s", - slave_cfg->slave_voluuid); - ret = -1; + this = THIS; + GF_VALIDATE_OR_GOTO("glusterd", this, out); - sscanf (key, "slave%d", &tmp_slvnum); - slave_cfg->old_slvidx = tmp_slvnum; + slave_cfg = data; - gf_msg_debug (this->name, 0, "and " - "its index is: %d", tmp_slvnum); - goto out; - } + if (value) + slave_info = value->data; - ret = 0; + if (!(slave_info) || strlen(slave_info) == 0) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_SLAVE, + "Invalid slave in dict"); + ret = -2; + goto out; + } + + /* slave format: + * master_node_uuid:ssh://slave_host::slave_vol:slave_voluuid */ + while (i++ < 5) { + slave_info = strchr(slave_info, ':'); + if (slave_info) + slave_info++; + else { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_VOL_PARSE_FAIL, + "slave_info becomes NULL!"); + ret = -2; + goto out; + } + } + if (strcmp(slave_info, slave_cfg->slave_voluuid) == 0) { + gf_msg_debug(this->name, 0, + "Same slave volume " + "already present %s", + slave_cfg->slave_voluuid); + ret = -1; + + sscanf(key, "slave%d", &tmp_slvnum); + slave_cfg->old_slvidx = tmp_slvnum; + + gf_msg_debug(this->name, 0, + "and " + "its index is: %d", + tmp_slvnum); + goto out; + } + + ret = 0; out: - return ret; + return ret; } static int -glusterd_remove_slave_in_info (glusterd_volinfo_t *volinfo, char *slave, - char **op_errstr) +glusterd_remove_slave_in_info(glusterd_volinfo_t *volinfo, char *slave, + char **op_errstr) { - int zero_slave_entries = _gf_true; - int ret = 0; - char *slavekey = NULL; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - - GF_ASSERT (volinfo); - GF_ASSERT (slave); - - do { - ret = glusterd_get_slave (volinfo, slave, &slavekey); - if (ret < 0 && zero_slave_entries) { - ret++; - goto out; - } - zero_slave_entries = _gf_false; - dict_del (volinfo->gsync_slaves, slavekey); - } while (ret >= 0); - - ret = glusterd_store_volinfo (volinfo, - GLUSTERD_VOLINFO_VER_AC_INCREMENT); - if (ret) { - *op_errstr = gf_strdup ("Failed to store the Volume" - "information"); - goto out; - } - out: - gf_msg_debug (this->name, 0, "returning %d", ret); - return ret; - + int zero_slave_entries = _gf_true; + int ret = 0; + char *slavekey = NULL; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + + GF_ASSERT(volinfo); + GF_ASSERT(slave); + + do { + ret = glusterd_get_slave(volinfo, slave, &slavekey); + if (ret < 0 && zero_slave_entries) { + ret++; + goto out; + } + zero_slave_entries = _gf_false; + dict_del(volinfo->gsync_slaves, slavekey); + } while (ret >= 0); + + ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT); + if (ret) { + *op_errstr = gf_strdup( + "Failed to store the 
Volume" + "information"); + goto out; + } +out: + gf_msg_debug(this->name, 0, "returning %d", ret); + return ret; } static int -glusterd_gsync_get_uuid (char *slave, glusterd_volinfo_t *vol, - uuid_t uuid) +glusterd_gsync_get_uuid(char *slave, glusterd_volinfo_t *vol, uuid_t uuid) { - int ret = 0; - char *slavekey = NULL; - char *slaveentry = NULL; - char *t = NULL; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - - GF_ASSERT (vol); - GF_ASSERT (slave); - - ret = glusterd_get_slave (vol, slave, &slavekey); - if (ret < 0) { - /* XXX colliding cases of failure and non-extant - * slave... now just doing this as callers of this - * function can make sense only of -1 and 0 as retvals; - * getting at the proper semanticals will involve - * fixing callers as well. - */ - ret = -1; - goto out; - } + int ret = 0; + char *slavekey = NULL; + char *slaveentry = NULL; + char *t = NULL; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + + GF_ASSERT(vol); + GF_ASSERT(slave); + + ret = glusterd_get_slave(vol, slave, &slavekey); + if (ret < 0) { + /* XXX colliding cases of failure and non-extant + * slave... now just doing this as callers of this + * function can make sense only of -1 and 0 as retvals; + * getting at the proper semanticals will involve + * fixing callers as well. + */ + ret = -1; + goto out; + } - ret = dict_get_str (vol->gsync_slaves, slavekey, &slaveentry); - GF_ASSERT (ret == 0); + ret = dict_get_str(vol->gsync_slaves, slavekey, &slaveentry); + GF_ASSERT(ret == 0); - t = strchr (slaveentry, ':'); - GF_ASSERT (t); - *t = '\0'; - ret = gf_uuid_parse (slaveentry, uuid); - *t = ':'; + t = strchr(slaveentry, ':'); + GF_ASSERT(t); + *t = '\0'; + ret = gf_uuid_parse(slaveentry, uuid); + *t = ':'; - out: - gf_msg_debug (this->name, 0, "Returning %d", ret); - return ret; +out: + gf_msg_debug(this->name, 0, "Returning %d", ret); + return ret; } static int -update_slave_voluuid (dict_t *dict, char *key, data_t *value, void *data) +update_slave_voluuid(dict_t *dict, char *key, data_t *value, void *data) { - char *slave = NULL; - char *slave_url = NULL; - char *slave_vol = NULL; - char *slave_host = NULL; - char *errmsg = NULL; - xlator_t *this = NULL; - int ret = -1; - char slv_url[VOLINFO_SLAVE_URL_MAX] = {0}; - char slave_voluuid[GF_UUID_BUF_SIZE] = {0}; - char *slave_info = NULL; - char *new_value = NULL; - char *same_key = NULL; - int cnt = 0; - gf_boolean_t *voluuid_updated = NULL; - - this = THIS; - - voluuid_updated = data; - slave_info = value->data; - gf_msg_debug (this->name, 0, "slave_info: %s!", slave_info); + char *slave = NULL; + char *slave_url = NULL; + char *slave_vol = NULL; + char *slave_host = NULL; + char *errmsg = NULL; + xlator_t *this = NULL; + int ret = -1; + char slv_url[VOLINFO_SLAVE_URL_MAX] = {0}; + char slave_voluuid[GF_UUID_BUF_SIZE] = {0}; + char *slave_info = NULL; + char *new_value = NULL; + char *same_key = NULL; + int cnt = 0; + gf_boolean_t *voluuid_updated = NULL; + + this = THIS; + + voluuid_updated = data; + slave_info = value->data; + gf_msg_debug(this->name, 0, "slave_info: %s!", slave_info); + + /* old slave format: + * master_node_uuid:ssh://slave_host::slave_vol + * New slave format: + * master_node_uuid:ssh://slave_host::slave_vol:slave_voluuid */ + while (slave_info) { + slave_info = strchr(slave_info, ':'); + if (slave_info) + cnt++; + else + break; - /* old slave format: - * master_node_uuid:ssh://slave_host::slave_vol - * New slave format: - * master_node_uuid:ssh://slave_host::slave_vol:slave_voluuid */ - while 
(slave_info) { - slave_info = strchr (slave_info, ':'); - if (slave_info) - cnt++; - else - break; + slave_info++; + } - slave_info++; + gf_msg_debug(this->name, 0, "cnt: %d", cnt); + /* check whether old slave format and update vol uuid if old format. + * With volume uuid, number of ':' is 5 and is 4 without. + */ + if (cnt == 4) { + if (snprintf(slv_url, sizeof(slv_url), "%s", value->data) >= + sizeof(slv_url)) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_VOL_PARSE_FAIL, + "Error in copying slave: %s!", value->data); + goto out; } - gf_msg_debug (this->name, 0, "cnt: %d", cnt); - /* check whether old slave format and update vol uuid if old format. - * With volume uuid, number of ':' is 5 and is 4 without. - */ - if (cnt == 4) { - if (snprintf (slv_url, sizeof(slv_url), "%s", value->data) >= - sizeof (slv_url)) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVE_VOL_PARSE_FAIL, - "Error in copying slave: %s!", value->data); - goto out; - } - - ret = parse_slave_url (slv_url, &slave); - if (ret == -1) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVE_VOL_PARSE_FAIL, - "Error in parsing slave: %s!", value->data); - goto out; - } - - ret = glusterd_get_slave_info (slave, &slave_url, - &slave_host, &slave_vol, &errmsg); - if (ret) { - if (errmsg) - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVEINFO_FETCH_ERROR, - "Unable to fetch slave details. Error: %s", - errmsg); - else - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVEINFO_FETCH_ERROR, - "Unable to fetch slave details."); - ret = -1; - goto out; - } - - ret = glusterd_get_slave_voluuid (slave_host, slave_vol, - slave_voluuid); - if ((ret) || (strlen(slave_voluuid) == 0)) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_REMOTE_VOL_UUID_FAIL, - "Unable to get remote volume uuid" - "slavehost:%s slavevol:%s", - slave_host, slave_vol); - /* Avoiding failure due to remote vol uuid fetch */ - ret = 0; - goto out; - } - ret = gf_asprintf (&new_value, "%s:%s", - value->data, slave_voluuid); - ret = gf_asprintf (&same_key, "%s", key); - - /* delete old key and add new value */ - dict_del (dict, key); + ret = parse_slave_url(slv_url, &slave); + if (ret == -1) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_VOL_PARSE_FAIL, + "Error in parsing slave: %s!", value->data); + goto out; + } - /* set new value for the same key*/ - ret = dict_set_dynstr (dict, same_key, new_value); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_REMOTE_VOL_UUID_FAIL, - "Error in setting dict value" - "new_value :%s", new_value); - goto out; - } - *voluuid_updated = _gf_true; + ret = glusterd_get_slave_info(slave, &slave_url, &slave_host, + &slave_vol, &errmsg); + if (ret) { + if (errmsg) + gf_msg(this->name, GF_LOG_ERROR, 0, + GD_MSG_SLAVEINFO_FETCH_ERROR, + "Unable to fetch slave details. 
Error: %s", errmsg); + else + gf_msg(this->name, GF_LOG_ERROR, 0, + GD_MSG_SLAVEINFO_FETCH_ERROR, + "Unable to fetch slave details."); + ret = -1; + goto out; + } + + ret = glusterd_get_slave_voluuid(slave_host, slave_vol, slave_voluuid); + if ((ret) || (strlen(slave_voluuid) == 0)) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REMOTE_VOL_UUID_FAIL, + "Unable to get remote volume uuid" + "slavehost:%s slavevol:%s", + slave_host, slave_vol); + /* Avoiding failure due to remote vol uuid fetch */ + ret = 0; + goto out; + } + ret = gf_asprintf(&new_value, "%s:%s", value->data, slave_voluuid); + ret = gf_asprintf(&same_key, "%s", key); + + /* delete old key and add new value */ + dict_del(dict, key); + + /* set new value for the same key*/ + ret = dict_set_dynstr(dict, same_key, new_value); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REMOTE_VOL_UUID_FAIL, + "Error in setting dict value" + "new_value :%s", + new_value); + goto out; } + *voluuid_updated = _gf_true; + } - ret = 0; + ret = 0; out: - if (errmsg) - GF_FREE (errmsg); + if (errmsg) + GF_FREE(errmsg); - if (slave_url) - GF_FREE (slave_url); + if (slave_url) + GF_FREE(slave_url); - if (slave_vol) - GF_FREE (slave_vol); + if (slave_vol) + GF_FREE(slave_vol); - if (slave_host) - GF_FREE (slave_host); + if (slave_host) + GF_FREE(slave_host); - gf_msg_debug (this->name, 0, "Returning %d.", ret); - return ret; + gf_msg_debug(this->name, 0, "Returning %d.", ret); + return ret; } static int -glusterd_update_slave_voluuid_slaveinfo (glusterd_volinfo_t *volinfo) +glusterd_update_slave_voluuid_slaveinfo(glusterd_volinfo_t *volinfo) { - int ret = -1; - xlator_t *this = NULL; - gf_boolean_t voluuid_updated = _gf_false; - - this = THIS; - GF_VALIDATE_OR_GOTO ("glusterd", this, out); - GF_VALIDATE_OR_GOTO (this->name, volinfo, out); - - ret = dict_foreach (volinfo->gsync_slaves, update_slave_voluuid, - &voluuid_updated); + int ret = -1; + xlator_t *this = NULL; + gf_boolean_t voluuid_updated = _gf_false; + + this = THIS; + GF_VALIDATE_OR_GOTO("glusterd", this, out); + GF_VALIDATE_OR_GOTO(this->name, volinfo, out); + + ret = dict_foreach(volinfo->gsync_slaves, update_slave_voluuid, + &voluuid_updated); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REMOTE_VOL_UUID_FAIL, + "Error in updating" + "volinfo"); + goto out; + } + + if (_gf_true == voluuid_updated) { + ret = glusterd_store_volinfo(volinfo, + GLUSTERD_VOLINFO_VER_AC_INCREMENT); if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_REMOTE_VOL_UUID_FAIL, "Error in updating" - "volinfo"); - goto out; + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_STORE_FAIL, + "Error in storing" + "volinfo"); + goto out; } + } - if (_gf_true == voluuid_updated) { - ret = glusterd_store_volinfo (volinfo, - GLUSTERD_VOLINFO_VER_AC_INCREMENT); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_VOLINFO_STORE_FAIL, "Error in storing" - "volinfo"); - goto out; - } - } - - ret = 0; + ret = 0; out: - gf_msg_debug ((this ? this->name : "glusterd"), - 0, "Returning %d", ret); - return ret; + gf_msg_debug((this ? 
this->name : "glusterd"), 0, "Returning %d", ret); + return ret; } int -glusterd_check_gsync_running_local (char *master, char *slave, - char *conf_path, - gf_boolean_t *is_run) +glusterd_check_gsync_running_local(char *master, char *slave, char *conf_path, + gf_boolean_t *is_run) { - int ret = -1; - int ret_status = 0; - gf_boolean_t is_template_in_use = _gf_false; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - - GF_ASSERT (master); - GF_ASSERT (slave); - GF_ASSERT (is_run); - - *is_run = _gf_false; - ret = gsync_status (master, slave, conf_path, - &ret_status, &is_template_in_use); - if (ret == 0 && ret_status == 0) - *is_run = _gf_true; - else if (ret == -1) { - gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_VALIDATE_FAILED, - GEOREP" validation failed"); - goto out; - } - ret = 0; - out: - gf_msg_debug (this->name, 0, "Returning %d", ret); - return ret; - + int ret = -1; + int ret_status = 0; + gf_boolean_t is_template_in_use = _gf_false; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + + GF_ASSERT(master); + GF_ASSERT(slave); + GF_ASSERT(is_run); + + *is_run = _gf_false; + ret = gsync_status(master, slave, conf_path, &ret_status, + &is_template_in_use); + if (ret == 0 && ret_status == 0) + *is_run = _gf_true; + else if (ret == -1) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VALIDATE_FAILED, + GEOREP " validation failed"); + goto out; + } + ret = 0; +out: + gf_msg_debug(this->name, 0, "Returning %d", ret); + return ret; } static int -glusterd_store_slave_in_info (glusterd_volinfo_t *volinfo, char *slave, - char *host_uuid, char *slave_voluuid, - char **op_errstr, gf_boolean_t is_force) +glusterd_store_slave_in_info(glusterd_volinfo_t *volinfo, char *slave, + char *host_uuid, char *slave_voluuid, + char **op_errstr, gf_boolean_t is_force) { - int ret = 0; - int maxslv = 0; - char **linearr = NULL; - char *value = NULL; - char *slavekey = NULL; - char *slaveentry = NULL; - char key[512] = {0, }; - char *t = NULL; - xlator_t *this = NULL; - struct slave_vol_config slave1 = {{0},}; - - this = THIS; - GF_ASSERT (this); - - GF_ASSERT (volinfo); - GF_ASSERT (slave); - GF_ASSERT (host_uuid); - GF_VALIDATE_OR_GOTO (this->name, slave_voluuid, out); - - ret = glusterd_get_slave (volinfo, slave, &slavekey); - switch (ret) { + int ret = 0; + int maxslv = 0; + char **linearr = NULL; + char *value = NULL; + char *slavekey = NULL; + char *slaveentry = NULL; + char key[512] = { + 0, + }; + char *t = NULL; + xlator_t *this = NULL; + struct slave_vol_config slave1 = { + {0}, + }; + + this = THIS; + GF_ASSERT(this); + + GF_ASSERT(volinfo); + GF_ASSERT(slave); + GF_ASSERT(host_uuid); + GF_VALIDATE_OR_GOTO(this->name, slave_voluuid, out); + + ret = glusterd_get_slave(volinfo, slave, &slavekey); + switch (ret) { case -2: - ret = -1; - goto out; + ret = -1; + goto out; case -1: - break; + break; default: - if (!is_force) - GF_ASSERT (ret > 0); - ret = dict_get_str (volinfo->gsync_slaves, slavekey, &slaveentry); - GF_ASSERT (ret == 0); - - /* same-name + same-uuid slave entries should have been filtered - * out in glusterd_op_verify_gsync_start_options(), so we can - * assert an uuid mismatch - */ - t = strtail (slaveentry, host_uuid); - if (!is_force) - GF_ASSERT (!t || *t != ':'); - - if (is_force) { - gf_msg_debug (this->name, 0, GEOREP" has already " - "been invoked for the %s (master) and " - "%s (slave). 
Allowing without saving " - "info again due to force command.", - volinfo->volname, slave); - ret = 0; - goto out; - } - - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_INVOKE_ERROR, - GEOREP" has already been invoked for " - "the %s (master) and %s (slave) from a different " - "machine", volinfo->volname, slave); - *op_errstr = gf_strdup (GEOREP" already running in " - "another machine"); - ret = -1; - goto out; - } - - ret = glusterd_urltransform_single (slave, "normalize", &linearr); - if (ret == -1) - goto out; - - ret = gf_asprintf (&value, "%s:%s:%s", host_uuid, - linearr[0], slave_voluuid); - - glusterd_urltransform_free (linearr, 1); - if (ret == -1) + if (!is_force) + GF_ASSERT(ret > 0); + ret = dict_get_str(volinfo->gsync_slaves, slavekey, &slaveentry); + GF_ASSERT(ret == 0); + + /* same-name + same-uuid slave entries should have been filtered + * out in glusterd_op_verify_gsync_start_options(), so we can + * assert an uuid mismatch + */ + t = strtail(slaveentry, host_uuid); + if (!is_force) + GF_ASSERT(!t || *t != ':'); + + if (is_force) { + gf_msg_debug(this->name, 0, + GEOREP + " has already " + "been invoked for the %s (master) and " + "%s (slave). Allowing without saving " + "info again due to force command.", + volinfo->volname, slave); + ret = 0; goto out; + } - /* Given the slave volume uuid, check and get any existing slave */ - memcpy (slave1.slave_voluuid, slave_voluuid, GF_UUID_BUF_SIZE); - ret = dict_foreach (volinfo->gsync_slaves, - _get_slave_idx_slave_voluuid, &slave1); - - if (ret == 0) { /* New slave */ - dict_foreach (volinfo->gsync_slaves, _get_max_gsync_slave_num, - &maxslv); - snprintf (key, sizeof (key), "slave%d", maxslv + 1); - - ret = dict_set_dynstr (volinfo->gsync_slaves, key, value); - if (ret) { - GF_FREE (value); - goto out; - } - } else if (ret == -1) { /* Existing slave */ - snprintf (key, sizeof (key), "slave%d", slave1.old_slvidx); - - gf_msg_debug (this->name, 0, "Replacing key:%s with new value" - ":%s", key, value); - - /* Add new slave's value, with the same slave index */ - ret = dict_set_dynstr (volinfo->gsync_slaves, key, value); - if (ret) { - GF_FREE (value); - goto out; - } - } else { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_REMOTE_VOL_UUID_FAIL, - "_get_slave_idx_slave_voluuid failed!"); - GF_FREE (value); - ret = -1; - goto out; + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVOKE_ERROR, + GEOREP + " has already been invoked for " + "the %s (master) and %s (slave) from a different " + "machine", + volinfo->volname, slave); + *op_errstr = gf_strdup(GEOREP + " already running in " + "another machine"); + ret = -1; + goto out; + } + + ret = glusterd_urltransform_single(slave, "normalize", &linearr); + if (ret == -1) + goto out; + + ret = gf_asprintf(&value, "%s:%s:%s", host_uuid, linearr[0], slave_voluuid); + + glusterd_urltransform_free(linearr, 1); + if (ret == -1) + goto out; + + /* Given the slave volume uuid, check and get any existing slave */ + memcpy(slave1.slave_voluuid, slave_voluuid, GF_UUID_BUF_SIZE); + ret = dict_foreach(volinfo->gsync_slaves, _get_slave_idx_slave_voluuid, + &slave1); + + if (ret == 0) { /* New slave */ + dict_foreach(volinfo->gsync_slaves, _get_max_gsync_slave_num, &maxslv); + snprintf(key, sizeof(key), "slave%d", maxslv + 1); + + ret = dict_set_dynstr(volinfo->gsync_slaves, key, value); + if (ret) { + GF_FREE(value); + goto out; } + } else if (ret == -1) { /* Existing slave */ + snprintf(key, sizeof(key), "slave%d", slave1.old_slvidx); + + gf_msg_debug(this->name, 0, + "Replacing key:%s with new 
value" + ":%s", + key, value); - ret = glusterd_store_volinfo (volinfo, - GLUSTERD_VOLINFO_VER_AC_INCREMENT); + /* Add new slave's value, with the same slave index */ + ret = dict_set_dynstr(volinfo->gsync_slaves, key, value); if (ret) { - *op_errstr = gf_strdup ("Failed to store the Volume " - "information"); - goto out; - } - ret = 0; - out: - return ret; + GF_FREE(value); + goto out; + } + } else { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REMOTE_VOL_UUID_FAIL, + "_get_slave_idx_slave_voluuid failed!"); + GF_FREE(value); + ret = -1; + goto out; + } + + ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT); + if (ret) { + *op_errstr = gf_strdup( + "Failed to store the Volume " + "information"); + goto out; + } + ret = 0; +out: + return ret; } static int -glusterd_op_verify_gsync_start_options (glusterd_volinfo_t *volinfo, - char *slave, char *conf_path, - char *statefile, char **op_errstr, - gf_boolean_t is_force) +glusterd_op_verify_gsync_start_options(glusterd_volinfo_t *volinfo, char *slave, + char *conf_path, char *statefile, + char **op_errstr, gf_boolean_t is_force) { - int ret = -1; - int ret_status = 0; - gf_boolean_t is_template_in_use = _gf_false; - char msg[2048] = {0}; - uuid_t uuid = {0}; - xlator_t *this = NULL; - struct stat stbuf = {0,}; - char statefiledir[PATH_MAX] = {0,}; - char *statedir = NULL; - - this = THIS; - GF_ASSERT (this); - - GF_ASSERT (volinfo); - GF_ASSERT (slave); - GF_ASSERT (op_errstr); - GF_ASSERT (conf_path); - GF_ASSERT (this && this->private); - - if (GLUSTERD_STATUS_STARTED != volinfo->status) { - snprintf (msg, sizeof (msg), "Volume %s needs to be started " - "before "GEOREP" start", volinfo->volname); - goto out; - } - - /* check session directory as statefile may not present - * during upgrade */ - if (snprintf (statefiledir, sizeof (statefiledir), "%s", statefile) >= - sizeof (statefiledir)) { - snprintf (msg, sizeof (msg), "statefiledir truncated"); - gf_msg (this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED, - "%s", msg); - *op_errstr = gf_strdup (msg); - goto out; - } - statedir = dirname (statefiledir); - - ret = sys_lstat (statedir, &stbuf); - if (ret) { - snprintf (msg, sizeof (msg), "Session between %s and %s has" - " not been created. Please create session and retry.", - volinfo->volname, slave); - gf_msg (this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED, - "%s statefile: %s", msg, statefile); - *op_errstr = gf_strdup (msg); - goto out; - } - - /* Check if the gsync slave info is stored. If not - * session has not been created */ - ret = glusterd_gsync_get_uuid (slave, volinfo, uuid); - if (ret) { - snprintf (msg, sizeof (msg), "Session between %s and %s has" - " not been created. Please create session and retry.", - volinfo->volname, slave); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SESSION_CREATE_ERROR, - "%s", msg); - goto out; - } - - /*Check if the gsync is already started in cmd. 
inited host - * If so initiate add it into the glusterd's priv*/ - ret = gsync_status (volinfo->volname, slave, conf_path, - &ret_status, &is_template_in_use); - if (ret == 0) { - if ((ret_status == 0) && !is_force) { - snprintf (msg, sizeof (msg), GEOREP " session between" - " %s & %s already started", volinfo->volname, - slave); - ret = -1; - goto out; - } - } else if (ret == -1) { - snprintf (msg, sizeof (msg), GEOREP" start option " - "validation failed "); - goto out; - } - - if (is_template_in_use == _gf_true) { - snprintf (msg, sizeof (msg), GEOREP" start " - "failed : pid-file entry missing " - "in config file."); - ret = -1; - goto out; - } - - ret = glusterd_verify_gsyncd_spawn (volinfo->volname, slave); - if (ret && !is_force) { - snprintf (msg, sizeof (msg), "Unable to spawn gsyncd"); - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_SPAWN_FAILED, - "%s", msg); - } + int ret = -1; + int ret_status = 0; + gf_boolean_t is_template_in_use = _gf_false; + char msg[2048] = {0}; + uuid_t uuid = {0}; + xlator_t *this = NULL; + struct stat stbuf = { + 0, + }; + char statefiledir[PATH_MAX] = { + 0, + }; + char *statedir = NULL; + + this = THIS; + GF_ASSERT(this); + + GF_ASSERT(volinfo); + GF_ASSERT(slave); + GF_ASSERT(op_errstr); + GF_ASSERT(conf_path); + GF_ASSERT(this && this->private); + + if (GLUSTERD_STATUS_STARTED != volinfo->status) { + snprintf(msg, sizeof(msg), + "Volume %s needs to be started " + "before " GEOREP " start", + volinfo->volname); + goto out; + } + + /* check session directory as statefile may not present + * during upgrade */ + if (snprintf(statefiledir, sizeof(statefiledir), "%s", statefile) >= + sizeof(statefiledir)) { + snprintf(msg, sizeof(msg), "statefiledir truncated"); + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED, "%s", + msg); + *op_errstr = gf_strdup(msg); + goto out; + } + statedir = dirname(statefiledir); + + ret = sys_lstat(statedir, &stbuf); + if (ret) { + snprintf(msg, sizeof(msg), + "Session between %s and %s has" + " not been created. Please create session and retry.", + volinfo->volname, slave); + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED, + "%s statefile: %s", msg, statefile); + *op_errstr = gf_strdup(msg); + goto out; + } + + /* Check if the gsync slave info is stored. If not + * session has not been created */ + ret = glusterd_gsync_get_uuid(slave, volinfo, uuid); + if (ret) { + snprintf(msg, sizeof(msg), + "Session between %s and %s has" + " not been created. Please create session and retry.", + volinfo->volname, slave); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SESSION_CREATE_ERROR, "%s", + msg); + goto out; + } + + /*Check if the gsync is already started in cmd. 
inited host + * If so initiate add it into the glusterd's priv*/ + ret = gsync_status(volinfo->volname, slave, conf_path, &ret_status, + &is_template_in_use); + if (ret == 0) { + if ((ret_status == 0) && !is_force) { + snprintf(msg, sizeof(msg), + GEOREP + " session between" + " %s & %s already started", + volinfo->volname, slave); + ret = -1; + goto out; + } + } else if (ret == -1) { + snprintf(msg, sizeof(msg), + GEOREP + " start option " + "validation failed "); + goto out; + } + + if (is_template_in_use == _gf_true) { + snprintf(msg, sizeof(msg), + GEOREP + " start " + "failed : pid-file entry missing " + "in config file."); + ret = -1; + goto out; + } + + ret = glusterd_verify_gsyncd_spawn(volinfo->volname, slave); + if (ret && !is_force) { + snprintf(msg, sizeof(msg), "Unable to spawn gsyncd"); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_SPAWN_FAILED, "%s", + msg); + } out: - if (ret && (msg[0] != '\0')) { - *op_errstr = gf_strdup (msg); - } - gf_msg_debug (this->name, 0, "Returning %d", ret); - return ret; + if (ret && (msg[0] != '\0')) { + *op_errstr = gf_strdup(msg); + } + gf_msg_debug(this->name, 0, "Returning %d", ret); + return ret; } void -glusterd_check_geo_rep_configured (glusterd_volinfo_t *volinfo, - gf_boolean_t *flag) +glusterd_check_geo_rep_configured(glusterd_volinfo_t *volinfo, + gf_boolean_t *flag) { + GF_ASSERT(volinfo); + GF_ASSERT(flag); - GF_ASSERT (volinfo); - GF_ASSERT (flag); - - if (volinfo->gsync_slaves->count) - *flag = _gf_true; - else - *flag = _gf_false; + if (volinfo->gsync_slaves->count) + *flag = _gf_true; + else + *flag = _gf_false; - return; + return; } /* @@ -1974,70 +1961,70 @@ glusterd_check_geo_rep_configured (glusterd_volinfo_t *volinfo, */ static int -is_geo_rep_active (glusterd_volinfo_t *volinfo, char *slave, - char *conf_path, int *is_active) +is_geo_rep_active(glusterd_volinfo_t *volinfo, char *slave, char *conf_path, + int *is_active) { - dict_t *confd = NULL; - char *statefile = NULL; - char *master = NULL; - char monitor_status[PATH_MAX] = ""; - int ret = -1; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - - master = volinfo->volname; - - confd = dict_new (); - if (!confd) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, - "Not able to create dict."); - goto out; - } - - ret = glusterd_gsync_get_config (master, slave, conf_path, - confd); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_GET_CONFIG_INFO_FAILED, - "Unable to get configuration data " - "for %s(master), %s(slave)", master, slave); - ret = -1; - goto out; - } - - ret = dict_get_param (confd, "state_file", &statefile); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Unable to get state_file's name " - "for %s(master), %s(slave). 
Please check gsync " - "config file.", master, slave); - ret = -1; - goto out; - } - - ret = glusterd_gsync_read_frm_status (statefile, monitor_status, - sizeof (monitor_status)); - if (ret <= 0) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_STAT_FILE_READ_FAILED, - "Unable to read the status file for %s(master), " - "%s(slave)", master, slave); - snprintf (monitor_status, sizeof (monitor_status), "defunct"); - } - - if ((!strcmp(monitor_status, "Stopped")) || - (!strcmp(monitor_status, "Created"))) { - *is_active = 0; - } else { - *is_active = 1; - } - ret = 0; + dict_t *confd = NULL; + char *statefile = NULL; + char *master = NULL; + char monitor_status[PATH_MAX] = ""; + int ret = -1; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + + master = volinfo->volname; + + confd = dict_new(); + if (!confd) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, + "Not able to create dict."); + goto out; + } + + ret = glusterd_gsync_get_config(master, slave, conf_path, confd); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GET_CONFIG_INFO_FAILED, + "Unable to get configuration data " + "for %s(master), %s(slave)", + master, slave); + ret = -1; + goto out; + } + + ret = dict_get_param(confd, "state_file", &statefile); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Unable to get state_file's name " + "for %s(master), %s(slave). Please check gsync " + "config file.", + master, slave); + ret = -1; + goto out; + } + + ret = glusterd_gsync_read_frm_status(statefile, monitor_status, + sizeof(monitor_status)); + if (ret <= 0) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STAT_FILE_READ_FAILED, + "Unable to read the status file for %s(master), " + "%s(slave)", + master, slave); + snprintf(monitor_status, sizeof(monitor_status), "defunct"); + } + + if ((!strcmp(monitor_status, "Stopped")) || + (!strcmp(monitor_status, "Created"))) { + *is_active = 0; + } else { + *is_active = 1; + } + ret = 0; out: - if (confd) - dict_unref (confd); - return ret; + if (confd) + dict_unref(confd); + return ret; } /* @@ -2053,89 +2040,86 @@ out: */ int -_get_slave_status (dict_t *dict, char *key, data_t *value, void *data) +_get_slave_status(dict_t *dict, char *key, data_t *value, void *data) { - gsync_status_param_t *param = NULL; - char *slave = NULL; - char *slave_url = NULL; - char *slave_vol = NULL; - char *slave_host = NULL; - char *errmsg = NULL; - char conf_path[PATH_MAX] = ""; - int ret = -1; - glusterd_conf_t *priv = NULL; - xlator_t *this = NULL; - - param = (gsync_status_param_t *)data; - - GF_ASSERT (param); - GF_ASSERT (param->volinfo); - if (param->is_active) { - ret = 0; - goto out; - } - - this = THIS; - GF_ASSERT (this); + gsync_status_param_t *param = NULL; + char *slave = NULL; + char *slave_url = NULL; + char *slave_vol = NULL; + char *slave_host = NULL; + char *errmsg = NULL; + char conf_path[PATH_MAX] = ""; + int ret = -1; + glusterd_conf_t *priv = NULL; + xlator_t *this = NULL; + + param = (gsync_status_param_t *)data; + + GF_ASSERT(param); + GF_ASSERT(param->volinfo); + if (param->is_active) { + ret = 0; + goto out; + } - priv = this->private; - if (priv == NULL) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_GLUSTERD_PRIV_NOT_FOUND, - "priv of glusterd not present"); - goto out; - } + this = THIS; + GF_ASSERT(this); - slave = strchr (value->data, ':'); - if (!slave) { - ret = 0; - goto out; - } - slave++; + priv = this->private; + if (priv == NULL) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_PRIV_NOT_FOUND, + "priv 
of glusterd not present"); + goto out; + } - ret = glusterd_get_slave_info (slave, &slave_url, - &slave_host, &slave_vol, &errmsg); - if (ret) { - if (errmsg) - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVEINFO_FETCH_ERROR, "Unable to fetch" - " slave details. Error: %s", errmsg); - else - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVEINFO_FETCH_ERROR, - "Unable to fetch slave details."); - ret = -1; - goto out; - } - - ret = snprintf (conf_path, sizeof(conf_path) - 1, - "%s/"GEOREP"/%s_%s_%s/gsyncd.conf", - priv->workdir, param->volinfo->volname, - slave_host, slave_vol); - if (ret < 0) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_CONF_PATH_ASSIGN_FAILED, - "Unable to assign conf_path."); - ret = -1; - goto out; - } - conf_path[ret] = '\0'; + slave = strchr(value->data, ':'); + if (!slave) { + ret = 0; + goto out; + } + slave++; - ret = is_geo_rep_active (param->volinfo,slave, conf_path, - ¶m->is_active); -out: + ret = glusterd_get_slave_info(slave, &slave_url, &slave_host, &slave_vol, + &errmsg); + if (ret) { if (errmsg) - GF_FREE (errmsg); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVEINFO_FETCH_ERROR, + "Unable to fetch" + " slave details. Error: %s", + errmsg); + else + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVEINFO_FETCH_ERROR, + "Unable to fetch slave details."); + ret = -1; + goto out; + } + + ret = snprintf(conf_path, sizeof(conf_path) - 1, + "%s/" GEOREP "/%s_%s_%s/gsyncd.conf", priv->workdir, + param->volinfo->volname, slave_host, slave_vol); + if (ret < 0) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CONF_PATH_ASSIGN_FAILED, + "Unable to assign conf_path."); + ret = -1; + goto out; + } + conf_path[ret] = '\0'; + + ret = is_geo_rep_active(param->volinfo, slave, conf_path, + ¶m->is_active); +out: + if (errmsg) + GF_FREE(errmsg); - if (slave_vol) - GF_FREE (slave_vol); + if (slave_vol) + GF_FREE(slave_vol); - if (slave_url) - GF_FREE (slave_url); - if (slave_host) - GF_FREE (slave_host); + if (slave_url) + GF_FREE(slave_url); + if (slave_host) + GF_FREE(slave_host); - return ret; + return ret; } /* glusterd_check_geo_rep_running: @@ -2149,909 +2133,924 @@ out: */ int -glusterd_check_geo_rep_running (gsync_status_param_t *param, char **op_errstr) +glusterd_check_geo_rep_running(gsync_status_param_t *param, char **op_errstr) { - char msg[2048] = {0,}; - gf_boolean_t enabled = _gf_false; - int ret = 0; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - GF_ASSERT (param); - GF_ASSERT (param->volinfo); - GF_ASSERT (op_errstr); - - glusterd_check_geo_rep_configured (param->volinfo, &enabled); - - if (enabled) { - ret = dict_foreach (param->volinfo->gsync_slaves, - _get_slave_status, param); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVEINFO_FETCH_ERROR, - "_get_slave_satus failed"); - snprintf (msg, sizeof(msg), GEOREP" Unable to" - " get the status of active "GEOREP"" - " session for the volume '%s'.\n" - " Please check the log file for" - " more info.", param->volinfo->volname); - *op_errstr = gf_strdup (msg); - ret = -1; - goto out; - } + char msg[2048] = { + 0, + }; + gf_boolean_t enabled = _gf_false; + int ret = 0; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + GF_ASSERT(param); + GF_ASSERT(param->volinfo); + GF_ASSERT(op_errstr); + + glusterd_check_geo_rep_configured(param->volinfo, &enabled); + + if (enabled) { + ret = dict_foreach(param->volinfo->gsync_slaves, _get_slave_status, + param); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVEINFO_FETCH_ERROR, + "_get_slave_satus failed"); + 
snprintf(msg, sizeof(msg), + GEOREP + " Unable to" + " get the status of active " GEOREP + "" + " session for the volume '%s'.\n" + " Please check the log file for" + " more info.", + param->volinfo->volname); + *op_errstr = gf_strdup(msg); + ret = -1; + goto out; + } - if (param->is_active) { - snprintf (msg, sizeof(msg), GEOREP" sessions" - " are active for the volume %s.\nStop" - " "GEOREP " sessions involved in this" - " volume. Use 'volume "GEOREP - " status' command for more info.", - param->volinfo->volname); - *op_errstr = gf_strdup (msg); - goto out; - } - } - out: - return ret; + if (param->is_active) { + snprintf(msg, sizeof(msg), + GEOREP + " sessions" + " are active for the volume %s.\nStop" + " " GEOREP + " sessions involved in this" + " volume. Use 'volume " GEOREP + " status' command for more info.", + param->volinfo->volname); + *op_errstr = gf_strdup(msg); + goto out; + } + } +out: + return ret; } static int -glusterd_op_verify_gsync_running (glusterd_volinfo_t *volinfo, - char *slave, char *conf_path, - char **op_errstr) +glusterd_op_verify_gsync_running(glusterd_volinfo_t *volinfo, char *slave, + char *conf_path, char **op_errstr) { - int pfd = -1; - int ret = -1; - char msg[2048] = {0}; - char pidfile[PATH_MAX] = {0,}; - gf_boolean_t is_template_in_use = _gf_false; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - - GF_ASSERT (THIS && THIS->private); - GF_ASSERT (volinfo); - GF_ASSERT (slave); - GF_ASSERT (conf_path); - GF_ASSERT (op_errstr); - - if (GLUSTERD_STATUS_STARTED != volinfo->status) { - snprintf (msg, sizeof (msg), "Volume %s needs to be started " - "before "GEOREP" start", volinfo->volname); - - goto out; - } - - pfd = gsyncd_getpidfile (volinfo->volname, slave, pidfile, - conf_path, &is_template_in_use); - if (pfd == -2) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_VALIDATE_FAILED, - GEOREP" stop validation failed for %s & %s", - volinfo->volname, slave); - ret = -1; - goto out; - } - if (gsync_status_byfd (pfd) == -1) { - snprintf (msg, sizeof (msg), GEOREP" session b/w %s & %s is " - "not running on this node.", volinfo->volname, - slave); - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_SESSION_INACTIVE, - "%s", msg); - ret = -1; - /* monitor gsyncd already dead */ - goto out; - } - - if (is_template_in_use) { - snprintf (msg, sizeof (msg), "pid-file entry missing in " - "the config file(%s).", conf_path); - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_PIDFILE_NOT_FOUND, - "%s", msg); - ret = -1; - goto out; - } - - if (pfd < 0) - goto out; - - ret = 0; + int pfd = -1; + int ret = -1; + char msg[2048] = {0}; + char pidfile[PATH_MAX] = { + 0, + }; + gf_boolean_t is_template_in_use = _gf_false; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + + GF_ASSERT(THIS && THIS->private); + GF_ASSERT(volinfo); + GF_ASSERT(slave); + GF_ASSERT(conf_path); + GF_ASSERT(op_errstr); + + if (GLUSTERD_STATUS_STARTED != volinfo->status) { + snprintf(msg, sizeof(msg), + "Volume %s needs to be started " + "before " GEOREP " start", + volinfo->volname); + + goto out; + } + + pfd = gsyncd_getpidfile(volinfo->volname, slave, pidfile, conf_path, + &is_template_in_use); + if (pfd == -2) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VALIDATE_FAILED, + GEOREP " stop validation failed for %s & %s", volinfo->volname, + slave); + ret = -1; + goto out; + } + if (gsync_status_byfd(pfd) == -1) { + snprintf(msg, sizeof(msg), + GEOREP + " session b/w %s & %s is " + "not running on this node.", + volinfo->volname, slave); + gf_msg(this->name, GF_LOG_ERROR, 0, 
GD_MSG_SESSION_INACTIVE, "%s", msg); + ret = -1; + /* monitor gsyncd already dead */ + goto out; + } + + if (is_template_in_use) { + snprintf(msg, sizeof(msg), + "pid-file entry missing in " + "the config file(%s).", + conf_path); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PIDFILE_NOT_FOUND, "%s", + msg); + ret = -1; + goto out; + } + + if (pfd < 0) + goto out; + + ret = 0; out: - if (ret && (msg[0] != '\0')) { - *op_errstr = gf_strdup (msg); - } - gf_msg_debug (this->name, 0, "Returning %d", ret); - return ret; + if (ret && (msg[0] != '\0')) { + *op_errstr = gf_strdup(msg); + } + gf_msg_debug(this->name, 0, "Returning %d", ret); + return ret; } static int -glusterd_verify_gsync_status_opts (dict_t *dict, char **op_errstr) +glusterd_verify_gsync_status_opts(dict_t *dict, char **op_errstr) { - char *slave = NULL; - char *volname = NULL; - char errmsg[PATH_MAX] = {0, }; - gf_boolean_t exists = _gf_false; - glusterd_volinfo_t *volinfo = NULL; - int ret = 0; - char *conf_path = NULL; - char *slave_url = NULL; - char *slave_host = NULL; - char *slave_vol = NULL; - glusterd_conf_t *priv = NULL; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - - if (THIS) - priv = THIS->private; - if (priv == NULL) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_GLUSTERD_PRIV_NOT_FOUND, - "priv of glusterd not present"); - *op_errstr = gf_strdup ("glusterd defunct"); - goto out; - } - - ret = dict_get_str (dict, "master", &volname); - if (ret < 0) { - ret = 0; - goto out; - } - - exists = glusterd_check_volume_exists (volname); - ret = glusterd_volinfo_find (volname, &volinfo); - if ((ret) || (!exists)) { - gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_FOUND, - "volume name does not exist"); - snprintf (errmsg, sizeof(errmsg), "Volume name %s does not" - " exist", volname); - *op_errstr = gf_strdup (errmsg); - ret = -1; - goto out; - } - - ret = dict_get_str (dict, "slave", &slave); - if (ret < 0) { - ret = 0; - goto out; - } - - ret = glusterd_get_slave_details_confpath (volinfo, dict, &slave_url, - &slave_host, &slave_vol, - &conf_path, op_errstr); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVEINFO_FETCH_ERROR, - "Unable to fetch slave or confpath details."); - ret = -1; - goto out; - } + char *slave = NULL; + char *volname = NULL; + char errmsg[PATH_MAX] = { + 0, + }; + gf_boolean_t exists = _gf_false; + glusterd_volinfo_t *volinfo = NULL; + int ret = 0; + char *conf_path = NULL; + char *slave_url = NULL; + char *slave_host = NULL; + char *slave_vol = NULL; + glusterd_conf_t *priv = NULL; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + + if (THIS) + priv = THIS->private; + if (priv == NULL) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_PRIV_NOT_FOUND, + "priv of glusterd not present"); + *op_errstr = gf_strdup("glusterd defunct"); + goto out; + } + + ret = dict_get_str(dict, "master", &volname); + if (ret < 0) { + ret = 0; + goto out; + } + + exists = glusterd_check_volume_exists(volname); + ret = glusterd_volinfo_find(volname, &volinfo); + if ((ret) || (!exists)) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_FOUND, + "volume name does not exist"); + snprintf(errmsg, sizeof(errmsg), + "Volume name %s does not" + " exist", + volname); + *op_errstr = gf_strdup(errmsg); + ret = -1; + goto out; + } + + ret = dict_get_str(dict, "slave", &slave); + if (ret < 0) { + ret = 0; + goto out; + } + + ret = glusterd_get_slave_details_confpath(volinfo, dict, &slave_url, + &slave_host, &slave_vol, + &conf_path, op_errstr); + if (ret) { + 
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVEINFO_FETCH_ERROR, + "Unable to fetch slave or confpath details."); + ret = -1; + goto out; + } out: - gf_msg_debug (this->name, 0, "Returning %d", ret); - return ret; + gf_msg_debug(this->name, 0, "Returning %d", ret); + return ret; } - int -glusterd_op_gsync_args_get (dict_t *dict, char **op_errstr, - char **master, char **slave, char **host_uuid) +glusterd_op_gsync_args_get(dict_t *dict, char **op_errstr, char **master, + char **slave, char **host_uuid) { + int ret = -1; + xlator_t *this = NULL; - int ret = -1; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - GF_ASSERT (dict); - GF_ASSERT (op_errstr); + this = THIS; + GF_ASSERT(this); + GF_ASSERT(dict); + GF_ASSERT(op_errstr); - if (master) { - ret = dict_get_str (dict, "master", master); - if (ret < 0) { - gf_msg (this->name, GF_LOG_WARNING, 0, - GD_MSG_DICT_GET_FAILED, "master not found"); - *op_errstr = gf_strdup ("master not found"); - goto out; - } + if (master) { + ret = dict_get_str(dict, "master", master); + if (ret < 0) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, + "master not found"); + *op_errstr = gf_strdup("master not found"); + goto out; } + } - if (slave) { - ret = dict_get_str (dict, "slave", slave); - if (ret < 0) { - gf_msg (this->name, GF_LOG_WARNING, 0, - GD_MSG_DICT_GET_FAILED, "slave not found"); - *op_errstr = gf_strdup ("slave not found"); - goto out; - } + if (slave) { + ret = dict_get_str(dict, "slave", slave); + if (ret < 0) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, + "slave not found"); + *op_errstr = gf_strdup("slave not found"); + goto out; } + } - if (host_uuid) { - ret = dict_get_str (dict, "host-uuid", host_uuid); - if (ret < 0) { - gf_msg (this->name, GF_LOG_WARNING, 0, - GD_MSG_DICT_GET_FAILED, "host_uuid not found"); - *op_errstr = gf_strdup ("host_uuid not found"); - goto out; - } + if (host_uuid) { + ret = dict_get_str(dict, "host-uuid", host_uuid); + if (ret < 0) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, + "host_uuid not found"); + *op_errstr = gf_strdup("host_uuid not found"); + goto out; } + } - ret = 0; + ret = 0; out: - gf_msg_debug (this->name, 0, "Returning %d", ret); - return ret; + gf_msg_debug(this->name, 0, "Returning %d", ret); + return ret; } int -glusterd_op_stage_sys_exec (dict_t *dict, char **op_errstr) +glusterd_op_stage_sys_exec(dict_t *dict, char **op_errstr) { - char errmsg[PATH_MAX] = ""; - char *command = NULL; - char command_path[PATH_MAX] = ""; - struct stat st = {0,}; - int ret = -1; - glusterd_conf_t *conf = NULL; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - conf = this->private; - GF_ASSERT (conf); - - if (conf->op_version < 2) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_UNSUPPORTED_VERSION, - "Op Version not supported."); - snprintf (errmsg, sizeof(errmsg), "One or more nodes do not" - " support the required op version."); - *op_errstr = gf_strdup (errmsg); - ret = -1; - goto out; - } - - ret = dict_get_str (dict, "command", &command); - if (ret) { - strcpy (errmsg, "internal error"); - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Unable to get command from dict"); - goto out; - } - - /* enforce local occurrence of the command */ - if (strchr (command, '/')) { - strcpy (errmsg, "invalid command name"); - ret = -1; - goto out; - } - - sprintf (command_path, GSYNCD_PREFIX"/peer_%s", command); - /* check if it's executable */ - ret = sys_access (command_path, X_OK); - if (!ret) - /* check if it's a 
regular file */ - ret = sys_stat (command_path, &st); - if (!ret && !S_ISREG (st.st_mode)) - ret = -1; + char errmsg[PATH_MAX] = ""; + char *command = NULL; + char command_path[PATH_MAX] = ""; + struct stat st = { + 0, + }; + int ret = -1; + glusterd_conf_t *conf = NULL; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + conf = this->private; + GF_ASSERT(conf); + + if (conf->op_version < 2) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNSUPPORTED_VERSION, + "Op Version not supported."); + snprintf(errmsg, sizeof(errmsg), + "One or more nodes do not" + " support the required op version."); + *op_errstr = gf_strdup(errmsg); + ret = -1; + goto out; + } + + ret = dict_get_str(dict, "command", &command); + if (ret) { + strcpy(errmsg, "internal error"); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Unable to get command from dict"); + goto out; + } + + /* enforce local occurrence of the command */ + if (strchr(command, '/')) { + strcpy(errmsg, "invalid command name"); + ret = -1; + goto out; + } + + sprintf(command_path, GSYNCD_PREFIX "/peer_%s", command); + /* check if it's executable */ + ret = sys_access(command_path, X_OK); + if (!ret) + /* check if it's a regular file */ + ret = sys_stat(command_path, &st); + if (!ret && !S_ISREG(st.st_mode)) + ret = -1; out: - if (ret) { - if (errmsg[0] == '\0') { - if (command) - snprintf (errmsg, sizeof (errmsg), - "gsync peer_%s command not found.", - command); - else - snprintf (errmsg, sizeof (errmsg), "%s", - "gsync peer command was not " - "specified"); - } - *op_errstr = gf_strdup (errmsg); - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_CMD_ERROR, - "%s", errmsg); - } - - gf_msg_debug (this->name, 0, "Returning %d", ret); - return ret; + if (ret) { + if (errmsg[0] == '\0') { + if (command) + snprintf(errmsg, sizeof(errmsg), + "gsync peer_%s command not found.", command); + else + snprintf(errmsg, sizeof(errmsg), "%s", + "gsync peer command was not " + "specified"); + } + *op_errstr = gf_strdup(errmsg); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_CMD_ERROR, "%s", + errmsg); + } + + gf_msg_debug(this->name, 0, "Returning %d", ret); + return ret; } int -glusterd_op_stage_copy_file (dict_t *dict, char **op_errstr) +glusterd_op_stage_copy_file(dict_t *dict, char **op_errstr) { - char abs_filename[PATH_MAX] = ""; - char errmsg[PATH_MAX] = ""; - char *filename = NULL; - char *host_uuid = NULL; - char uuid_str [64] = {0}; - int ret = -1; - glusterd_conf_t *priv = NULL; - struct stat stbuf = {0,}; - xlator_t *this = NULL; - char workdir[PATH_MAX] = {0,}; - char realpath_filename[PATH_MAX] = {0,}; - char realpath_workdir[PATH_MAX] = {0,}; - int32_t len = 0; - - this = THIS; - GF_ASSERT (this); - - if (THIS) - priv = THIS->private; - if (priv == NULL) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_GLUSTERD_PRIV_NOT_FOUND, - "priv of glusterd not present"); - *op_errstr = gf_strdup ("glusterd defunct"); - goto out; + char abs_filename[PATH_MAX] = ""; + char errmsg[PATH_MAX] = ""; + char *filename = NULL; + char *host_uuid = NULL; + char uuid_str[64] = {0}; + int ret = -1; + glusterd_conf_t *priv = NULL; + struct stat stbuf = { + 0, + }; + xlator_t *this = NULL; + char workdir[PATH_MAX] = { + 0, + }; + char realpath_filename[PATH_MAX] = { + 0, + }; + char realpath_workdir[PATH_MAX] = { + 0, + }; + int32_t len = 0; + + this = THIS; + GF_ASSERT(this); + + if (THIS) + priv = THIS->private; + if (priv == NULL) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_PRIV_NOT_FOUND, + "priv of glusterd not present"); + 
*op_errstr = gf_strdup("glusterd defunct"); + goto out; + } + + if (priv->op_version < 2) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNSUPPORTED_VERSION, + "Op Version not supported."); + snprintf(errmsg, sizeof(errmsg), + "One or more nodes do not" + " support the required op version."); + *op_errstr = gf_strdup(errmsg); + ret = -1; + goto out; + } + + ret = dict_get_str(dict, "host-uuid", &host_uuid); + if (ret < 0) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Unable to fetch host-uuid from dict."); + goto out; + } + + uuid_utoa_r(MY_UUID, uuid_str); + if (!strcmp(uuid_str, host_uuid)) { + ret = dict_get_str(dict, "source", &filename); + if (ret < 0) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Unable to fetch filename from dict."); + *op_errstr = gf_strdup("command unsuccessful"); + goto out; } - - if (priv->op_version < 2) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_UNSUPPORTED_VERSION, - "Op Version not supported."); - snprintf (errmsg, sizeof(errmsg), "One or more nodes do not" - " support the required op version."); - *op_errstr = gf_strdup (errmsg); - ret = -1; - goto out; + len = snprintf(abs_filename, sizeof(abs_filename), "%s/%s", + priv->workdir, filename); + if ((len < 0) || (len >= sizeof(abs_filename))) { + ret = -1; + goto out; } - ret = dict_get_str (dict, "host-uuid", &host_uuid); - if (ret < 0) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Unable to fetch host-uuid from dict."); - goto out; + if (!realpath(priv->workdir, realpath_workdir)) { + len = snprintf(errmsg, sizeof(errmsg), + "Failed to " + "get realpath of %s: %s", + priv->workdir, strerror(errno)); + if (len < 0) { + strcpy(errmsg, "<error>"); + } + *op_errstr = gf_strdup(errmsg); + ret = -1; + goto out; + } + + if (!realpath(abs_filename, realpath_filename)) { + snprintf(errmsg, sizeof(errmsg), + "Failed to get " + "realpath of %s: %s", + filename, strerror(errno)); + *op_errstr = gf_strdup(errmsg); + ret = -1; + goto out; + } + + /* Add Trailing slash to workdir, without slash strncmp + will succeed for /var/lib/glusterd_bad */ + len = snprintf(workdir, sizeof(workdir), "%s/", realpath_workdir); + if ((len < 0) || (len >= sizeof(workdir))) { + ret = -1; + goto out; + } + + /* Protect against file copy outside $workdir */ + if (strncmp(workdir, realpath_filename, strlen(workdir))) { + len = snprintf(errmsg, sizeof(errmsg), + "Source file" + " is outside of %s directory", + priv->workdir); + if (len < 0) { + strcpy(errmsg, "<error>"); + } + *op_errstr = gf_strdup(errmsg); + ret = -1; + goto out; } - uuid_utoa_r (MY_UUID, uuid_str); - if (!strcmp (uuid_str, host_uuid)) { - ret = dict_get_str (dict, "source", &filename); - if (ret < 0) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_GET_FAILED, - "Unable to fetch filename from dict."); - *op_errstr = gf_strdup ("command unsuccessful"); - goto out; - } - len = snprintf (abs_filename, sizeof(abs_filename), - "%s/%s", priv->workdir, filename); - if ((len < 0) || (len >= sizeof(abs_filename))) { - ret = -1; - goto out; - } - - if (!realpath (priv->workdir, realpath_workdir)) { - len = snprintf (errmsg, sizeof (errmsg), "Failed to " - "get realpath of %s: %s", - priv->workdir, strerror (errno)); - if (len < 0) { - strcpy(errmsg, "<error>"); - } - *op_errstr = gf_strdup (errmsg); - ret = -1; - goto out; - } - - if (!realpath (abs_filename, realpath_filename)) { - snprintf (errmsg, sizeof (errmsg), "Failed to get " - "realpath of %s: %s", filename, - strerror (errno)); - *op_errstr = 
gf_strdup (errmsg); - ret = -1; - goto out; - } - - /* Add Trailing slash to workdir, without slash strncmp - will succeed for /var/lib/glusterd_bad */ - len = snprintf (workdir, sizeof(workdir), "%s/", - realpath_workdir); - if ((len < 0) || (len >= sizeof(workdir))) { - ret = -1; - goto out; - } - - /* Protect against file copy outside $workdir */ - if (strncmp (workdir, realpath_filename, strlen (workdir))) { - len = snprintf (errmsg, sizeof (errmsg), "Source file" - " is outside of %s directory", - priv->workdir); - if (len < 0) { - strcpy(errmsg, "<error>"); - } - *op_errstr = gf_strdup (errmsg); - ret = -1; - goto out; - } - - ret = sys_lstat (abs_filename, &stbuf); - if (ret) { - len = snprintf (errmsg, sizeof (errmsg), "Source file" - " does not exist in %s", - priv->workdir); - if (len < 0) { - strcpy(errmsg, "<error>"); - } - *op_errstr = gf_strdup (errmsg); - goto out; - } + ret = sys_lstat(abs_filename, &stbuf); + if (ret) { + len = snprintf(errmsg, sizeof(errmsg), + "Source file" + " does not exist in %s", + priv->workdir); + if (len < 0) { + strcpy(errmsg, "<error>"); + } + *op_errstr = gf_strdup(errmsg); + goto out; + } - if (!S_ISREG(stbuf.st_mode)) { - snprintf (errmsg, sizeof (errmsg), "Source file" - " is not a regular file."); - *op_errstr = gf_strdup (errmsg); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SRC_FILE_ERROR, - "%s", errmsg); - ret = -1; - goto out; - } + if (!S_ISREG(stbuf.st_mode)) { + snprintf(errmsg, sizeof(errmsg), + "Source file" + " is not a regular file."); + *op_errstr = gf_strdup(errmsg); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SRC_FILE_ERROR, "%s", + errmsg); + ret = -1; + goto out; } + } - ret = 0; + ret = 0; out: - gf_msg_debug (this->name, 0, "Returning %d", ret); - return ret; + gf_msg_debug(this->name, 0, "Returning %d", ret); + return ret; } int -glusterd_get_statefile_name (glusterd_volinfo_t *volinfo, char *slave, - char *conf_path, char **statefile, - gf_boolean_t *is_template_in_use) +glusterd_get_statefile_name(glusterd_volinfo_t *volinfo, char *slave, + char *conf_path, char **statefile, + gf_boolean_t *is_template_in_use) { - char *master = NULL; - char *buf = NULL; - char *working_conf_path = NULL; - char temp_conf_path[PATH_MAX] = ""; - dict_t *confd = NULL; - glusterd_conf_t *priv = NULL; - int ret = -1; - struct stat stbuf = {0,}; - xlator_t *this = NULL; - int32_t len = 0; - - this = THIS; - GF_ASSERT (this); - - GF_ASSERT (this->private); - GF_ASSERT (volinfo); - GF_ASSERT (conf_path); - GF_ASSERT (is_template_in_use); - - master = volinfo->volname; - - confd = dict_new (); - if (!confd) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, - "Unable to create new dict"); - goto out; - } - - priv = THIS->private; - - len = snprintf (temp_conf_path, sizeof(temp_conf_path), - "%s/"GSYNC_CONF_TEMPLATE, priv->workdir); - if ((len < 0) || (len >= sizeof(temp_conf_path))) { - goto out; - } - - ret = sys_lstat (conf_path, &stbuf); - if (!ret) { - gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_CONFIG_INFO, - "Using passed config template(%s).", - conf_path); - working_conf_path = conf_path; - } else { - gf_msg (this->name, GF_LOG_WARNING, ENOENT, - GD_MSG_FILE_OP_FAILED, - "Config file (%s) missing. 
Looking for template config" - " file (%s)", conf_path, temp_conf_path); - ret = sys_lstat (temp_conf_path, &stbuf); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, ENOENT, - GD_MSG_FILE_OP_FAILED, "Template " - "config file (%s) missing.", temp_conf_path); - goto out; - } - gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_DEFAULT_TEMP_CONFIG, - "Using default config template(%s).", temp_conf_path); - working_conf_path = temp_conf_path; - *is_template_in_use = _gf_true; - } - -fetch_data: - ret = glusterd_gsync_get_config (master, slave, working_conf_path, - confd); + char *master = NULL; + char *buf = NULL; + char *working_conf_path = NULL; + char temp_conf_path[PATH_MAX] = ""; + dict_t *confd = NULL; + glusterd_conf_t *priv = NULL; + int ret = -1; + struct stat stbuf = { + 0, + }; + xlator_t *this = NULL; + int32_t len = 0; + + this = THIS; + GF_ASSERT(this); + + GF_ASSERT(this->private); + GF_ASSERT(volinfo); + GF_ASSERT(conf_path); + GF_ASSERT(is_template_in_use); + + master = volinfo->volname; + + confd = dict_new(); + if (!confd) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, + "Unable to create new dict"); + goto out; + } + + priv = THIS->private; + + len = snprintf(temp_conf_path, sizeof(temp_conf_path), + "%s/" GSYNC_CONF_TEMPLATE, priv->workdir); + if ((len < 0) || (len >= sizeof(temp_conf_path))) { + goto out; + } + + ret = sys_lstat(conf_path, &stbuf); + if (!ret) { + gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_CONFIG_INFO, + "Using passed config template(%s).", conf_path); + working_conf_path = conf_path; + } else { + gf_msg(this->name, GF_LOG_WARNING, ENOENT, GD_MSG_FILE_OP_FAILED, + "Config file (%s) missing. Looking for template config" + " file (%s)", + conf_path, temp_conf_path); + ret = sys_lstat(temp_conf_path, &stbuf); if (ret) { - if (*is_template_in_use == _gf_false) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_GET_CONFIG_INFO_FAILED, - "Unable to get configuration data " - "for %s(master), %s(slave). " - "Trying template config.", - master, slave); - working_conf_path = temp_conf_path; - *is_template_in_use = _gf_true; - goto fetch_data; - } else { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_GET_CONFIG_INFO_FAILED, - "Unable to get configuration data " - "for %s(master), %s(slave) from " - "template config", - master, slave); - goto out; - } - } + gf_msg(this->name, GF_LOG_ERROR, ENOENT, GD_MSG_FILE_OP_FAILED, + "Template " + "config file (%s) missing.", + temp_conf_path); + goto out; + } + gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DEFAULT_TEMP_CONFIG, + "Using default config template(%s).", temp_conf_path); + working_conf_path = temp_conf_path; + *is_template_in_use = _gf_true; + } - ret = dict_get_param (confd, "state_file", &buf); - if (ret) { - if (*is_template_in_use == _gf_false) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_GET_FAILED, - "Unable to get state_file's name. " - "Trying template config."); - working_conf_path = temp_conf_path; - *is_template_in_use = _gf_true; - goto fetch_data; - } else { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_GET_STATEFILE_NAME_FAILED, - "Unable to get state_file's " - "name from template."); - goto out; - } +fetch_data: + ret = glusterd_gsync_get_config(master, slave, working_conf_path, confd); + if (ret) { + if (*is_template_in_use == _gf_false) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GET_CONFIG_INFO_FAILED, + "Unable to get configuration data " + "for %s(master), %s(slave). 
" + "Trying template config.", + master, slave); + working_conf_path = temp_conf_path; + *is_template_in_use = _gf_true; + goto fetch_data; + } else { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GET_CONFIG_INFO_FAILED, + "Unable to get configuration data " + "for %s(master), %s(slave) from " + "template config", + master, slave); + goto out; + } + } + + ret = dict_get_param(confd, "state_file", &buf); + if (ret) { + if (*is_template_in_use == _gf_false) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Unable to get state_file's name. " + "Trying template config."); + working_conf_path = temp_conf_path; + *is_template_in_use = _gf_true; + goto fetch_data; + } else { + gf_msg(this->name, GF_LOG_ERROR, 0, + GD_MSG_GET_STATEFILE_NAME_FAILED, + "Unable to get state_file's " + "name from template."); + goto out; } + } - ret = 0; - out: - if (buf) { - *statefile = gf_strdup(buf); - if (!*statefile) - ret = -1; - } + ret = 0; +out: + if (buf) { + *statefile = gf_strdup(buf); + if (!*statefile) + ret = -1; + } - if (confd) - dict_unref (confd); + if (confd) + dict_unref(confd); - gf_msg_debug (this->name, 0, "Returning %d ", ret); - return ret; + gf_msg_debug(this->name, 0, "Returning %d ", ret); + return ret; } int -glusterd_create_status_file (char *master, char *slave, char *slave_host, - char *slave_vol, char *status) +glusterd_create_status_file(char *master, char *slave, char *slave_host, + char *slave_vol, char *status) { - int ret = -1; - runner_t runner = {0,}; - glusterd_conf_t *priv = NULL; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - - if (THIS) - priv = THIS->private; - if (priv == NULL) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_GLUSTERD_PRIV_NOT_FOUND, - "priv of glusterd not present"); - goto out; - } + int ret = -1; + runner_t runner = { + 0, + }; + glusterd_conf_t *priv = NULL; + xlator_t *this = NULL; - if (!status) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_STATUS_NULL, - "Status Empty"); - goto out; - } - gf_msg_debug (this->name, 0, "slave = %s", slave); - - runinit (&runner); - runner_add_args (&runner, GSYNCD_PREFIX"/gsyncd", "--create", - status, "-c", NULL); - runner_argprintf (&runner, "%s/"GEOREP"/%s_%s_%s/gsyncd.conf", - priv->workdir, master, slave_host, slave_vol); - runner_argprintf (&runner, "--iprefix=%s", DATADIR); - runner_argprintf (&runner, ":%s", master); - runner_add_args (&runner, slave, NULL); - synclock_unlock (&priv->big_lock); - ret = runner_run (&runner); - synclock_lock (&priv->big_lock); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_STATUSFILE_CREATE_FAILED, - "Creating status file failed."); - ret = -1; - goto out; - } + this = THIS; + GF_ASSERT(this); - ret = 0; + if (THIS) + priv = THIS->private; + if (priv == NULL) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_PRIV_NOT_FOUND, + "priv of glusterd not present"); + goto out; + } + + if (!status) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATUS_NULL, "Status Empty"); + goto out; + } + gf_msg_debug(this->name, 0, "slave = %s", slave); + + runinit(&runner); + runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "--create", status, "-c", + NULL); + runner_argprintf(&runner, "%s/" GEOREP "/%s_%s_%s/gsyncd.conf", + priv->workdir, master, slave_host, slave_vol); + runner_argprintf(&runner, "--iprefix=%s", DATADIR); + runner_argprintf(&runner, ":%s", master); + runner_add_args(&runner, slave, NULL); + synclock_unlock(&priv->big_lock); + ret = runner_run(&runner); + synclock_lock(&priv->big_lock); + if (ret) { + gf_msg(this->name, 
GF_LOG_ERROR, 0, GD_MSG_STATUSFILE_CREATE_FAILED, + "Creating status file failed."); + ret = -1; + goto out; + } + + ret = 0; out: - gf_msg_debug (this->name, 0, "returning %d", ret); - return ret; + gf_msg_debug(this->name, 0, "returning %d", ret); + return ret; } static int -glusterd_verify_slave (char *volname, char *slave_url, char *slave_vol, - int ssh_port, char **op_errstr, - gf_boolean_t *is_force_blocker) +glusterd_verify_slave(char *volname, char *slave_url, char *slave_vol, + int ssh_port, char **op_errstr, + gf_boolean_t *is_force_blocker) { - int32_t ret = -1; - runner_t runner = {0,}; - char log_file_path[PATH_MAX] = ""; - char buf[PATH_MAX] = ""; - char *tmp = NULL; - char *slave_url_buf = NULL; - char *save_ptr = NULL; - char *slave_user = NULL; - char *slave_ip = NULL; - glusterd_conf_t *priv = NULL; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - priv = this->private; - GF_ASSERT (priv); - GF_ASSERT (volname); - GF_ASSERT (slave_url); - GF_ASSERT (slave_vol); - - /* Fetch the slave_user and slave_ip from the slave_url. - * If the slave_user is not present. Use "root" - */ - if (strstr(slave_url, "@")) { - slave_url_buf = gf_strdup (slave_url); - if (!slave_url_buf) - goto out; - - slave_user = strtok_r (slave_url_buf, "@", &save_ptr); - slave_ip = strtok_r (NULL, "@", &save_ptr); - } else { - slave_user = "root"; - slave_ip = slave_url; - } - - if (!slave_user || !slave_ip) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_URL_INVALID, - "Invalid slave url."); - goto out; + int32_t ret = -1; + runner_t runner = { + 0, + }; + char log_file_path[PATH_MAX] = ""; + char buf[PATH_MAX] = ""; + char *tmp = NULL; + char *slave_url_buf = NULL; + char *save_ptr = NULL; + char *slave_user = NULL; + char *slave_ip = NULL; + glusterd_conf_t *priv = NULL; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + priv = this->private; + GF_ASSERT(priv); + GF_ASSERT(volname); + GF_ASSERT(slave_url); + GF_ASSERT(slave_vol); + + /* Fetch the slave_user and slave_ip from the slave_url. + * If the slave_user is not present. 
Use "root" + */ + if (strstr(slave_url, "@")) { + slave_url_buf = gf_strdup(slave_url); + if (!slave_url_buf) + goto out; + + slave_user = strtok_r(slave_url_buf, "@", &save_ptr); + slave_ip = strtok_r(NULL, "@", &save_ptr); + } else { + slave_user = "root"; + slave_ip = slave_url; + } + + if (!slave_user || !slave_ip) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_URL_INVALID, + "Invalid slave url."); + goto out; + } + + snprintf(log_file_path, sizeof(log_file_path), + DEFAULT_LOG_FILE_DIRECTORY "/create_verify_log"); + + runinit(&runner); + runner_add_args(&runner, GSYNCD_PREFIX "/gverify.sh", NULL); + runner_argprintf(&runner, "%s", volname); + runner_argprintf(&runner, "%s", slave_user); + runner_argprintf(&runner, "%s", slave_ip); + runner_argprintf(&runner, "%s", slave_vol); + runner_argprintf(&runner, "%d", ssh_port); + runner_argprintf(&runner, "%s", log_file_path); + gf_msg_debug(this->name, 0, "gverify Args = %s %s %s %s %s %s %s", + runner.argv[0], runner.argv[1], runner.argv[2], runner.argv[3], + runner.argv[4], runner.argv[5], runner.argv[6]); + runner_redir(&runner, STDOUT_FILENO, RUN_PIPE); + synclock_unlock(&priv->big_lock); + ret = runner_run(&runner); + synclock_lock(&priv->big_lock); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_SLAVE, + "Not a valid slave"); + ret = glusterd_gsync_read_frm_status(log_file_path, buf, sizeof(buf)); + if (ret <= 0) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_READ_ERROR, + "Unable to read from %s", log_file_path); + goto out; } - snprintf (log_file_path, sizeof(log_file_path), - DEFAULT_LOG_FILE_DIRECTORY"/create_verify_log"); - - runinit (&runner); - runner_add_args (&runner, GSYNCD_PREFIX"/gverify.sh", NULL); - runner_argprintf (&runner, "%s", volname); - runner_argprintf (&runner, "%s", slave_user); - runner_argprintf (&runner, "%s", slave_ip); - runner_argprintf (&runner, "%s", slave_vol); - runner_argprintf (&runner, "%d", ssh_port); - runner_argprintf (&runner, "%s", log_file_path); - gf_msg_debug (this->name, 0, "gverify Args = %s %s %s %s %s %s %s", - runner.argv[0], runner.argv[1], runner.argv[2], - runner.argv[3], runner.argv[4], runner.argv[5], - runner.argv[6]); - runner_redir (&runner, STDOUT_FILENO, RUN_PIPE); - synclock_unlock (&priv->big_lock); - ret = runner_run (&runner); - synclock_lock (&priv->big_lock); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_SLAVE, - "Not a valid slave"); - ret = glusterd_gsync_read_frm_status (log_file_path, - buf, sizeof(buf)); - if (ret <= 0) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_READ_ERROR, - "Unable to read from %s", log_file_path); - goto out; - } - - /* Tokenize the error message from gverify.sh to figure out - * if the error is a force blocker or not. */ - tmp = strtok_r (buf, "|", &save_ptr); - if (!tmp) { - ret = -1; - goto out; - } - if (!strcmp (tmp, "FORCE_BLOCKER")) - *is_force_blocker = 1; - else { - /* No FORCE_BLOCKER flag present so all that is - * present is the error message. */ - *is_force_blocker = 0; - *op_errstr = gf_strdup (tmp); - ret = -1; - goto out; - } - - /* Copy rest of the error message to op_errstr */ - tmp = strtok_r (NULL, "|", &save_ptr); - if (tmp) - *op_errstr = gf_strdup (tmp); - ret = -1; - goto out; - } - ret = 0; + /* Tokenize the error message from gverify.sh to figure out + * if the error is a force blocker or not. 
*/ + tmp = strtok_r(buf, "|", &save_ptr); + if (!tmp) { + ret = -1; + goto out; + } + if (!strcmp(tmp, "FORCE_BLOCKER")) + *is_force_blocker = 1; + else { + /* No FORCE_BLOCKER flag present so all that is + * present is the error message. */ + *is_force_blocker = 0; + *op_errstr = gf_strdup(tmp); + ret = -1; + goto out; + } + + /* Copy rest of the error message to op_errstr */ + tmp = strtok_r(NULL, "|", &save_ptr); + if (tmp) + *op_errstr = gf_strdup(tmp); + ret = -1; + goto out; + } + ret = 0; out: - GF_FREE (slave_url_buf); - sys_unlink (log_file_path); - gf_msg_debug (this->name, 0, "Returning %d", ret); - return ret; + GF_FREE(slave_url_buf); + sys_unlink(log_file_path); + gf_msg_debug(this->name, 0, "Returning %d", ret); + return ret; } /** @slave_ip remains unmodified */ int -glusterd_geo_rep_parse_slave (char *slave_url, - char **hostname, char **op_errstr) +glusterd_geo_rep_parse_slave(char *slave_url, char **hostname, char **op_errstr) { - int ret = -1; - char *tmp = NULL; - char *save_ptr = NULL; - char *host = NULL; - char errmsg[PATH_MAX] = ""; - char *saved_url = NULL; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - - GF_ASSERT (slave_url); - GF_ASSERT (*slave_url); - - saved_url = gf_strdup (slave_url); - if (!saved_url) - goto out; + int ret = -1; + char *tmp = NULL; + char *save_ptr = NULL; + char *host = NULL; + char errmsg[PATH_MAX] = ""; + char *saved_url = NULL; + xlator_t *this = NULL; - /* Checking if hostname has user specified */ - host = strstr (saved_url, "@"); - if (!host) { /* no user specified */ - if (hostname) { - *hostname = gf_strdup (saved_url); - if (!*hostname) - goto out; - } + this = THIS; + GF_ASSERT(this); - ret = 0; - goto out; - } else { - /* Moving the host past the '@' and checking if the - * actual hostname also has '@' */ - host++; - if (strstr (host, "@")) { - gf_msg_debug (this->name, 0, "host = %s", host); - ret = snprintf (errmsg, sizeof(errmsg) - 1, - "Invalid Hostname (%s).", host); - errmsg[ret] = '\0'; - gf_msg (this->name, GF_LOG_ERROR, EINVAL, - GD_MSG_INVALID_ENTRY, "%s", errmsg); - ret = -1; - if (op_errstr) - *op_errstr = gf_strdup (errmsg); - goto out; - } + GF_ASSERT(slave_url); + GF_ASSERT(*slave_url); - ret = -1; + saved_url = gf_strdup(slave_url); + if (!saved_url) + goto out; - /** - * preliminary check for valid slave format. - */ - tmp = strtok_r (saved_url, "@", &save_ptr); - tmp = strtok_r (NULL, "@", &save_ptr); - if (!tmp) - goto out; - if (hostname) { - *hostname = gf_strdup (tmp); - if (!*hostname) - goto out; - } + /* Checking if hostname has user specified */ + host = strstr(saved_url, "@"); + if (!host) { /* no user specified */ + if (hostname) { + *hostname = gf_strdup(saved_url); + if (!*hostname) + goto out; } ret = 0; + goto out; + } else { + /* Moving the host past the '@' and checking if the + * actual hostname also has '@' */ + host++; + if (strstr(host, "@")) { + gf_msg_debug(this->name, 0, "host = %s", host); + ret = snprintf(errmsg, sizeof(errmsg) - 1, "Invalid Hostname (%s).", + host); + errmsg[ret] = '\0'; + gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s", + errmsg); + ret = -1; + if (op_errstr) + *op_errstr = gf_strdup(errmsg); + goto out; + } + + ret = -1; + + /** + * preliminary check for valid slave format. 
+ */ + tmp = strtok_r(saved_url, "@", &save_ptr); + tmp = strtok_r(NULL, "@", &save_ptr); + if (!tmp) + goto out; + if (hostname) { + *hostname = gf_strdup(tmp); + if (!*hostname) + goto out; + } + } + + ret = 0; out: - GF_FREE (saved_url); - if (ret) - if (hostname) - GF_FREE (*hostname); - gf_msg_debug (this->name, 0, "Returning %d", ret); - return ret; + GF_FREE(saved_url); + if (ret) + if (hostname) + GF_FREE(*hostname); + gf_msg_debug(this->name, 0, "Returning %d", ret); + return ret; } /* Return -1 only if there is a match in volume uuid */ static int -get_slavehost_from_voluuid (dict_t *dict, char *key, data_t *value, void *data) +get_slavehost_from_voluuid(dict_t *dict, char *key, data_t *value, void *data) { - char *slave_info = NULL; - char *tmp = NULL; - char *slave_host = NULL; - xlator_t *this = NULL; - struct slave_vol_config *slave_vol = NULL; - int i = 0; - int ret = -1; - - this = THIS; - GF_VALIDATE_OR_GOTO ("glusterd", this, out); - - slave_vol = data; - slave_info = value->data; + char *slave_info = NULL; + char *tmp = NULL; + char *slave_host = NULL; + xlator_t *this = NULL; + struct slave_vol_config *slave_vol = NULL; + int i = 0; + int ret = -1; - gf_msg_debug (this->name, 0, "slave_info:%s !", slave_info); + this = THIS; + GF_VALIDATE_OR_GOTO("glusterd", this, out); - if (!(slave_info) || strlen (slave_info) == 0) { - /* no slaves present, peace */ - ret = 0; - goto out; - } + slave_vol = data; + slave_info = value->data; - /* slave format: - * master_node_uuid:ssh://slave_host::slave_vol:slave_voluuid */ - while (i++ < 5) { - slave_info = strchr (slave_info, ':'); - if (slave_info) - slave_info++; - else - break; - } + gf_msg_debug(this->name, 0, "slave_info:%s !", slave_info); - if (!(slave_info) || strlen(slave_info) == 0) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVE_VOL_PARSE_FAIL, - "slave_info format is wrong!"); + if (!(slave_info) || strlen(slave_info) == 0) { + /* no slaves present, peace */ + ret = 0; + goto out; + } + + /* slave format: + * master_node_uuid:ssh://slave_host::slave_vol:slave_voluuid */ + while (i++ < 5) { + slave_info = strchr(slave_info, ':'); + if (slave_info) + slave_info++; + else + break; + } + + if (!(slave_info) || strlen(slave_info) == 0) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_VOL_PARSE_FAIL, + "slave_info format is wrong!"); + ret = -2; + goto out; + } else { + if (strcmp(slave_info, slave_vol->slave_voluuid) == 0) { + ret = -1; + + /* get corresponding slave host for reference*/ + slave_host = value->data; + slave_host = strstr(slave_host, "://"); + if (slave_host) { + slave_host += 3; + } else { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_VOL_PARSE_FAIL, + "Invalid slave_host format!"); ret = -2; goto out; - } else { - if (strcmp (slave_info, slave_vol->slave_voluuid) == 0) { - ret = -1; - - /* get corresponding slave host for reference*/ - slave_host = value->data; - slave_host = strstr (slave_host, "://"); - if (slave_host) { - slave_host += 3; - } else { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVE_VOL_PARSE_FAIL, - "Invalid slave_host format!"); - ret = -2; - goto out; - } - /* To go past username in non-root geo-rep session */ - tmp = strchr (slave_host, '@'); - if (tmp) { - if ((tmp - slave_host) >= LOGIN_NAME_MAX) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVE_VOL_PARSE_FAIL, - "Invalid slave user length in %s", - slave_host); - ret = -2; - goto out; - } - strncpy (slave_vol->old_slvuser, slave_host, - (tmp - slave_host)); - slave_vol->old_slvuser[(tmp - slave_host) + 1] - 
= '\0'; - slave_host = tmp + 1; - } else - strcpy (slave_vol->old_slvuser, "root"); - - tmp = strchr (slave_host, ':'); - if (!tmp) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVE_VOL_PARSE_FAIL, - "Invalid slave_host!"); - ret = -2; - goto out; - } - - strncpy (slave_vol->old_slvhost, slave_host, - (tmp - slave_host)); - slave_vol->old_slvhost[(tmp - slave_host) + 1] = '\0'; + } + /* To go past username in non-root geo-rep session */ + tmp = strchr(slave_host, '@'); + if (tmp) { + if ((tmp - slave_host) >= LOGIN_NAME_MAX) { + gf_msg(this->name, GF_LOG_ERROR, 0, + GD_MSG_SLAVE_VOL_PARSE_FAIL, + "Invalid slave user length in %s", slave_host); + ret = -2; + goto out; + } + strncpy(slave_vol->old_slvuser, slave_host, (tmp - slave_host)); + slave_vol->old_slvuser[(tmp - slave_host) + 1] = '\0'; + slave_host = tmp + 1; + } else + strcpy(slave_vol->old_slvuser, "root"); + + tmp = strchr(slave_host, ':'); + if (!tmp) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_VOL_PARSE_FAIL, + "Invalid slave_host!"); + ret = -2; + goto out; + } - goto out; - } + strncpy(slave_vol->old_slvhost, slave_host, (tmp - slave_host)); + slave_vol->old_slvhost[(tmp - slave_host) + 1] = '\0'; + + goto out; } + } - ret = 0; + ret = 0; out: - return ret; + return ret; } /* Given slave host and slave volume, check whether slave volume uuid @@ -3059,447 +3058,443 @@ out: * If slave volume uuid is present, get corresponding slave host * for reference */ static int -glusterd_get_slavehost_from_voluuid (glusterd_volinfo_t *volinfo, - char *slave_host, char *slave_vol, - struct slave_vol_config *slave1) +glusterd_get_slavehost_from_voluuid(glusterd_volinfo_t *volinfo, + char *slave_host, char *slave_vol, + struct slave_vol_config *slave1) { - int ret = -1; - xlator_t *this = NULL; + int ret = -1; + xlator_t *this = NULL; - this = THIS; + this = THIS; - GF_VALIDATE_OR_GOTO (this->name, volinfo, out); + GF_VALIDATE_OR_GOTO(this->name, volinfo, out); - ret = dict_foreach (volinfo->gsync_slaves, get_slavehost_from_voluuid, - slave1); + ret = dict_foreach(volinfo->gsync_slaves, get_slavehost_from_voluuid, + slave1); out: - return ret; + return ret; } int -glusterd_op_stage_gsync_create (dict_t *dict, char **op_errstr) +glusterd_op_stage_gsync_create(dict_t *dict, char **op_errstr) { - char *down_peerstr = NULL; - char *slave = NULL; - char *volname = NULL; - char *host_uuid = NULL; - char *statefile = NULL; - char *slave_url = NULL; - char *slave_host = NULL; - char *slave_vol = NULL; - char *conf_path = NULL; - char errmsg[PATH_MAX] = ""; - char common_pem_file[PATH_MAX] = ""; - char hook_script[PATH_MAX] = ""; - char uuid_str [64] = ""; - int ret = -1; - int is_pem_push = -1; - int ssh_port = 22; - gf_boolean_t is_force = -1; - gf_boolean_t is_no_verify = -1; - gf_boolean_t is_force_blocker = -1; - gf_boolean_t exists = _gf_false; - gf_boolean_t is_template_in_use = _gf_false; - glusterd_conf_t *conf = NULL; - glusterd_volinfo_t *volinfo = NULL; - struct stat stbuf = {0,}; - xlator_t *this = NULL; - struct slave_vol_config slave1 = {{0},}; - char old_slave_url[SLAVE_URL_INFO_MAX] = {0}; - char old_confpath[PATH_MAX] = {0}; - gf_boolean_t is_running = _gf_false; - char *statedir = NULL; - char statefiledir[PATH_MAX] = {0,}; - gf_boolean_t is_different_slavehost = _gf_false; - gf_boolean_t is_different_username = _gf_false; - char *slave_user = NULL; - char *save_ptr = NULL; - char *slave_url_buf = NULL; - int32_t len = 0; - - this = THIS; - GF_ASSERT (this); - conf = this->private; - GF_ASSERT (conf); - - ret = 
glusterd_op_gsync_args_get (dict, op_errstr, &volname, - &slave, &host_uuid); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_ARG_FETCH_ERROR, - "Unable to fetch arguments"); - gf_msg_debug (this->name, 0, "Returning %d", ret); - return -1; + char *down_peerstr = NULL; + char *slave = NULL; + char *volname = NULL; + char *host_uuid = NULL; + char *statefile = NULL; + char *slave_url = NULL; + char *slave_host = NULL; + char *slave_vol = NULL; + char *conf_path = NULL; + char errmsg[PATH_MAX] = ""; + char common_pem_file[PATH_MAX] = ""; + char hook_script[PATH_MAX] = ""; + char uuid_str[64] = ""; + int ret = -1; + int is_pem_push = -1; + int ssh_port = 22; + gf_boolean_t is_force = -1; + gf_boolean_t is_no_verify = -1; + gf_boolean_t is_force_blocker = -1; + gf_boolean_t exists = _gf_false; + gf_boolean_t is_template_in_use = _gf_false; + glusterd_conf_t *conf = NULL; + glusterd_volinfo_t *volinfo = NULL; + struct stat stbuf = { + 0, + }; + xlator_t *this = NULL; + struct slave_vol_config slave1 = { + {0}, + }; + char old_slave_url[SLAVE_URL_INFO_MAX] = {0}; + char old_confpath[PATH_MAX] = {0}; + gf_boolean_t is_running = _gf_false; + char *statedir = NULL; + char statefiledir[PATH_MAX] = { + 0, + }; + gf_boolean_t is_different_slavehost = _gf_false; + gf_boolean_t is_different_username = _gf_false; + char *slave_user = NULL; + char *save_ptr = NULL; + char *slave_url_buf = NULL; + int32_t len = 0; + + this = THIS; + GF_ASSERT(this); + conf = this->private; + GF_ASSERT(conf); + + ret = glusterd_op_gsync_args_get(dict, op_errstr, &volname, &slave, + &host_uuid); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_ARG_FETCH_ERROR, + "Unable to fetch arguments"); + gf_msg_debug(this->name, 0, "Returning %d", ret); + return -1; + } + + if (conf->op_version < 2) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNSUPPORTED_VERSION, + "Op Version not supported."); + snprintf(errmsg, sizeof(errmsg), + "One or more nodes do not" + " support the required op version."); + *op_errstr = gf_strdup(errmsg); + ret = -1; + goto out; + } + + exists = glusterd_check_volume_exists(volname); + ret = glusterd_volinfo_find(volname, &volinfo); + if ((ret) || (!exists)) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_FOUND, + "volume name does not exist"); + snprintf(errmsg, sizeof(errmsg), + "Volume name %s does not" + " exist", + volname); + *op_errstr = gf_strdup(errmsg); + gf_msg_debug(this->name, 0, "Returning %d", ret); + return -1; + } + + ret = glusterd_get_slave_details_confpath(volinfo, dict, &slave_url, + &slave_host, &slave_vol, + &conf_path, op_errstr); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVEINFO_FETCH_ERROR, + "Unable to fetch slave or confpath details."); + ret = -1; + goto out; + } + + is_force = dict_get_str_boolean(dict, "force", _gf_false); + + uuid_utoa_r(MY_UUID, uuid_str); + if (!strcmp(uuid_str, host_uuid)) { + ret = glusterd_are_vol_all_peers_up(volinfo, &conf->peers, + &down_peerstr); + if ((ret == _gf_false) && !is_force) { + snprintf(errmsg, sizeof(errmsg), + "Peer %s," + " which is a part of %s volume, is" + " down. 
Please bring up the peer and" + " retry.", + down_peerstr, volinfo->volname); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_DISCONNECTED, "%s", + errmsg); + *op_errstr = gf_strdup(errmsg); + GF_FREE(down_peerstr); + down_peerstr = NULL; + gf_msg_debug(this->name, 0, "Returning %d", ret); + return -1; + } else if (ret == _gf_false) { + gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_PEER_DISCONNECTED, + "Peer %s, which is a part of %s volume, is" + " down. Force creating geo-rep session." + " On bringing up the peer, re-run" + " \"gluster system:: execute" + " gsec_create\" and \"gluster volume" + " geo-replication %s %s create push-pem" + " force\"", + down_peerstr, volinfo->volname, volinfo->volname, slave); + GF_FREE(down_peerstr); + down_peerstr = NULL; + } + + ret = dict_get_int32(dict, "ssh_port", &ssh_port); + if (ret < 0 && ret != -ENOENT) { + snprintf(errmsg, sizeof(errmsg), + "Fetching ssh_port failed while " + "handling " GEOREP " options"); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s", + errmsg); + goto out; + } + + is_no_verify = dict_get_str_boolean(dict, "no_verify", _gf_false); + + if (!is_no_verify) { + /* Checking if slave host is pingable, has proper passwordless + * ssh login setup, slave volume is created, slave vol is empty, + * and if it has enough memory and bypass in case of force if + * the error is not a force blocker */ + ret = glusterd_verify_slave(volname, slave_url, slave_vol, ssh_port, + op_errstr, &is_force_blocker); + if (ret) { + if (is_force && !is_force_blocker) { + gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_INVALID_SLAVE, + "%s is not a valid slave " + "volume. Error: %s. Force " + "creating geo-rep" + " session.", + slave, *op_errstr); + } else { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_SLAVE, + "%s is not a valid slave " + "volume. 
Error: %s", + slave, *op_errstr); + ret = -1; + + goto out; + } + } } - if (conf->op_version < 2) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_UNSUPPORTED_VERSION, - "Op Version not supported."); - snprintf (errmsg, sizeof(errmsg), "One or more nodes do not" - " support the required op version."); - *op_errstr = gf_strdup (errmsg); + ret = dict_get_int32(dict, "push_pem", &is_pem_push); + if (!ret && is_pem_push) { + ret = snprintf(common_pem_file, sizeof(common_pem_file), + "%s" GLUSTERD_COMMON_PEM_PUB_FILE, conf->workdir); + if ((ret < 0) || (ret >= sizeof(common_pem_file))) { ret = -1; goto out; - } - - exists = glusterd_check_volume_exists (volname); - ret = glusterd_volinfo_find (volname, &volinfo); - if ((ret) || (!exists)) { - gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_FOUND, - "volume name does not exist"); - snprintf (errmsg, sizeof(errmsg), "Volume name %s does not" - " exist", volname); - *op_errstr = gf_strdup (errmsg); - gf_msg_debug (this->name, 0, "Returning %d", ret); - return -1; - } + } - ret = glusterd_get_slave_details_confpath (volinfo, dict, &slave_url, - &slave_host, &slave_vol, - &conf_path, op_errstr); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVEINFO_FETCH_ERROR, - "Unable to fetch slave or confpath details."); + ret = snprintf(hook_script, sizeof(hook_script), + "%s" GLUSTERD_CREATE_HOOK_SCRIPT, conf->workdir); + if ((ret < 0) || (ret >= sizeof(hook_script))) { ret = -1; goto out; - } - - is_force = dict_get_str_boolean (dict, "force", _gf_false); - - uuid_utoa_r (MY_UUID, uuid_str); - if (!strcmp (uuid_str, host_uuid)) { - ret = glusterd_are_vol_all_peers_up (volinfo, - &conf->peers, - &down_peerstr); - if ((ret == _gf_false) && !is_force) { - snprintf (errmsg, sizeof (errmsg), "Peer %s," - " which is a part of %s volume, is" - " down. Please bring up the peer and" - " retry.", down_peerstr, - volinfo->volname); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_PEER_DISCONNECTED, - "%s", errmsg); - *op_errstr = gf_strdup (errmsg); - GF_FREE (down_peerstr); - down_peerstr = NULL; - gf_msg_debug (this->name, 0, "Returning %d", ret); - return -1; - } else if (ret == _gf_false) { - gf_msg (this->name, GF_LOG_INFO, 0, - GD_MSG_PEER_DISCONNECTED, - "Peer %s, which is a part of %s volume, is" - " down. Force creating geo-rep session." - " On bringing up the peer, re-run" - " \"gluster system:: execute" - " gsec_create\" and \"gluster volume" - " geo-replication %s %s create push-pem" - " force\"", down_peerstr, volinfo->volname, - volinfo->volname, slave); - GF_FREE (down_peerstr); - down_peerstr = NULL; - } - - ret = dict_get_int32 (dict, "ssh_port", &ssh_port); - if (ret < 0 && ret != -ENOENT) { - snprintf (errmsg, sizeof (errmsg), - "Fetching ssh_port failed while " - "handling "GEOREP" options"); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_GET_FAILED, "%s", errmsg); - goto out; - } + } - is_no_verify = dict_get_str_boolean (dict, "no_verify", _gf_false); - - if (!is_no_verify) { - /* Checking if slave host is pingable, has proper passwordless - * ssh login setup, slave volume is created, slave vol is empty, - * and if it has enough memory and bypass in case of force if - * the error is not a force blocker */ - ret = glusterd_verify_slave (volname, slave_url, slave_vol, - ssh_port, op_errstr, - &is_force_blocker); - if (ret) { - if (is_force && !is_force_blocker) { - gf_msg (this->name, GF_LOG_INFO, 0, - GD_MSG_INVALID_SLAVE, - "%s is not a valid slave " - "volume. Error: %s. 
Force " - "creating geo-rep" - " session.", slave, - *op_errstr); - } else { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_INVALID_SLAVE, - "%s is not a valid slave " - "volume. Error: %s", - slave, *op_errstr); - ret = -1; - - goto out; - } - } + ret = sys_lstat(common_pem_file, &stbuf); + if (ret) { + len = snprintf(errmsg, sizeof(errmsg), + "%s" + " required for push-pem is" + " not present. Please run" + " \"gluster system:: execute" + " gsec_create\"", + common_pem_file); + if (len < 0) { + strcpy(errmsg, "<error>"); } + gf_msg(this->name, GF_LOG_ERROR, ENOENT, GD_MSG_FILE_OP_FAILED, + "%s", errmsg); + *op_errstr = gf_strdup(errmsg); + ret = -1; + goto out; + } - ret = dict_get_int32 (dict, "push_pem", &is_pem_push); - if (!ret && is_pem_push) { - ret = snprintf (common_pem_file, - sizeof(common_pem_file), - "%s"GLUSTERD_COMMON_PEM_PUB_FILE, - conf->workdir); - if ((ret < 0) || (ret >= sizeof(common_pem_file))) { - ret = -1; - goto out; - } - - ret = snprintf (hook_script, sizeof(hook_script), - "%s"GLUSTERD_CREATE_HOOK_SCRIPT, - conf->workdir); - if ((ret < 0) || (ret >= sizeof(hook_script))) { - ret = -1; - goto out; - } - - ret = sys_lstat (common_pem_file, &stbuf); - if (ret) { - len = snprintf (errmsg, sizeof (errmsg), "%s" - " required for push-pem is" - " not present. Please run" - " \"gluster system:: execute" - " gsec_create\"", - common_pem_file); - if (len < 0) { - strcpy(errmsg, "<error>"); - } - gf_msg (this->name, GF_LOG_ERROR, ENOENT, - GD_MSG_FILE_OP_FAILED, - "%s", errmsg); - *op_errstr = gf_strdup (errmsg); - ret = -1; - goto out; - } - - ret = sys_lstat (hook_script, &stbuf); - if (ret) { - len = snprintf (errmsg, sizeof (errmsg), - "The hook-script (%s) " - "required for push-pem is not " - "present. Please install the " - "hook-script and retry", - hook_script); - if (len < 0) { - strcpy(errmsg, "<error>"); - } - gf_msg (this->name, GF_LOG_ERROR, ENOENT, - GD_MSG_FILE_OP_FAILED, "%s", errmsg); - *op_errstr = gf_strdup (errmsg); - ret = -1; - goto out; - } - - if (!S_ISREG(stbuf.st_mode)) { - len = snprintf (errmsg, sizeof (errmsg), "%s" - " required for push-pem is" - " not a regular file. Please" - " run \"gluster system:: " - "execute gsec_create\"", - common_pem_file); - if (len < 0) { - strcpy(errmsg, "<error>"); - } - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_REG_FILE_MISSING, "%s", errmsg); - ret = -1; - goto out; - } + ret = sys_lstat(hook_script, &stbuf); + if (ret) { + len = snprintf(errmsg, sizeof(errmsg), + "The hook-script (%s) " + "required for push-pem is not " + "present. Please install the " + "hook-script and retry", + hook_script); + if (len < 0) { + strcpy(errmsg, "<error>"); } - } - - ret = glusterd_get_statefile_name (volinfo, slave, - conf_path, &statefile, - &is_template_in_use); - if (ret) { - if (!strstr(slave, "::")) - snprintf (errmsg, sizeof (errmsg), - "%s is not a valid slave url.", slave); - else - snprintf (errmsg, sizeof (errmsg), "Please check gsync " - "config file. 
Unable to get statefile's name"); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_STATEFILE_NAME_NOT_FOUND, - "%s", errmsg); + gf_msg(this->name, GF_LOG_ERROR, ENOENT, GD_MSG_FILE_OP_FAILED, + "%s", errmsg); + *op_errstr = gf_strdup(errmsg); ret = -1; goto out; - } + } - ret = dict_set_str (dict, "statefile", statefile); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, - "Unable to store statefile path"); + if (!S_ISREG(stbuf.st_mode)) { + len = snprintf(errmsg, sizeof(errmsg), + "%s" + " required for push-pem is" + " not a regular file. Please" + " run \"gluster system:: " + "execute gsec_create\"", + common_pem_file); + if (len < 0) { + strcpy(errmsg, "<error>"); + } + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REG_FILE_MISSING, + "%s", errmsg); + ret = -1; goto out; + } } + } - if (snprintf (statefiledir, sizeof (statefiledir), "%s", statefile) >= - sizeof (statefiledir)) { - snprintf (errmsg, sizeof (errmsg), - "Failed copying statefiledir"); - goto out; - } - statedir = dirname (statefiledir); + ret = glusterd_get_statefile_name(volinfo, slave, conf_path, &statefile, + &is_template_in_use); + if (ret) { + if (!strstr(slave, "::")) + snprintf(errmsg, sizeof(errmsg), "%s is not a valid slave url.", + slave); + else + snprintf(errmsg, sizeof(errmsg), + "Please check gsync " + "config file. Unable to get statefile's name"); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATEFILE_NAME_NOT_FOUND, + "%s", errmsg); + ret = -1; + goto out; + } + + ret = dict_set_str(dict, "statefile", statefile); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Unable to store statefile path"); + goto out; + } + + if (snprintf(statefiledir, sizeof(statefiledir), "%s", statefile) >= + sizeof(statefiledir)) { + snprintf(errmsg, sizeof(errmsg), "Failed copying statefiledir"); + goto out; + } + statedir = dirname(statefiledir); + + ret = sys_lstat(statedir, &stbuf); + if (!ret && !is_force) { + snprintf(errmsg, sizeof(errmsg), + "Session between %s" + " and %s is already created.", + volinfo->volname, slave); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SESSION_ALREADY_EXIST, "%s", + errmsg); + ret = -1; + goto out; + } else if (!ret) + gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_FORCE_CREATE_SESSION, + "Session between %s and %s is already created. Force" + " creating again.", + volinfo->volname, slave); + + ret = glusterd_get_slave_voluuid(slave_host, slave_vol, + slave1.slave_voluuid); + if ((ret) || (strlen(slave1.slave_voluuid) == 0)) { + snprintf(errmsg, sizeof(errmsg), "Unable to get remote volume uuid."); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REMOTE_VOL_UUID_FAIL, "%s", + errmsg); + ret = -1; + goto out; + } + + ret = dict_set_dynstr_with_alloc(dict, "slave_voluuid", + slave1.slave_voluuid); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Unable to set slave volume uuid in the dict"); + goto out; + } + + /* Check whether session is already created using slave volume uuid */ + ret = glusterd_get_slavehost_from_voluuid(volinfo, slave_host, slave_vol, + &slave1); + if (ret == -1) { + if (!is_force) { + snprintf(errmsg, sizeof(errmsg), + "Session between %s" + " and %s:%s is already created! Cannot create " + "with new slave:%s again!", + volinfo->volname, slave1.old_slvhost, slave_vol, + slave_host); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FORCE_CREATE_SESSION, + "Session between" + " %s and %s:%s is already created! 
" + "Cannot create with new slave:%s again!", + volinfo->volname, slave1.old_slvhost, slave_vol, slave_host); + goto out; + } + + /* Now, check whether session is already started.If so, warn!*/ + is_different_slavehost = (strcmp(slave_host, slave1.old_slvhost) != 0) + ? _gf_true + : _gf_false; - ret = sys_lstat (statedir, &stbuf); - if (!ret && !is_force) { - snprintf (errmsg, sizeof (errmsg), "Session between %s" - " and %s is already created.", - volinfo->volname, slave); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SESSION_ALREADY_EXIST, - "%s", errmsg); + if (strstr(slave_url, "@")) { + slave_url_buf = gf_strdup(slave_url); + if (!slave_url_buf) { + gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY, + "Unable to allocate memory"); ret = -1; goto out; - } else if (!ret) - gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_FORCE_CREATE_SESSION, - "Session between %s and %s is already created. Force" - " creating again.", volinfo->volname, slave); - - ret = glusterd_get_slave_voluuid (slave_host, slave_vol, - slave1.slave_voluuid); - if ((ret) || (strlen(slave1.slave_voluuid) == 0)) { - snprintf (errmsg, sizeof (errmsg), - "Unable to get remote volume uuid."); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_REMOTE_VOL_UUID_FAIL, "%s", errmsg); + } + slave_user = strtok_r(slave_url_buf, "@", &save_ptr); + } else + slave_user = "root"; + is_different_username = (strcmp(slave_user, slave1.old_slvuser) != 0) + ? _gf_true + : _gf_false; + + /* Do the check, only if different slave host/slave user */ + if (is_different_slavehost || is_different_username) { + len = snprintf(old_confpath, sizeof(old_confpath), + "%s/" GEOREP "/%s_%s_%s/gsyncd.conf", conf->workdir, + volinfo->volname, slave1.old_slvhost, slave_vol); + if ((len < 0) || (len >= sizeof(old_confpath))) { ret = -1; goto out; - } + } - ret = dict_set_dynstr_with_alloc (dict, "slave_voluuid", - slave1.slave_voluuid); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, - "Unable to set slave volume uuid in the dict"); + /* construct old slave url with (old) slave host */ + len = snprintf(old_slave_url, sizeof(old_slave_url), "%s::%s", + slave1.old_slvhost, slave_vol); + if ((len < 0) || (len >= sizeof(old_slave_url))) { + ret = -1; goto out; - } - - /* Check whether session is already created using slave volume uuid */ - ret = glusterd_get_slavehost_from_voluuid (volinfo, slave_host, - slave_vol, &slave1); - if (ret == -1) { - if (!is_force) { - snprintf (errmsg, sizeof (errmsg), "Session between %s" - " and %s:%s is already created! Cannot create " - "with new slave:%s again!", - volinfo->volname, slave1.old_slvhost, - slave_vol, slave_host); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_FORCE_CREATE_SESSION, "Session between" - " %s and %s:%s is already created! " - "Cannot create with new slave:%s again!", - volinfo->volname, slave1.old_slvhost, - slave_vol, slave_host); - goto out; - } - - /* Now, check whether session is already started.If so, warn!*/ - is_different_slavehost = - (strcmp (slave_host, slave1.old_slvhost) != 0) - ? _gf_true : _gf_false; - - if (strstr (slave_url, "@")) { - slave_url_buf = gf_strdup (slave_url); - if (!slave_url_buf) { - gf_msg (this->name, GF_LOG_ERROR, ENOMEM, - GD_MSG_NO_MEMORY, - "Unable to allocate memory"); - ret = -1; - goto out; - } - slave_user = strtok_r (slave_url_buf, "@", &save_ptr); - } else - slave_user = "root"; - is_different_username = - (strcmp (slave_user, slave1.old_slvuser) != 0) - ? 
_gf_true : _gf_false; - - /* Do the check, only if different slave host/slave user */ - if (is_different_slavehost || is_different_username) { - len = snprintf (old_confpath, sizeof(old_confpath), - "%s/"GEOREP"/%s_%s_%s/gsyncd.conf", - conf->workdir, volinfo->volname, - slave1.old_slvhost, slave_vol); - if ((len < 0) || (len >= sizeof(old_confpath))) { - ret = -1; - goto out; - } - - /* construct old slave url with (old) slave host */ - len = snprintf (old_slave_url, sizeof(old_slave_url), - "%s::%s", slave1.old_slvhost, - slave_vol); - if ((len < 0) || (len >= sizeof(old_slave_url))) { - ret = -1; - goto out; - } - - ret = glusterd_check_gsync_running_local (volinfo->volname, - old_slave_url, old_confpath, &is_running); - if (_gf_true == is_running) { - (void) snprintf (errmsg, sizeof(errmsg), "Geo" - "-replication session between %s and %s" - " is still active. Please stop the " - "session and retry.", - volinfo->volname, old_slave_url); - ret = -1; - goto out; - } - } - - ret = dict_set_dynstr_with_alloc (dict, "old_slavehost", - slave1.old_slvhost); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_SET_FAILED, - "Unable to set old_slavehost in the dict"); - goto out; - } + } - ret = dict_set_int32 (dict, "existing_session", _gf_true); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_SET_FAILED, - "Unable to set existing_session in the dict"); - goto out; - } - } else if (ret == -2) { - snprintf (errmsg, sizeof (errmsg), "get_slavehost_from_voluuid" - " failed for %s::%s. Please check the glusterd logs.", - slave_host, slave_vol); - gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_FORCE_CREATE_SESSION, - "get_slavehost_from_voluuid failed %s %s!!", - slave_host, slave_vol); + ret = glusterd_check_gsync_running_local( + volinfo->volname, old_slave_url, old_confpath, &is_running); + if (_gf_true == is_running) { + (void)snprintf(errmsg, sizeof(errmsg), + "Geo" + "-replication session between %s and %s" + " is still active. Please stop the " + "session and retry.", + volinfo->volname, old_slave_url); + ret = -1; goto out; + } } - ret = glusterd_verify_gsyncd_spawn (volinfo->volname, slave); + ret = dict_set_dynstr_with_alloc(dict, "old_slavehost", + slave1.old_slvhost); if (ret) { - snprintf (errmsg, sizeof (errmsg), "Unable to spawn gsyncd."); - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_SPAWN_FAILED, - "%s", errmsg); - goto out; + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Unable to set old_slavehost in the dict"); + goto out; } - ret = 0; + ret = dict_set_int32(dict, "existing_session", _gf_true); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Unable to set existing_session in the dict"); + goto out; + } + } else if (ret == -2) { + snprintf(errmsg, sizeof(errmsg), + "get_slavehost_from_voluuid" + " failed for %s::%s. 
Please check the glusterd logs.", + slave_host, slave_vol); + gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_FORCE_CREATE_SESSION, + "get_slavehost_from_voluuid failed %s %s!!", slave_host, + slave_vol); + goto out; + } + + ret = glusterd_verify_gsyncd_spawn(volinfo->volname, slave); + if (ret) { + snprintf(errmsg, sizeof(errmsg), "Unable to spawn gsyncd."); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_SPAWN_FAILED, "%s", + errmsg); + goto out; + } + + ret = 0; out: - if (ret && errmsg[0] != '\0') - *op_errstr = gf_strdup (errmsg); + if (ret && errmsg[0] != '\0') + *op_errstr = gf_strdup(errmsg); - if (slave_url_buf) - GF_FREE (slave_url_buf); + if (slave_url_buf) + GF_FREE(slave_url_buf); - gf_msg_debug (this->name, 0, "Returning %d", ret); - return ret; + gf_msg_debug(this->name, 0, "Returning %d", ret); + return ret; } /* pre-condition check for geo-rep pause/resume. @@ -3507,633 +3502,644 @@ out: * -1 on any check failed. */ static int -gd_pause_resume_validation (int type, glusterd_volinfo_t *volinfo, - char *slave, char *statefile, char **op_errstr) +gd_pause_resume_validation(int type, glusterd_volinfo_t *volinfo, char *slave, + char *statefile, char **op_errstr) { - int ret = 0; - char errmsg[PATH_MAX] = {0,}; - char monitor_status[NAME_MAX] = {0,}; - - GF_ASSERT (volinfo); - GF_ASSERT (slave); - GF_ASSERT (statefile); - GF_ASSERT (op_errstr); - - ret = glusterd_gsync_read_frm_status (statefile, monitor_status, - sizeof (monitor_status)); - if (ret <= 0) { - snprintf (errmsg, sizeof(errmsg), "Pause check Failed:" - " Geo-rep session is not setup"); - ret = -1; - goto out; - } - - if ( type == GF_GSYNC_OPTION_TYPE_PAUSE && - strstr (monitor_status, "Paused")) { - snprintf (errmsg, sizeof(errmsg), "Geo-replication" - " session between %s and %s already Paused.", - volinfo->volname, slave); - ret = -1; - goto out; - } - if ( type == GF_GSYNC_OPTION_TYPE_RESUME && - !strstr (monitor_status, "Paused")) { - snprintf (errmsg, sizeof(errmsg), "Geo-replication" - " session between %s and %s is not Paused.", - volinfo->volname, slave); - ret = -1; - goto out; - } - ret = 0; + int ret = 0; + char errmsg[PATH_MAX] = { + 0, + }; + char monitor_status[NAME_MAX] = { + 0, + }; + + GF_ASSERT(volinfo); + GF_ASSERT(slave); + GF_ASSERT(statefile); + GF_ASSERT(op_errstr); + + ret = glusterd_gsync_read_frm_status(statefile, monitor_status, + sizeof(monitor_status)); + if (ret <= 0) { + snprintf(errmsg, sizeof(errmsg), + "Pause check Failed:" + " Geo-rep session is not setup"); + ret = -1; + goto out; + } + + if (type == GF_GSYNC_OPTION_TYPE_PAUSE && + strstr(monitor_status, "Paused")) { + snprintf(errmsg, sizeof(errmsg), + "Geo-replication" + " session between %s and %s already Paused.", + volinfo->volname, slave); + ret = -1; + goto out; + } + if (type == GF_GSYNC_OPTION_TYPE_RESUME && + !strstr(monitor_status, "Paused")) { + snprintf(errmsg, sizeof(errmsg), + "Geo-replication" + " session between %s and %s is not Paused.", + volinfo->volname, slave); + ret = -1; + goto out; + } + ret = 0; out: - if (ret && (errmsg[0] != '\0')) { - *op_errstr = gf_strdup (errmsg); - } - return ret; + if (ret && (errmsg[0] != '\0')) { + *op_errstr = gf_strdup(errmsg); + } + return ret; } int -glusterd_op_stage_gsync_set (dict_t *dict, char **op_errstr) +glusterd_op_stage_gsync_set(dict_t *dict, char **op_errstr) { - int ret = 0; - int type = 0; - char *volname = NULL; - char *slave = NULL; - char *slave_url = NULL; - char *slave_host = NULL; - char *slave_vol = NULL; - char *down_peerstr = NULL; - char *statefile = 
NULL; - char statefiledir[PATH_MAX] = {0,}; - char *statedir = NULL; - char *path_list = NULL; - char *conf_path = NULL; - gf_boolean_t exists = _gf_false; - glusterd_volinfo_t *volinfo = NULL; - char errmsg[PATH_MAX] = {0,}; - dict_t *ctx = NULL; - gf_boolean_t is_force = 0; - gf_boolean_t is_running = _gf_false; - gf_boolean_t is_template_in_use = _gf_false; - uuid_t uuid = {0}; - char uuid_str [64] = {0}; - char *host_uuid = NULL; - xlator_t *this = NULL; - glusterd_conf_t *conf = NULL; - struct stat stbuf = {0,}; - - this = THIS; - GF_ASSERT (this); - conf = this->private; - GF_ASSERT (conf); - - ret = dict_get_int32 (dict, "type", &type); - if (ret < 0) { - gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, - "command type not found"); - *op_errstr = gf_strdup ("command unsuccessful"); + int ret = 0; + int type = 0; + char *volname = NULL; + char *slave = NULL; + char *slave_url = NULL; + char *slave_host = NULL; + char *slave_vol = NULL; + char *down_peerstr = NULL; + char *statefile = NULL; + char statefiledir[PATH_MAX] = { + 0, + }; + char *statedir = NULL; + char *path_list = NULL; + char *conf_path = NULL; + gf_boolean_t exists = _gf_false; + glusterd_volinfo_t *volinfo = NULL; + char errmsg[PATH_MAX] = { + 0, + }; + dict_t *ctx = NULL; + gf_boolean_t is_force = 0; + gf_boolean_t is_running = _gf_false; + gf_boolean_t is_template_in_use = _gf_false; + uuid_t uuid = {0}; + char uuid_str[64] = {0}; + char *host_uuid = NULL; + xlator_t *this = NULL; + glusterd_conf_t *conf = NULL; + struct stat stbuf = { + 0, + }; + + this = THIS; + GF_ASSERT(this); + conf = this->private; + GF_ASSERT(conf); + + ret = dict_get_int32(dict, "type", &type); + if (ret < 0) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, + "command type not found"); + *op_errstr = gf_strdup("command unsuccessful"); + goto out; + } + + if (type == GF_GSYNC_OPTION_TYPE_STATUS) { + ret = glusterd_verify_gsync_status_opts(dict, op_errstr); + goto out; + } + + ret = glusterd_op_gsync_args_get(dict, op_errstr, &volname, &slave, + &host_uuid); + if (ret) + goto out; + + uuid_utoa_r(MY_UUID, uuid_str); + + if (conf->op_version < 2) { + snprintf(errmsg, sizeof(errmsg), + "One or more nodes do not" + " support the required op version."); + ret = -1; + goto out; + } + + exists = glusterd_check_volume_exists(volname); + ret = glusterd_volinfo_find(volname, &volinfo); + if ((ret) || (!exists)) { + snprintf(errmsg, sizeof(errmsg), + "Volume name %s does not" + " exist", + volname); + ret = -1; + goto out; + } + + ret = glusterd_get_slave_details_confpath(volinfo, dict, &slave_url, + &slave_host, &slave_vol, + &conf_path, op_errstr); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVEINFO_FETCH_ERROR, + "Unable to fetch slave or confpath details."); + ret = -1; + goto out; + } + + is_force = dict_get_str_boolean(dict, "force", _gf_false); + + ret = glusterd_get_statefile_name(volinfo, slave, conf_path, &statefile, + &is_template_in_use); + if (ret) { + if (!strstr(slave, "::")) { + snprintf(errmsg, sizeof(errmsg), "%s is not a valid slave url.", + slave); + ret = -1; + goto out; + } else { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SLAVE_URL_INVALID, + "state_file entry missing in config file (%s)", conf_path); + + if ((type == GF_GSYNC_OPTION_TYPE_STOP) && is_force) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_STOP_FORCE, + "Allowing stop " + "force to bypass missing statefile " + "entry in config file (%s), and " + "template file", + conf_path); + ret = 0; + } else goto out; } - - if 
(type == GF_GSYNC_OPTION_TYPE_STATUS) { - ret = glusterd_verify_gsync_status_opts (dict, op_errstr); - goto out; + } else { + ret = dict_set_str(dict, "statefile", statefile); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Unable to store statefile path"); + goto out; } + } - ret = glusterd_op_gsync_args_get (dict, op_errstr, - &volname, &slave, &host_uuid); - if (ret) - goto out; - - uuid_utoa_r (MY_UUID, uuid_str); - - if (conf->op_version < 2) { - snprintf (errmsg, sizeof(errmsg), "One or more nodes do not" - " support the required op version."); - ret = -1; - goto out; + /* Allowing stop force to bypass the statefile check + * as this command acts as a fail safe method to stop geo-rep + * session. */ + if (!((type == GF_GSYNC_OPTION_TYPE_STOP) && is_force)) { + /* check session directory as statefile may not present + * during upgrade */ + if (snprintf(statefiledir, sizeof(statefiledir), "%s", statefile) >= + sizeof(statefiledir)) { + snprintf(errmsg, sizeof(errmsg), "Failed copying statefiledir"); + ret = -1; + goto out; } + statedir = dirname(statefiledir); - exists = glusterd_check_volume_exists (volname); - ret = glusterd_volinfo_find (volname, &volinfo); - if ((ret) || (!exists)) { - snprintf (errmsg, sizeof(errmsg), "Volume name %s does not" - " exist", volname); + ret = sys_lstat(statedir, &stbuf); + if (ret) { + snprintf(errmsg, sizeof(errmsg), + "Geo-replication" + " session between %s and %s does not exist.", + volinfo->volname, slave); + gf_msg(this->name, GF_LOG_ERROR, ENOENT, GD_MSG_FILE_OP_FAILED, + "%s. statefile = %s", errmsg, statefile); + ret = -1; + goto out; + } + } + + /* Check if all peers that are a part of the volume are up or not */ + if ((type == GF_GSYNC_OPTION_TYPE_DELETE) || + ((type == GF_GSYNC_OPTION_TYPE_STOP) && !is_force) || + (type == GF_GSYNC_OPTION_TYPE_PAUSE) || + (type == GF_GSYNC_OPTION_TYPE_RESUME)) { + if (!strcmp(uuid_str, host_uuid)) { + ret = glusterd_are_vol_all_peers_up(volinfo, &conf->peers, + &down_peerstr); + if (ret == _gf_false) { + snprintf(errmsg, sizeof(errmsg), + "Peer %s," + " which is a part of %s volume, is" + " down. 
Please bring up the peer and" + " retry.", + down_peerstr, volinfo->volname); ret = -1; + GF_FREE(down_peerstr); + down_peerstr = NULL; goto out; + } } + } - ret = glusterd_get_slave_details_confpath (volinfo, dict, &slave_url, - &slave_host, &slave_vol, - &conf_path, op_errstr); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVEINFO_FETCH_ERROR, - "Unable to fetch slave or confpath details."); + switch (type) { + case GF_GSYNC_OPTION_TYPE_START: + if (is_template_in_use) { + snprintf(errmsg, sizeof(errmsg), + "state-file entry " + "missing in the config file(%s).", + conf_path); ret = -1; goto out; - } - - is_force = dict_get_str_boolean (dict, "force", _gf_false); - - ret = glusterd_get_statefile_name (volinfo, slave, - conf_path, &statefile, - &is_template_in_use); - if (ret) { - if (!strstr(slave, "::")) { - snprintf (errmsg, sizeof(errmsg), - "%s is not a valid slave url.", slave); - ret = -1; - goto out; - } else { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_SLAVE_URL_INVALID, - "state_file entry missing in config file (%s)", - conf_path); - - if ((type == GF_GSYNC_OPTION_TYPE_STOP) && is_force) { - gf_msg (this->name, GF_LOG_WARNING, 0, - GD_MSG_STOP_FORCE, "Allowing stop " - "force to bypass missing statefile " - "entry in config file (%s), and " - "template file", conf_path); - ret = 0; - } else - goto out; - } - } else { - ret = dict_set_str (dict, "statefile", statefile); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_SET_FAILED, - "Unable to store statefile path"); - goto out; - } - } - - /* Allowing stop force to bypass the statefile check - * as this command acts as a fail safe method to stop geo-rep - * session. */ - if (!((type == GF_GSYNC_OPTION_TYPE_STOP) && is_force)) { - - /* check session directory as statefile may not present - * during upgrade */ - if (snprintf (statefiledir , sizeof (statefiledir), "%s", - statefile) >= sizeof (statefiledir)) { - snprintf (errmsg, sizeof (errmsg), - "Failed copying statefiledir"); - ret = -1; - goto out; - } - statedir = dirname (statefiledir); - - ret = sys_lstat (statedir, &stbuf); - if (ret) { - snprintf (errmsg, sizeof(errmsg), "Geo-replication" - " session between %s and %s does not exist.", - volinfo->volname, slave); - gf_msg (this->name, GF_LOG_ERROR, ENOENT, - GD_MSG_FILE_OP_FAILED, - "%s. statefile = %s", errmsg, statefile); - ret = -1; - goto out; - } - } + } - /* Check if all peers that are a part of the volume are up or not */ - if ((type == GF_GSYNC_OPTION_TYPE_DELETE) || - ((type == GF_GSYNC_OPTION_TYPE_STOP) && !is_force) || - (type == GF_GSYNC_OPTION_TYPE_PAUSE) || - (type == GF_GSYNC_OPTION_TYPE_RESUME)) { - if (!strcmp (uuid_str, host_uuid)) { - ret = glusterd_are_vol_all_peers_up (volinfo, - &conf->peers, - &down_peerstr); - if (ret == _gf_false) { - snprintf (errmsg, sizeof (errmsg), "Peer %s," - " which is a part of %s volume, is" - " down. 
Please bring up the peer and" - " retry.", down_peerstr, - volinfo->volname); - ret = -1; - GF_FREE (down_peerstr); - down_peerstr = NULL; - goto out; - } + ret = glusterd_op_verify_gsync_start_options( + volinfo, slave, conf_path, statefile, op_errstr, is_force); + if (ret) + goto out; + ctx = glusterd_op_get_ctx(); + if (ctx) { + /* gsyncd does a fuse mount to start + * the geo-rep session */ + if (!glusterd_is_fuse_available()) { + gf_msg("glusterd", GF_LOG_ERROR, errno, + GD_MSG_GEO_REP_START_FAILED, + "Unable " + "to open /dev/fuse (%s), " + "geo-replication start failed", + strerror(errno)); + snprintf(errmsg, sizeof(errmsg), "fuse unavailable"); + ret = -1; + goto out; } - } + } + break; - switch (type) { - case GF_GSYNC_OPTION_TYPE_START: + case GF_GSYNC_OPTION_TYPE_STOP: + if (!is_force) { if (is_template_in_use) { - snprintf (errmsg, sizeof(errmsg), "state-file entry " - "missing in the config file(%s).", - conf_path); - ret = -1; - goto out; + snprintf(errmsg, sizeof(errmsg), + "state-file entry missing in " + "the config file(%s).", + conf_path); + ret = -1; + goto out; } - ret = glusterd_op_verify_gsync_start_options (volinfo, slave, - conf_path, - statefile, - op_errstr, is_force); - if (ret) + ret = glusterd_op_verify_gsync_running(volinfo, slave, + conf_path, op_errstr); + if (ret) { + ret = glusterd_get_local_brickpaths(volinfo, &path_list); + if (!path_list && ret == -1) goto out; - ctx = glusterd_op_get_ctx(); - if (ctx) { - /* gsyncd does a fuse mount to start - * the geo-rep session */ - if (!glusterd_is_fuse_available ()) { - gf_msg ("glusterd", GF_LOG_ERROR, errno, - GD_MSG_GEO_REP_START_FAILED, "Unable " - "to open /dev/fuse (%s), " - "geo-replication start failed", - strerror (errno)); - snprintf (errmsg, sizeof(errmsg), - "fuse unavailable"); - ret = -1; - goto out; - } } - break; - case GF_GSYNC_OPTION_TYPE_STOP: - if (!is_force) { - if (is_template_in_use) { - snprintf (errmsg, sizeof(errmsg), - "state-file entry missing in " - "the config file(%s).", conf_path); - ret = -1; - goto out; - } - - ret = glusterd_op_verify_gsync_running (volinfo, slave, - conf_path, - op_errstr); - if (ret) { - ret = glusterd_get_local_brickpaths (volinfo, - &path_list); - if (!path_list && ret == -1) - goto out; - } - - /* Check for geo-rep session is active or not for - * configured user.*/ - ret = glusterd_gsync_get_uuid (slave, volinfo, uuid); - if (ret) { - snprintf (errmsg, sizeof(errmsg), - "Geo-replication session between %s " - "and %s does not exist.", - volinfo->volname, slave); - ret = -1; - goto out; - } + /* Check for geo-rep session is active or not for + * configured user.*/ + ret = glusterd_gsync_get_uuid(slave, volinfo, uuid); + if (ret) { + snprintf(errmsg, sizeof(errmsg), + "Geo-replication session between %s " + "and %s does not exist.", + volinfo->volname, slave); + ret = -1; + goto out; } - break; + } + break; case GF_GSYNC_OPTION_TYPE_PAUSE: case GF_GSYNC_OPTION_TYPE_RESUME: - if (is_template_in_use) { - snprintf (errmsg, sizeof(errmsg), - "state-file entry missing in " - "the config file(%s).", conf_path); - ret = -1; - goto out; - } + if (is_template_in_use) { + snprintf(errmsg, sizeof(errmsg), + "state-file entry missing in " + "the config file(%s).", + conf_path); + ret = -1; + goto out; + } - ret = glusterd_op_verify_gsync_running (volinfo, slave, - conf_path, op_errstr); - if (ret) { - ret = glusterd_get_local_brickpaths (volinfo, - &path_list); - if (!path_list && ret == -1) - goto out; - } + ret = glusterd_op_verify_gsync_running(volinfo, slave, 
conf_path, + op_errstr); + if (ret) { + ret = glusterd_get_local_brickpaths(volinfo, &path_list); + if (!path_list && ret == -1) + goto out; + } + + /* Check for geo-rep session is active or not + * for configured user.*/ + ret = glusterd_gsync_get_uuid(slave, volinfo, uuid); + if (ret) { + snprintf(errmsg, sizeof(errmsg), + "Geo-replication" + " session between %s and %s does not exist.", + volinfo->volname, slave); + ret = -1; + goto out; + } - /* Check for geo-rep session is active or not - * for configured user.*/ - ret = glusterd_gsync_get_uuid (slave, volinfo, uuid); + if (!is_force) { + ret = gd_pause_resume_validation(type, volinfo, slave, + statefile, op_errstr); if (ret) { - snprintf (errmsg, sizeof(errmsg), "Geo-replication" - " session between %s and %s does not exist.", - volinfo->volname, slave); - ret = -1; + ret = glusterd_get_local_brickpaths(volinfo, &path_list); + if (!path_list && ret == -1) goto out; } - - if (!is_force) { - ret = gd_pause_resume_validation (type, volinfo, slave, - statefile, op_errstr); - if (ret) { - ret = glusterd_get_local_brickpaths (volinfo, - &path_list); - if (!path_list && ret == -1) - goto out; - } - } - break; + } + break; case GF_GSYNC_OPTION_TYPE_CONFIG: - if (is_template_in_use) { - snprintf (errmsg, sizeof(errmsg), "state-file entry " - "missing in the config file(%s).", - conf_path); - ret = -1; - goto out; - } - - ret = gsync_verify_config_options (dict, op_errstr, volname); + if (is_template_in_use) { + snprintf(errmsg, sizeof(errmsg), + "state-file entry " + "missing in the config file(%s).", + conf_path); + ret = -1; goto out; - break; + } + + ret = gsync_verify_config_options(dict, op_errstr, volname); + goto out; + break; case GF_GSYNC_OPTION_TYPE_DELETE: - /* Check if the gsync session is still running - * If so ask the user to stop geo-replication first.*/ - if (is_template_in_use) { - snprintf (errmsg, sizeof(errmsg), "state-file entry " - "missing in the config file(%s).", - conf_path); - ret = -1; - goto out; - } + /* Check if the gsync session is still running + * If so ask the user to stop geo-replication first.*/ + if (is_template_in_use) { + snprintf(errmsg, sizeof(errmsg), + "state-file entry " + "missing in the config file(%s).", + conf_path); + ret = -1; + goto out; + } - ret = glusterd_gsync_get_uuid (slave, volinfo, uuid); - if (ret) { - snprintf (errmsg, sizeof(errmsg), "Geo-replication" - " session between %s and %s does not exist.", - volinfo->volname, slave); - ret = -1; - goto out; - } else { - ret = glusterd_check_gsync_running_local (volinfo->volname, - slave, conf_path, - &is_running); - if (_gf_true == is_running) { - snprintf (errmsg, sizeof (errmsg), GEOREP - " session between %s & %s is " - "still active. Please stop the " - "session and retry.", - volinfo->volname, slave); - ret = -1; - goto out; - } + ret = glusterd_gsync_get_uuid(slave, volinfo, uuid); + if (ret) { + snprintf(errmsg, sizeof(errmsg), + "Geo-replication" + " session between %s and %s does not exist.", + volinfo->volname, slave); + ret = -1; + goto out; + } else { + ret = glusterd_check_gsync_running_local( + volinfo->volname, slave, conf_path, &is_running); + if (_gf_true == is_running) { + snprintf(errmsg, sizeof(errmsg), + GEOREP + " session between %s & %s is " + "still active. 
Please stop the " + "session and retry.", + volinfo->volname, slave); + ret = -1; + goto out; } + } - ret = glusterd_verify_gsyncd_spawn (volinfo->volname, slave); - if (ret) { - snprintf (errmsg, sizeof (errmsg), - "Unable to spawn gsyncd"); - } + ret = glusterd_verify_gsyncd_spawn(volinfo->volname, slave); + if (ret) { + snprintf(errmsg, sizeof(errmsg), "Unable to spawn gsyncd"); + } - break; - } + break; + } out: - if (path_list) - GF_FREE (path_list); + if (path_list) + GF_FREE(path_list); - if (ret && errmsg[0] != '\0') { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR, - "%s", errmsg); - *op_errstr = gf_strdup (errmsg); - } + if (ret && errmsg[0] != '\0') { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR, "%s", errmsg); + *op_errstr = gf_strdup(errmsg); + } - gf_msg_debug (this->name, 0, "Returning %d", ret); - return ret; + gf_msg_debug(this->name, 0, "Returning %d", ret); + return ret; } static int -gd_pause_or_resume_gsync (dict_t *dict, char *master, char *slave, - char *slave_host, char *slave_vol, char *conf_path, - char **op_errstr, gf_boolean_t is_pause) +gd_pause_or_resume_gsync(dict_t *dict, char *master, char *slave, + char *slave_host, char *slave_vol, char *conf_path, + char **op_errstr, gf_boolean_t is_pause) { - int32_t ret = 0; - int pfd = -1; - pid_t pid = 0; - char pidfile[PATH_MAX] = {0,}; - char errmsg[PATH_MAX] = ""; - char buf [4096] = {0,}; - gf_boolean_t is_template_in_use = _gf_false; - char monitor_status[NAME_MAX] = {0,}; - char *statefile = NULL; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - GF_ASSERT (dict); - GF_ASSERT (master); - GF_ASSERT (slave); - GF_ASSERT (slave_host); - GF_ASSERT (slave_vol); - GF_ASSERT (conf_path); - - pfd = gsyncd_getpidfile (master, slave, pidfile, - conf_path, &is_template_in_use); - if (pfd == -2) { - snprintf (errmsg, sizeof(errmsg), - "pid-file entry mising in config file and " - "template config file."); - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_PIDFILE_NOT_FOUND, - "%s", errmsg); - *op_errstr = gf_strdup (errmsg); - ret = -1; + int32_t ret = 0; + int pfd = -1; + pid_t pid = 0; + char pidfile[PATH_MAX] = { + 0, + }; + char errmsg[PATH_MAX] = ""; + char buf[4096] = { + 0, + }; + gf_boolean_t is_template_in_use = _gf_false; + char monitor_status[NAME_MAX] = { + 0, + }; + char *statefile = NULL; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + GF_ASSERT(dict); + GF_ASSERT(master); + GF_ASSERT(slave); + GF_ASSERT(slave_host); + GF_ASSERT(slave_vol); + GF_ASSERT(conf_path); + + pfd = gsyncd_getpidfile(master, slave, pidfile, conf_path, + &is_template_in_use); + if (pfd == -2) { + snprintf(errmsg, sizeof(errmsg), + "pid-file entry mising in config file and " + "template config file."); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PIDFILE_NOT_FOUND, "%s", + errmsg); + *op_errstr = gf_strdup(errmsg); + ret = -1; + goto out; + } + + if (gsync_status_byfd(pfd) == -1) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR, + "gsyncd b/w %s & %s is not running", master, slave); + /* monitor gsyncd already dead */ + goto out; + } + + if (pfd < 0) + goto out; + + /* Prepare to update status file*/ + ret = dict_get_str(dict, "statefile", &statefile); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Pause/Resume Failed: Unable to fetch statefile path"); + goto out; + } + ret = glusterd_gsync_read_frm_status(statefile, monitor_status, + sizeof(monitor_status)); + if (ret <= 0) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STAT_FILE_READ_FAILED, + 
"Pause/Resume Failed: " + "Unable to read status file for %s(master)" + " %s(slave)", + master, slave); + goto out; + } + + ret = sys_read(pfd, buf, sizeof(buf)); + if (ret > 0) { + pid = strtol(buf, NULL, 10); + if (is_pause) { + ret = kill(-pid, SIGSTOP); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_PID_KILL_FAIL, + "Failed" + " to pause gsyncd. Error: %s", + strerror(errno)); goto out; - } - - if (gsync_status_byfd (pfd) == -1) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR, - "gsyncd b/w %s & %s is not running", master, slave); - /* monitor gsyncd already dead */ - goto out; - } - - if (pfd < 0) - goto out; - - /* Prepare to update status file*/ - ret = dict_get_str (dict, "statefile", &statefile); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Pause/Resume Failed: Unable to fetch statefile path"); + } + /*On pause force, if status is already paused + do not update status again*/ + if (strstr(monitor_status, "Paused")) + goto out; + + ret = glusterd_create_status_file(master, slave, slave_host, + slave_vol, "Paused"); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, + GD_MSG_UPDATE_STATEFILE_FAILED, + "Unable to update state_file." + " Error : %s", + strerror(errno)); + /* If status cannot be updated resume back */ + if (kill(-pid, SIGCONT)) { + snprintf(errmsg, sizeof(errmsg), + "Pause successful but could " + "not update status file. " + "Please use 'resume force' to" + " resume back and retry pause" + " to reflect in status"); + gf_msg(this->name, GF_LOG_ERROR, errno, + GD_MSG_PID_KILL_FAIL, + "Resume back Failed. Error:" + "%s", + strerror(errno)); + *op_errstr = gf_strdup(errmsg); + } goto out; - } - ret = glusterd_gsync_read_frm_status (statefile, monitor_status, - sizeof (monitor_status)); - if (ret <= 0) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_STAT_FILE_READ_FAILED, "Pause/Resume Failed: " - "Unable to read status file for %s(master)" - " %s(slave)", master, slave); + } + } else { + ret = glusterd_create_status_file(master, slave, slave_host, + slave_vol, "Started"); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, + GD_MSG_UPDATE_STATEFILE_FAILED, + "Resume Failed: Unable to update " + "state_file. Error : %s", + strerror(errno)); goto out; - } - - ret = sys_read (pfd, buf, sizeof (buf)); - if (ret > 0) { - pid = strtol (buf, NULL, 10); - if (is_pause) { - ret = kill (-pid, SIGSTOP); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_PID_KILL_FAIL, "Failed" - " to pause gsyncd. Error: %s", - strerror (errno)); - goto out; - } - /*On pause force, if status is already paused - do not update status again*/ - if (strstr (monitor_status, "Paused")) - goto out; - - ret = glusterd_create_status_file ( master, slave, - slave_host, slave_vol, - "Paused"); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_UPDATE_STATEFILE_FAILED, - "Unable to update state_file." - " Error : %s", strerror (errno)); - /* If status cannot be updated resume back */ - if (kill (-pid, SIGCONT)) { - snprintf (errmsg, sizeof(errmsg), - "Pause successful but could " - "not update status file. " - "Please use 'resume force' to" - " resume back and retry pause" - " to reflect in status"); - gf_msg (this->name, GF_LOG_ERROR, - errno, - GD_MSG_PID_KILL_FAIL, - "Resume back Failed. 
Error:" - "%s", strerror (errno)); - *op_errstr = gf_strdup (errmsg); - } - goto out; - } - } else { - ret = glusterd_create_status_file (master, slave, - slave_host, - slave_vol, - "Started"); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_UPDATE_STATEFILE_FAILED, - "Resume Failed: Unable to update " - "state_file. Error : %s", - strerror (errno)); - goto out; - } - ret = kill (-pid, SIGCONT); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_PID_KILL_FAIL, - "Resumed Failed: Unable to send" - " SIGCONT. Error: %s", - strerror (errno)); - /* Process can't be resumed, update status - * back to paused. */ - ret = glusterd_create_status_file (master, - slave, - slave_host, - slave_vol, - monitor_status); - if (ret) { - snprintf (errmsg, sizeof(errmsg), - "Resume failed!!! Status " - "inconsistent. Please use " - "'resume force' to resume and" - " reach consistent state"); - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_STATUS_UPDATE_FAILED, - "Updating status back to paused" - " Failed. Error: %s", - strerror (errno)); - *op_errstr = gf_strdup (errmsg); - } - goto out; - } + } + ret = kill(-pid, SIGCONT); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_PID_KILL_FAIL, + "Resumed Failed: Unable to send" + " SIGCONT. Error: %s", + strerror(errno)); + /* Process can't be resumed, update status + * back to paused. */ + ret = glusterd_create_status_file(master, slave, slave_host, + slave_vol, monitor_status); + if (ret) { + snprintf(errmsg, sizeof(errmsg), + "Resume failed!!! Status " + "inconsistent. Please use " + "'resume force' to resume and" + " reach consistent state"); + gf_msg(this->name, GF_LOG_ERROR, 0, + GD_MSG_STATUS_UPDATE_FAILED, + "Updating status back to paused" + " Failed. Error: %s", + strerror(errno)); + *op_errstr = gf_strdup(errmsg); } + goto out; + } } - ret = 0; + } + ret = 0; out: - sys_close (pfd); - return ret; + sys_close(pfd); + return ret; } static int -stop_gsync (char *master, char *slave, char **msg, - char *conf_path, char **op_errstr, - gf_boolean_t is_force) +stop_gsync(char *master, char *slave, char **msg, char *conf_path, + char **op_errstr, gf_boolean_t is_force) { - int32_t ret = 0; - int pfd = -1; - pid_t pid = 0; - char pidfile[PATH_MAX] = {0,}; - char errmsg[PATH_MAX] = ""; - char buf[4096] = {0,}; - int i = 0; - gf_boolean_t is_template_in_use = _gf_false; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - - GF_ASSERT (this->private); - - pfd = gsyncd_getpidfile (master, slave, pidfile, - conf_path, &is_template_in_use); - if (pfd == -2) { - snprintf (errmsg, sizeof(errmsg) - 1, - "pid-file entry mising in config file and " - "template config file."); - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_PIDFILE_NOT_FOUND, - "%s", errmsg); - *op_errstr = gf_strdup (errmsg); - ret = -1; - goto out; - } - if (gsync_status_byfd (pfd) == -1 && !is_force) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR, - "gsyncd b/w %s & %s is not running", master, - slave); - /* monitor gsyncd already dead */ - goto out; - } - - if (pfd < 0) - goto out; - - ret = sys_read (pfd, buf, sizeof (buf)); - if (ret > 0) { - pid = strtol (buf, NULL, 10); - ret = kill (-pid, SIGTERM); - if (ret && !is_force) { - gf_msg (this->name, GF_LOG_WARNING, errno, - GD_MSG_PID_KILL_FAIL, - "failed to kill gsyncd"); - goto out; - } - for (i = 0; i < 20; i++) { - if (gsync_status_byfd (pfd) == -1) { - /* monitor gsyncd is dead but worker may - * still be alive, give some more time - * before SIGKILL (hack) - */ - usleep (50000); - 
break; - } - usleep (50000); - } - kill (-pid, SIGKILL); - sys_unlink (pidfile); + int32_t ret = 0; + int pfd = -1; + pid_t pid = 0; + char pidfile[PATH_MAX] = { + 0, + }; + char errmsg[PATH_MAX] = ""; + char buf[4096] = { + 0, + }; + int i = 0; + gf_boolean_t is_template_in_use = _gf_false; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + + GF_ASSERT(this->private); + + pfd = gsyncd_getpidfile(master, slave, pidfile, conf_path, + &is_template_in_use); + if (pfd == -2) { + snprintf(errmsg, sizeof(errmsg) - 1, + "pid-file entry mising in config file and " + "template config file."); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PIDFILE_NOT_FOUND, "%s", + errmsg); + *op_errstr = gf_strdup(errmsg); + ret = -1; + goto out; + } + if (gsync_status_byfd(pfd) == -1 && !is_force) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR, + "gsyncd b/w %s & %s is not running", master, slave); + /* monitor gsyncd already dead */ + goto out; + } + + if (pfd < 0) + goto out; + + ret = sys_read(pfd, buf, sizeof(buf)); + if (ret > 0) { + pid = strtol(buf, NULL, 10); + ret = kill(-pid, SIGTERM); + if (ret && !is_force) { + gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_PID_KILL_FAIL, + "failed to kill gsyncd"); + goto out; + } + for (i = 0; i < 20; i++) { + if (gsync_status_byfd(pfd) == -1) { + /* monitor gsyncd is dead but worker may + * still be alive, give some more time + * before SIGKILL (hack) + */ + usleep(50000); + break; + } + usleep(50000); } - ret = 0; + kill(-pid, SIGKILL); + sys_unlink(pidfile); + } + ret = 0; out: - sys_close (pfd); + sys_close(pfd); - return ret; + return ret; } /* @@ -4149,2565 +4155,2539 @@ out: */ int -glusterd_gsync_op_already_set (char* master, char* slave, char* conf_path, - char* op_name, char* op_value) +glusterd_gsync_op_already_set(char *master, char *slave, char *conf_path, + char *op_name, char *op_value) { - dict_t *confd = NULL; - char *op_val_buf = NULL; - int32_t op_val_conf = 0; - int32_t op_val_cli = 0; - int32_t ret = -1; - gf_boolean_t is_bool = _gf_true; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - - confd = dict_new (); - if (!confd) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, - "Not able to create dict."); - return -1; - } - - ret = glusterd_gsync_get_config (master, slave, conf_path, - confd); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_GET_CONFIG_INFO_FAILED, - "Unable to get configuration data for %s(master), " - "%s(slave)", master, slave); - goto out; - } - - ret = dict_get_param (confd, op_name, &op_val_buf); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Unable to get op_value for %s(master), %s(slave). 
" - "Please check gsync config file.", master, slave); - ret = 1; - goto out; - } - - gf_msg_debug (this->name, 0, "val_cli:%s val_conf:%s", op_value, - op_val_buf); - - if (!strcmp(op_val_buf,"true") || !strcmp(op_val_buf,"1") - || !strcmp(op_val_buf,"yes")) { - op_val_conf = 1; - } else if(!strcmp(op_val_buf,"false") || !strcmp(op_val_buf,"0") - || !strcmp(op_val_buf,"no")) { - op_val_conf = 0; + dict_t *confd = NULL; + char *op_val_buf = NULL; + int32_t op_val_conf = 0; + int32_t op_val_cli = 0; + int32_t ret = -1; + gf_boolean_t is_bool = _gf_true; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + + confd = dict_new(); + if (!confd) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, + "Not able to create dict."); + return -1; + } + + ret = glusterd_gsync_get_config(master, slave, conf_path, confd); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GET_CONFIG_INFO_FAILED, + "Unable to get configuration data for %s(master), " + "%s(slave)", + master, slave); + goto out; + } + + ret = dict_get_param(confd, op_name, &op_val_buf); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Unable to get op_value for %s(master), %s(slave). " + "Please check gsync config file.", + master, slave); + ret = 1; + goto out; + } + + gf_msg_debug(this->name, 0, "val_cli:%s val_conf:%s", op_value, + op_val_buf); + + if (!strcmp(op_val_buf, "true") || !strcmp(op_val_buf, "1") || + !strcmp(op_val_buf, "yes")) { + op_val_conf = 1; + } else if (!strcmp(op_val_buf, "false") || !strcmp(op_val_buf, "0") || + !strcmp(op_val_buf, "no")) { + op_val_conf = 0; + } else { + is_bool = _gf_false; + } + + if (is_bool) { + if (!strcmp(op_value, "true") || !strcmp(op_value, "1") || + !strcmp(op_value, "yes")) { + op_val_cli = 1; } else { - is_bool = _gf_false; + op_val_cli = 0; } - if (is_bool) { - if (!strcmp(op_value,"true") || !strcmp(op_value,"1") - || !strcmp(op_value,"yes")) { - op_val_cli = 1; - } else { - op_val_cli = 0; - } - - if ( op_val_cli == op_val_conf ) { - ret = 0; - goto out; - } - } else { - if (!strcmp(op_val_buf,op_value)) { - ret = 0; - goto out; - } + if (op_val_cli == op_val_conf) { + ret = 0; + goto out; + } + } else { + if (!strcmp(op_val_buf, op_value)) { + ret = 0; + goto out; } + } - ret = 1; + ret = 1; out: - dict_unref(confd); - return ret; + dict_unref(confd); + return ret; } static int -glusterd_gsync_configure (glusterd_volinfo_t *volinfo, char *slave, - char *path_list, dict_t *dict, - dict_t *resp_dict, char **op_errstr) +glusterd_gsync_configure(glusterd_volinfo_t *volinfo, char *slave, + char *path_list, dict_t *dict, dict_t *resp_dict, + char **op_errstr) { - int32_t ret = -1; - char *op_name = NULL; - char *op_value = NULL; - runner_t runner = {0,}; - glusterd_conf_t *priv = NULL; - char *subop = NULL; - char *master = NULL; - char *conf_path = NULL; - char *slave_host = NULL; - char *slave_vol = NULL; - struct stat stbuf = {0, }; - gf_boolean_t restart_required = _gf_true; - char **resopt = NULL; - gf_boolean_t op_already_set = _gf_false; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - - GF_ASSERT (slave); - GF_ASSERT (op_errstr); - GF_ASSERT (dict); - GF_ASSERT (resp_dict); - - ret = dict_get_str (dict, "subop", &subop); - if (ret != 0) - goto out; + int32_t ret = -1; + char *op_name = NULL; + char *op_value = NULL; + runner_t runner = { + 0, + }; + glusterd_conf_t *priv = NULL; + char *subop = NULL; + char *master = NULL; + char *conf_path = NULL; + char *slave_host = NULL; + char *slave_vol = NULL; + struct 
stat stbuf = { + 0, + }; + gf_boolean_t restart_required = _gf_true; + char **resopt = NULL; + gf_boolean_t op_already_set = _gf_false; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + + GF_ASSERT(slave); + GF_ASSERT(op_errstr); + GF_ASSERT(dict); + GF_ASSERT(resp_dict); + + ret = dict_get_str(dict, "subop", &subop); + if (ret != 0) + goto out; + + if (strcmp(subop, "get") == 0 || strcmp(subop, "get-all") == 0) { + /* deferred to cli */ + gf_msg_debug(this->name, 0, "Returning 0"); + return 0; + } - if (strcmp (subop, "get") == 0 || strcmp (subop, "get-all") == 0) { - /* deferred to cli */ - gf_msg_debug (this->name, 0, "Returning 0"); - return 0; - } + ret = dict_get_str(dict, "op_name", &op_name); + if (ret != 0) + goto out; - ret = dict_get_str (dict, "op_name", &op_name); + if (strtail(subop, "set")) { + ret = dict_get_str(dict, "op_value", &op_value); if (ret != 0) - goto out; - - if (strtail (subop, "set")) { - ret = dict_get_str (dict, "op_value", &op_value); - if (ret != 0) - goto out; - } - - priv = THIS->private; - if (priv == NULL) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_GLUSTERD_PRIV_NOT_FOUND, - "priv of glusterd not present"); - *op_errstr = gf_strdup ("glusterd defunct"); - goto out; + goto out; + } + + priv = THIS->private; + if (priv == NULL) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_PRIV_NOT_FOUND, + "priv of glusterd not present"); + *op_errstr = gf_strdup("glusterd defunct"); + goto out; + } + + ret = dict_get_str(dict, "conf_path", &conf_path); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Unable to fetch conf file path."); + goto out; + } + + master = ""; + runinit(&runner); + runner_add_args(&runner, GSYNCD_PREFIX "/gsyncd", "-c", NULL); + runner_argprintf(&runner, "%s", conf_path); + runner_argprintf(&runner, "--iprefix=%s", DATADIR); + if (volinfo) { + master = volinfo->volname; + runner_argprintf(&runner, ":%s", master); + } + runner_add_arg(&runner, slave); + runner_argprintf(&runner, "--config-%s", subop); + runner_add_arg(&runner, op_name); + if (op_value) + runner_add_arg(&runner, op_value); + + if (strcmp(op_name, "checkpoint") != 0 && strtail(subop, "set")) { + ret = glusterd_gsync_op_already_set(master, slave, conf_path, op_name, + op_value); + if (ret == -1) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_GSYNCD_OP_SET_FAILED, + "glusterd_gsync_op_already_set failed."); + gf_asprintf(op_errstr, + GEOREP + " config-%s failed for " + "%s %s", + subop, master, slave); + goto out; } - - ret = dict_get_str (dict, "conf_path", &conf_path); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Unable to fetch conf file path."); - goto out; + if (ret == 0) { + gf_msg_debug(this->name, 0, "op_value is already set"); + op_already_set = _gf_true; + goto out; } + } - master = ""; - runinit (&runner); - runner_add_args (&runner, GSYNCD_PREFIX"/gsyncd", "-c", NULL); - runner_argprintf (&runner, "%s", conf_path); - runner_argprintf (&runner, "--iprefix=%s", DATADIR); - if (volinfo) { - master = volinfo->volname; - runner_argprintf (&runner, ":%s", master); - } - runner_add_arg (&runner, slave); - runner_argprintf (&runner, "--config-%s", subop); - runner_add_arg (&runner, op_name); - if (op_value) - runner_add_arg (&runner, op_value); - - if ( strcmp(op_name,"checkpoint") != 0 && strtail (subop, "set")) { - ret = glusterd_gsync_op_already_set(master,slave,conf_path, - op_name,op_value); - if (ret == -1) { - gf_msg (this->name, GF_LOG_WARNING, 0, - GD_MSG_GSYNCD_OP_SET_FAILED, 
- "glusterd_gsync_op_already_set failed."); - gf_asprintf (op_errstr, GEOREP" config-%s failed for " - "%s %s", subop, master, slave); - goto out; - } - if (ret == 0) { - gf_msg_debug (this->name, 0, "op_value is already set"); - op_already_set = _gf_true; - goto out; - } - } + synclock_unlock(&priv->big_lock); + ret = runner_run(&runner); + synclock_lock(&priv->big_lock); + if (ret) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_GSYNCD_ERROR, + "gsyncd failed to %s %s option for " + "%s %s peers", + subop, op_name, master, slave); - synclock_unlock (&priv->big_lock); - ret = runner_run (&runner); - synclock_lock (&priv->big_lock); - if (ret) { - gf_msg (this->name, GF_LOG_WARNING, 0, GD_MSG_GSYNCD_ERROR, - "gsyncd failed to %s %s option for " - "%s %s peers", subop, op_name, master, - slave); + gf_asprintf(op_errstr, GEOREP " config-%s failed for %s %s", subop, + master, slave); - gf_asprintf (op_errstr, GEOREP" config-%s failed for %s %s", - subop, master, slave); + goto out; + } + if ((!strcmp(op_name, "state_file")) && (op_value)) { + ret = sys_lstat(op_value, &stbuf); + if (ret) { + ret = dict_get_str(dict, "slave_host", &slave_host); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Unable to fetch slave host."); goto out; - } + } - if ((!strcmp (op_name, "state_file")) && (op_value)) { + ret = dict_get_str(dict, "slave_vol", &slave_vol); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Unable to fetch slave volume name."); + goto out; + } - ret = sys_lstat (op_value, &stbuf); - if (ret) { - ret = dict_get_str (dict, "slave_host", &slave_host); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_GET_FAILED, - "Unable to fetch slave host."); - goto out; - } - - ret = dict_get_str (dict, "slave_vol", &slave_vol); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_GET_FAILED, - "Unable to fetch slave volume name."); - goto out; - } - - ret = glusterd_create_status_file (volinfo->volname, - slave, slave_host, - slave_vol, - "Switching Status " - "File"); - if (ret || sys_lstat (op_value, &stbuf)) { - gf_msg (this->name, GF_LOG_ERROR, errno, - GD_MSG_FILE_OP_FAILED, "Unable to " - "create %s. Error : %s", op_value, - strerror (errno)); - ret = -1; - goto out; - } - } + ret = glusterd_create_status_file(volinfo->volname, slave, + slave_host, slave_vol, + "Switching Status " + "File"); + if (ret || sys_lstat(op_value, &stbuf)) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED, + "Unable to " + "create %s. 
Error : %s", + op_value, strerror(errno)); + ret = -1; + goto out; + } } + } - ret = 0; - gf_asprintf (op_errstr, "config-%s successful", subop); + ret = 0; + gf_asprintf(op_errstr, "config-%s successful", subop); out: - if (!ret && volinfo && !op_already_set) { - for (resopt = gsync_no_restart_opts; *resopt; resopt++) { - restart_required = _gf_true; - if (!strcmp ((*resopt), op_name)){ - restart_required = _gf_false; - break; - } + if (!ret && volinfo && !op_already_set) { + for (resopt = gsync_no_restart_opts; *resopt; resopt++) { + restart_required = _gf_true; + if (!strcmp((*resopt), op_name)) { + restart_required = _gf_false; + break; } + } - if (restart_required) { - ret = glusterd_check_restart_gsync_session (volinfo, slave, - resp_dict, path_list, - conf_path, 0); - if (ret) - *op_errstr = gf_strdup ("internal error"); - } + if (restart_required) { + ret = glusterd_check_restart_gsync_session( + volinfo, slave, resp_dict, path_list, conf_path, 0); + if (ret) + *op_errstr = gf_strdup("internal error"); } + } - gf_msg_debug (this->name, 0, "Returning %d", ret); - return ret; + gf_msg_debug(this->name, 0, "Returning %d", ret); + return ret; } int -glusterd_gsync_read_frm_status (char *path, char *buf, size_t blen) +glusterd_gsync_read_frm_status(char *path, char *buf, size_t blen) { - int ret = 0; - int status_fd = -1; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - - GF_ASSERT (path); - GF_ASSERT (buf); - status_fd = open (path, O_RDONLY); - if (status_fd == -1) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED, - "Unable to read gsyncd status file %s", path); - return -1; - } - ret = sys_read (status_fd, buf, blen - 1); - if (ret > 0) { - size_t len = strnlen (buf, ret); - /* Ensure there is a NUL byte and that it's not the first. */ - if (len == 0 || len == blen - 1) { - ret = -1; - } else { - char *p = buf + len - 1; - while (isspace (*p)) - *p-- = '\0'; - } - } else if (ret == 0) - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR, - "Status file of gsyncd is empty"); - else /* ret < 0 */ - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR, - "Status file of gsyncd is corrupt"); - - sys_close (status_fd); - return ret; + int ret = 0; + int status_fd = -1; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + + GF_ASSERT(path); + GF_ASSERT(buf); + status_fd = open(path, O_RDONLY); + if (status_fd == -1) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED, + "Unable to read gsyncd status file %s", path); + return -1; + } + ret = sys_read(status_fd, buf, blen - 1); + if (ret > 0) { + size_t len = strnlen(buf, ret); + /* Ensure there is a NUL byte and that it's not the first. 
*/ + if (len == 0 || len == blen - 1) { + ret = -1; + } else { + char *p = buf + len - 1; + while (isspace(*p)) + *p-- = '\0'; + } + } else if (ret == 0) + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR, + "Status file of gsyncd is empty"); + else /* ret < 0 */ + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GSYNCD_ERROR, + "Status file of gsyncd is corrupt"); + + sys_close(status_fd); + return ret; } static int -dict_get_param (dict_t *dict, char *key, char **param) +dict_get_param(dict_t *dict, char *key, char **param) { - char *dk = NULL; - char *s = NULL; - char x = '\0'; - int ret = 0; + char *dk = NULL; + char *s = NULL; + char x = '\0'; + int ret = 0; - if (dict_get_str (dict, key, param) == 0) - return 0; + if (dict_get_str(dict, key, param) == 0) + return 0; - dk = gf_strdup (key); - if (!dk) - return -1; + dk = gf_strdup(key); + if (!dk) + return -1; - s = strpbrk (dk, "-_"); - if (!s) { - ret = -1; - goto out; - } - x = (*s == '-') ? '_' : '-'; + s = strpbrk(dk, "-_"); + if (!s) { + ret = -1; + goto out; + } + x = (*s == '-') ? '_' : '-'; + *s++ = x; + while ((s = strpbrk(s, "-_"))) *s++ = x; - while ((s = strpbrk (s, "-_"))) - *s++ = x; - ret = dict_get_str (dict, dk, param); + ret = dict_get_str(dict, dk, param); out: - GF_FREE (dk); - return ret; + GF_FREE(dk); + return ret; } int -glusterd_fetch_values_from_config (char *master, char *slave, - char *confpath, dict_t *confd, - char **statefile, - char **georep_session_wrkng_dir, - char **socketfile) +glusterd_fetch_values_from_config(char *master, char *slave, char *confpath, + dict_t *confd, char **statefile, + char **georep_session_wrkng_dir, + char **socketfile) { - int ret = 0; - xlator_t *this = NULL; - - this = THIS; - GF_ASSERT (this); - - ret = glusterd_gsync_get_config (master, slave, confpath, - confd); + int ret = 0; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + + ret = glusterd_gsync_get_config(master, slave, confpath, confd); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GET_CONFIG_INFO_FAILED, + "Unable to get configuration data for %s(master), " + "%s(slave)", + master, slave); + goto out; + } + + if (statefile) { + ret = dict_get_param(confd, "state_file", statefile); if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_GET_CONFIG_INFO_FAILED, - "Unable to get configuration data for %s(master), " - "%s(slave)", master, slave); - goto out; - } - - if (statefile) { - ret = dict_get_param (confd, "state_file", statefile); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_GET_FAILED, - "Unable to get state_file's name " - "for %s(master), %s(slave). " - "Please check gsync config file.", - master, slave); - goto out; - } - } - - if (georep_session_wrkng_dir) { - ret = dict_get_param (confd, "georep_session_working_dir", - georep_session_wrkng_dir); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_GET_FAILED, - "Unable to get geo-rep session's " - "working directory name for %s(master), " - "%s(slave). Please check gsync config file.", - master, slave); - goto out; - } + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Unable to get state_file's name " + "for %s(master), %s(slave). 
" + "Please check gsync config file.", + master, slave); + goto out; + } + } + + if (georep_session_wrkng_dir) { + ret = dict_get_param(confd, "georep_session_working_dir", + georep_session_wrkng_dir); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Unable to get geo-rep session's " + "working directory name for %s(master), " + "%s(slave). Please check gsync config file.", + master, slave); + goto out; } + } - if (socketfile) { - ret = dict_get_param (confd, "state_socket_unencoded", - socketfile); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_GET_FAILED, - "Unable to get socket file's name " - "for %s(master), %s(slave). " - "Please check gsync config file.", - master, slave); - goto out; - } + if (socketfile) { + ret = dict_get_param(confd, "state_socket_unencoded", socketfile); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Unable to get socket file's name " + "for %s(master), %s(slave). " + "Please check gsync config file.", + master, slave); + goto out; } + } - ret = 0; + ret = 0; out: - gf_msg_debug (this->name, 0, "Returning %d", ret); - return ret; + gf_msg_debug(this->name, 0, "Returning %d", ret); + return ret; } int -glusterd_read_status_file (glusterd_volinfo_t *volinfo, char *slave, - char *conf_path, dict_t *dict, char *node) +glusterd_read_status_file(glusterd_volinfo_t *volinfo, char *slave, + char *conf_path, dict_t *dict, char *node) { - char temp_conf_path[PATH_MAX] = ""; - char *working_conf_path = NULL; - char *georep_session_wrkng_dir = NULL; - char *master = NULL; - char sts_val_name[1024] = ""; - char monitor_status[NAME_MAX] = ""; - char *statefile = NULL; - char *socketfile = NULL; - dict_t *confd = NULL; - char *slavekey = NULL; - char *slaveentry = NULL; - char *slaveuser = NULL; - char *saveptr = NULL; - char *temp = NULL; - char *temp_inp = NULL; - char *brick_host_uuid = NULL; - int brick_host_uuid_length = 0; - int gsync_count = 0; - int ret = 0; - glusterd_brickinfo_t *brickinfo = NULL; - gf_gsync_status_t *sts_val = NULL; - gf_boolean_t is_template_in_use = _gf_false; - glusterd_conf_t *priv = NULL; - struct stat stbuf = {0,}; - xlator_t *this = NULL; - int32_t len = 0; - - this = THIS; - GF_ASSERT (this); - - GF_ASSERT (this->private); - GF_ASSERT (volinfo); - GF_ASSERT (conf_path); - - master = volinfo->volname; - - confd = dict_new (); - if (!confd) { - gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, - "Not able to create dict."); - return -1; - } - - priv = THIS->private; - - len = snprintf (temp_conf_path, sizeof(temp_conf_path), - "%s/"GSYNC_CONF_TEMPLATE, priv->workdir); - if ((len < 0) || (len >= sizeof(temp_conf_path))) { - return -1; - } + char temp_conf_path[PATH_MAX] = ""; + char *working_conf_path = NULL; + char *georep_session_wrkng_dir = NULL; + char *master = NULL; + char sts_val_name[1024] = ""; + char monitor_status[NAME_MAX] = ""; + char *statefile = NULL; + char *socketfile = NULL; + dict_t *confd = NULL; + char *slavekey = NULL; + char *slaveentry = NULL; + char *slaveuser = NULL; + char *saveptr = NULL; + char *temp = NULL; + char *temp_inp = NULL; + char *brick_host_uuid = NULL; + int brick_host_uuid_length = 0; + int gsync_count = 0; + int ret = 0; + glusterd_brickinfo_t *brickinfo = NULL; + gf_gsync_status_t *sts_val = NULL; + gf_boolean_t is_template_in_use = _gf_false; + glusterd_conf_t *priv = NULL; + struct stat stbuf = { + 0, + }; + xlator_t *this = NULL; + int32_t len = 0; + + this = THIS; + GF_ASSERT(this); + + 
GF_ASSERT(this->private); + GF_ASSERT(volinfo); + GF_ASSERT(conf_path); + + master = volinfo->volname; + + confd = dict_new(); + if (!confd) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, + "Not able to create dict."); + return -1; + } - ret = sys_lstat (conf_path, &stbuf); - if (!ret) { - gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_CONFIG_INFO, - "Using passed config template(%s).", - conf_path); - working_conf_path = conf_path; - } else { - gf_msg (this->name, GF_LOG_WARNING, ENOENT, - GD_MSG_FILE_OP_FAILED, - "Config file (%s) missing. Looking for template " - "config file (%s)", conf_path, temp_conf_path); - ret = sys_lstat (temp_conf_path, &stbuf); - if (ret) { - gf_msg (this->name, GF_LOG_ERROR, ENOENT, - GD_MSG_FILE_OP_FAILED, "Template " - "config file (%s) missing.", temp_conf_path); - goto out; - } - gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_DEFAULT_TEMP_CONFIG, - "Using default config template(%s).", temp_conf_path); - working_conf_path = temp_conf_path; - is_template_in_use = _gf_true; - } + priv = THIS->private; |
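/*
 * Editorial aside -- not part of the commit above.  The hunk ending here
 * keeps glusterd_read_status_file()'s existing fallback: the per-session
 * gsyncd config passed in conf_path is preferred, and only if it is
 * missing does glusterd fall back to the template config under its
 * working directory, remembering that the template is in use.  A minimal
 * sketch of that selection step follows; the function name and both
 * paths used in main() are hypothetical, for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

/* Pick conf_path if it exists, otherwise template_path; returns 0 on
 * success and records whether the template had to be used. */
static int
pick_working_conf(const char *conf_path, const char *template_path,
                  char *out, size_t outlen, bool *is_template_in_use)
{
    struct stat stbuf;

    *is_template_in_use = false;
    if (lstat(conf_path, &stbuf) == 0) {
        snprintf(out, outlen, "%s", conf_path);
        return 0;
    }
    if (lstat(template_path, &stbuf) == 0) {
        snprintf(out, outlen, "%s", template_path);
        *is_template_in_use = true;
        return 0;
    }
    return -1; /* neither the session config nor the template exists */
}

int
main(void)
{
    char working[4096];
    bool from_template = false;

    if (pick_working_conf("/var/lib/glusterd/geo-replication/example.conf",
                          "/var/lib/glusterd/geo-replication/gsyncd_template.conf",
                          working, sizeof(working), &from_template) == 0)
        printf("using %s (template fallback: %s)\n", working,
               from_template ? "yes" : "no");
    return 0;
}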