diff options
Diffstat (limited to 'xlators/mgmt/glusterd')
46 files changed, 4177 insertions, 1150 deletions
diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am index eaa61c435e5..685beb42d27 100644 --- a/xlators/mgmt/glusterd/src/Makefile.am +++ b/xlators/mgmt/glusterd/src/Makefile.am @@ -25,13 +25,14 @@ glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c \ glusterd-conn-helper.c glusterd-snapd-svc.c glusterd-snapd-svc-helper.c \ glusterd-bitd-svc.c glusterd-scrub-svc.c glusterd-server-quorum.c \ glusterd-reset-brick.c glusterd-shd-svc.c glusterd-shd-svc-helper.c \ - glusterd-gfproxyd-svc.c glusterd-gfproxyd-svc-helper.c glusterd-ganesha.c + glusterd-gfproxyd-svc.c glusterd-gfproxyd-svc-helper.c glusterd-ganesha.c \ + $(CONTRIBDIR)/mount/mntent.c glusterd_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \ $(top_builddir)/libglusterd/src/libglusterd.la \ $(top_builddir)/rpc/xdr/src/libgfxdr.la \ $(top_builddir)/rpc/rpc-lib/src/libgfrpc.la \ - $(XML_LIBS) -lcrypto $(URCU_LIBS) $(URCU_CDS_LIBS) $(LIB_DL) + $(XML_LIBS) -lcrypto $(URCU_LIBS) $(URCU_CDS_LIBS) $(LIB_DL) $(GF_XLATOR_MGNT_LIBADD) noinst_HEADERS = glusterd.h glusterd-utils.h glusterd-op-sm.h \ glusterd-sm.h glusterd-store.h glusterd-mem-types.h \ @@ -46,7 +47,8 @@ noinst_HEADERS = glusterd.h glusterd-utils.h glusterd-op-sm.h \ glusterd-scrub-svc.h glusterd-server-quorum.h glusterd-errno.h \ glusterd-shd-svc.h glusterd-shd-svc-helper.h \ glusterd-gfproxyd-svc.h glusterd-gfproxyd-svc-helper.h \ - $(CONTRIBDIR)/userspace-rcu/rculist-extra.h + $(CONTRIBDIR)/userspace-rcu/rculist-extra.h \ + $(CONTRIBDIR)/mount/mntent_compat.h AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src \ diff --git a/xlators/mgmt/glusterd/src/glusterd-bitrot.c b/xlators/mgmt/glusterd/src/glusterd-bitrot.c index 9959a59e40c..37429fe9214 100644 --- a/xlators/mgmt/glusterd/src/glusterd-bitrot.c +++ b/xlators/mgmt/glusterd/src/glusterd-bitrot.c @@ -34,6 +34,7 @@ const char 
*gd_bitrot_op_list[GF_BITROT_OPTION_TYPE_MAX] = { [GF_BITROT_OPTION_TYPE_SCRUB_FREQ] = "scrub-frequency", [GF_BITROT_OPTION_TYPE_SCRUB] = "scrub", [GF_BITROT_OPTION_TYPE_EXPIRY_TIME] = "expiry-time", + [GF_BITROT_OPTION_TYPE_SIGNER_THREADS] = "signer-threads", }; int @@ -354,6 +355,81 @@ out: return ret; } +static gf_boolean_t +is_bitd_configure_noop(xlator_t *this, glusterd_volinfo_t *volinfo) +{ + gf_boolean_t noop = _gf_true; + glusterd_brickinfo_t *brickinfo = NULL; + + if (!glusterd_is_bitrot_enabled(volinfo)) + goto out; + else if (volinfo->status != GLUSTERD_STATUS_STARTED) + goto out; + else { + cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list) + { + if (!glusterd_is_local_brick(this, volinfo, brickinfo)) + continue; + noop = _gf_false; + return noop; + } + } +out: + return noop; +} + +static int +glusterd_bitrot_signer_threads(glusterd_volinfo_t *volinfo, dict_t *dict, + char *key, char **op_errstr) +{ + int32_t ret = -1; + uint32_t signer_th_count = 0; + uint32_t existing_th_count = 0; + xlator_t *this = NULL; + glusterd_conf_t *priv = NULL; + char dkey[32] = { + 0, + }; + + this = THIS; + GF_ASSERT(this); + + priv = this->private; + GF_VALIDATE_OR_GOTO(this->name, priv, out); + + ret = dict_get_uint32(dict, "signer-threads", &signer_th_count); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get bitrot signer thread count."); + goto out; + } + + ret = dict_get_uint32(volinfo->dict, key, &existing_th_count); + if (ret == 0 && signer_th_count == existing_th_count) { + goto out; + } + + snprintf(dkey, sizeof(dkey), "%d", signer_th_count); + ret = dict_set_dynstr_with_alloc(volinfo->dict, key, dkey); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Failed to set option %s", key); + goto out; + } + + if (!is_bitd_configure_noop(this, volinfo)) { + ret = priv->bitd_svc.manager(&(priv->bitd_svc), NULL, + PROC_START_NO_WAIT); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 
0, GD_MSG_BITDSVC_RECONF_FAIL, + "Failed to reconfigure bitrot services"); + goto out; + } + } +out: + return ret; +} + static int glusterd_bitrot_enable(glusterd_volinfo_t *volinfo, char **op_errstr) { @@ -594,6 +670,15 @@ glusterd_op_bitrot(dict_t *dict, char **op_errstr, dict_t *rsp_dict) volinfo, dict, "features.expiry-time", op_errstr); if (ret) goto out; + break; + + case GF_BITROT_OPTION_TYPE_SIGNER_THREADS: + ret = glusterd_bitrot_signer_threads( + volinfo, dict, "features.signer-threads", op_errstr); + if (ret) + goto out; + break; + case GF_BITROT_CMD_SCRUB_STATUS: case GF_BITROT_CMD_SCRUB_ONDEMAND: break; diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c index 5e5421ed2dc..e56cd0e6c74 100644 --- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c +++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c @@ -21,7 +21,6 @@ #include "glusterd-messages.h" #include "glusterd-server-quorum.h" #include <glusterfs/run.h> -#include "glusterd-volgen.h" #include <glusterfs/syscall.h> #include <sys/signal.h> @@ -183,6 +182,9 @@ gd_rmbr_validate_replica_count(glusterd_volinfo_t *volinfo, { int ret = -1; int replica_nodes = 0; + xlator_t *this = NULL; + this = THIS; + GF_ASSERT(this); switch (volinfo->type) { case GF_CLUSTER_TYPE_NONE: @@ -191,8 +193,8 @@ gd_rmbr_validate_replica_count(glusterd_volinfo_t *volinfo, "replica count (%d) option given for non replicate " "volume %s", replica_count, volinfo->volname); - gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_REPLICA, "%s", - err_str); + gf_smsg(this->name, GF_LOG_WARNING, EINVAL, GD_MSG_INVALID_ARGUMENT, + err_str, NULL); goto out; case GF_CLUSTER_TYPE_REPLICATE: @@ -203,8 +205,8 @@ gd_rmbr_validate_replica_count(glusterd_volinfo_t *volinfo, "than volume %s's replica count (%d)", replica_count, volinfo->volname, volinfo->replica_count); - gf_msg(THIS->name, GF_LOG_WARNING, EINVAL, GD_MSG_INVALID_ENTRY, - "%s", err_str); + gf_smsg(this->name, GF_LOG_WARNING, 
EINVAL, + GD_MSG_INVALID_ARGUMENT, err_str, NULL); goto out; } if (replica_count == volinfo->replica_count) { @@ -218,8 +220,8 @@ gd_rmbr_validate_replica_count(glusterd_volinfo_t *volinfo, "(or %dxN)", brick_count, volinfo->dist_leaf_count, volinfo->dist_leaf_count); - gf_msg(THIS->name, GF_LOG_WARNING, EINVAL, - GD_MSG_INVALID_ENTRY, "%s", err_str); + gf_smsg(this->name, GF_LOG_WARNING, EINVAL, + GD_MSG_INVALID_ARGUMENT, err_str, NULL); goto out; } ret = 1; @@ -234,6 +236,8 @@ gd_rmbr_validate_replica_count(glusterd_volinfo_t *volinfo, "need %d(xN) bricks for reducing replica " "count of the volume from %d to %d", replica_nodes, volinfo->replica_count, replica_count); + gf_smsg(this->name, GF_LOG_WARNING, EINVAL, + GD_MSG_INVALID_ARGUMENT, err_str, NULL); goto out; } break; @@ -283,6 +287,7 @@ __glusterd_handle_add_brick(rpcsvc_request_t *req) // failed to decode msg; req->rpc_err = GARBAGE_ARGS; snprintf(err_str, sizeof(err_str), "Garbage args received"); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL); goto out; } @@ -510,6 +515,8 @@ subvol_matcher_verify(int *subvols, glusterd_volinfo_t *volinfo, char *err_str, int i = 0; int ret = 0; int count = volinfo->replica_count - replica_count; + xlator_t *this = THIS; + GF_ASSERT(this); if (replica_count && subvols) { for (i = 0; i < volinfo->subvol_count; i++) { @@ -519,6 +526,8 @@ subvol_matcher_verify(int *subvols, glusterd_volinfo_t *volinfo, char *err_str, "Remove exactly %d" " brick(s) from each subvolume.", count); + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_BRICK_SUBVOL_VERIFY_FAIL, err_str, NULL); break; } } @@ -532,6 +541,8 @@ subvol_matcher_verify(int *subvols, glusterd_volinfo_t *volinfo, char *err_str, ret = -1; snprintf(err_str, err_len, "Bricks not from same subvol for %s", vol_type); + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_BRICK_SUBVOL_VERIFY_FAIL, err_str, NULL); break; } } while (++i < volinfo->subvol_count); @@ -556,6 +567,9 @@ 
glusterd_remove_brick_validate_arbiters(glusterd_volinfo_t *volinfo, glusterd_brickinfo_t *brickinfo = NULL; glusterd_brickinfo_t *last = NULL; char *arbiter_array = NULL; + xlator_t *this = NULL; + this = THIS; + GF_ASSERT(this); if (volinfo->type != GF_CLUSTER_TYPE_REPLICATE) goto out; @@ -574,6 +588,8 @@ glusterd_remove_brick_validate_arbiters(glusterd_volinfo_t *volinfo, "Remove arbiter " "brick(s) only when converting from " "arbiter to replica 2 subvolume."); + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_REMOVE_ARBITER_BRICK, err_str, NULL); ret = -1; goto out; } @@ -598,6 +614,8 @@ glusterd_remove_brick_validate_arbiters(glusterd_volinfo_t *volinfo, "Removed bricks " "must contain arbiter when converting" " to plain distribute."); + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_REMOVE_ARBITER_BRICK, err_str, NULL); ret = -1; break; } @@ -621,6 +639,7 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req) char key[64] = ""; int keylen; int i = 1; + glusterd_conf_t *conf = NULL; glusterd_volinfo_t *volinfo = NULL; glusterd_brickinfo_t *brickinfo = NULL; glusterd_brickinfo_t **brickinfo_list = NULL; @@ -639,12 +658,15 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req) GF_ASSERT(req); this = THIS; GF_ASSERT(this); + conf = this->private; + GF_ASSERT(conf); ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); if (ret < 0) { // failed to decode msg; req->rpc_err = GARBAGE_ARGS; snprintf(err_str, sizeof(err_str), "Received garbage args"); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL); goto out; } @@ -835,7 +857,17 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req) if (ret) goto out; - ret = glusterd_op_begin_synctask(req, GD_OP_REMOVE_BRICK, dict); + if (conf->op_version < GD_OP_VERSION_8_0) { + gf_msg_debug(this->name, 0, + "The cluster is operating at " + "version less than %d. 
remove-brick operation" + "falling back to syncop framework.", + GD_OP_VERSION_8_0); + ret = glusterd_op_begin_synctask(req, GD_OP_REMOVE_BRICK, dict); + } else { + ret = glusterd_mgmt_v3_initiate_all_phases(req, GD_OP_REMOVE_BRICK, + dict); + } out: if (ret) { @@ -991,6 +1023,7 @@ glusterd_op_perform_add_bricks(glusterd_volinfo_t *volinfo, int32_t count, xlator_t *this = NULL; glusterd_conf_t *conf = NULL; gf_boolean_t is_valid_add_brick = _gf_false; + gf_boolean_t restart_shd = _gf_false; struct statvfs brickstat = { 0, }; @@ -1147,6 +1180,15 @@ glusterd_op_perform_add_bricks(glusterd_volinfo_t *volinfo, int32_t count, if (glusterd_is_volume_replicate(volinfo)) { if (replica_count && conf->op_version >= GD_OP_VERSION_3_7_10) { is_valid_add_brick = _gf_true; + if (volinfo->status == GLUSTERD_STATUS_STARTED) { + ret = volinfo->shd.svc.stop(&(volinfo->shd.svc), SIGTERM); + if (ret) { + gf_msg("glusterd", GF_LOG_ERROR, 0, + GD_MSG_GLUSTER_SERVICES_STOP_FAIL, + "Failed to stop shd for %s.", volinfo->volname); + } + restart_shd = _gf_true; + } ret = generate_dummy_client_volfiles(volinfo); if (ret) { gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL, @@ -1221,6 +1263,14 @@ generate_volfiles: out: GF_FREE(free_ptr1); GF_FREE(free_ptr2); + if (restart_shd) { + if (volinfo->shd.svc.manager(&(volinfo->shd.svc), volinfo, + PROC_START_NO_WAIT)) { + gf_msg("glusterd", GF_LOG_CRITICAL, 0, + GD_MSG_GLUSTER_SERVICE_START_FAIL, + "Failed to start shd for %s.", volinfo->volname); + } + } gf_msg_debug("glusterd", 0, "Returning %d", ret); return ret; @@ -1309,14 +1359,14 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict) ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "Unable to get volume name"); goto out; } ret = glusterd_volinfo_find(volname, &volinfo); if (ret) { - 
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "Unable to find volume: %s", volname); goto out; } @@ -1328,13 +1378,7 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict) ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"), &replica_count); if (ret) { - gf_msg_debug(THIS->name, 0, "Unable to get replica count"); - } - - ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"), - &arbiter_count); - if (ret) { - gf_msg_debug(THIS->name, 0, "No arbiter count present in the dict"); + gf_msg_debug(this->name, 0, "Unable to get replica count"); } if (replica_count > 0) { @@ -1348,18 +1392,20 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict) } } - if (glusterd_is_volume_replicate(volinfo)) { + glusterd_add_peers_to_auth_list(volname); + + if (replica_count && glusterd_is_volume_replicate(volinfo)) { /* Do not allow add-brick for stopped volumes when replica-count * is being increased. */ - if (conf->op_version >= GD_OP_VERSION_3_7_10 && replica_count && - GLUSTERD_STATUS_STOPPED == volinfo->status) { + if (GLUSTERD_STATUS_STOPPED == volinfo->status && + conf->op_version >= GD_OP_VERSION_3_7_10) { ret = -1; snprintf(msg, sizeof(msg), " Volume must not be in" " stopped state when replica-count needs to " " be increased."); - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s", + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s", msg); *op_errstr = gf_strdup(msg); goto out; @@ -1367,25 +1413,31 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict) /* op-version check for replica 2 to arbiter conversion. If we * don't have this check, an older peer added as arbiter brick * will not have the arbiter xlator in its volfile. 
*/ - if ((conf->op_version < GD_OP_VERSION_3_8_0) && (arbiter_count == 1) && - (replica_count == 3)) { - ret = -1; - snprintf(msg, sizeof(msg), - "Cluster op-version must " - "be >= 30800 to add arbiter brick to a " - "replica 2 volume."); - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s", - msg); - *op_errstr = gf_strdup(msg); - goto out; + if ((replica_count == 3) && (conf->op_version < GD_OP_VERSION_3_8_0)) { + ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"), + &arbiter_count); + if (ret) { + gf_msg_debug(this->name, 0, + "No arbiter count present in the dict"); + } else if (arbiter_count == 1) { + ret = -1; + snprintf(msg, sizeof(msg), + "Cluster op-version must " + "be >= 30800 to add arbiter brick to a " + "replica 2 volume."); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s", + msg); + *op_errstr = gf_strdup(msg); + goto out; + } } /* Do not allow increasing replica count for arbiter volumes. */ - if (replica_count && volinfo->arbiter_count) { + if (volinfo->arbiter_count) { ret = -1; snprintf(msg, sizeof(msg), "Increasing replica count " "for arbiter volumes is not supported."); - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s", + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s", msg); *op_errstr = gf_strdup(msg); goto out; @@ -1394,6 +1446,43 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict) is_force = dict_get_str_boolean(dict, "force", _gf_false); + /* Check brick order if the volume type is replicate or disperse. If + * force at the end of command not given then check brick order. + * doing this check at the originator node is sufficient. + */ + + if (!is_force && is_origin_glusterd(dict)) { + ret = 0; + if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) { + gf_msg_debug(this->name, 0, + "Replicate cluster type " + "found. 
Checking brick order."); + if (replica_count) + ret = glusterd_check_brick_order(dict, msg, volinfo->type, + &volname, &bricks, &count, + replica_count); + else + ret = glusterd_check_brick_order(dict, msg, volinfo->type, + &volname, &bricks, &count, + volinfo->replica_count); + } else if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) { + gf_msg_debug(this->name, 0, + "Disperse cluster type" + " found. Checking brick order."); + ret = glusterd_check_brick_order(dict, msg, volinfo->type, &volname, + &bricks, &count, + volinfo->disperse_count); + } + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER, + "Not adding brick because of " + "bad brick order. %s", + msg); + *op_errstr = gf_strdup(msg); + goto out; + } + } + if (volinfo->replica_count < replica_count && !is_force) { cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list) { @@ -1410,7 +1499,7 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict) if (len < 0) { strcpy(msg, "<error>"); } - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s", + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s", msg); *op_errstr = gf_strdup(msg); goto out; @@ -1442,24 +1531,40 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict) "Volume name %s rebalance is in " "progress. Please retry after completion", volname); - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_OIP_RETRY_LATER, "%s", msg); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OIP_RETRY_LATER, "%s", msg); *op_errstr = gf_strdup(msg); ret = -1; goto out; } - ret = dict_get_int32n(dict, "count", SLEN("count"), &count); - if (ret) { - gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, - "Unable to get count"); - goto out; + if (volinfo->snap_count > 0 || !cds_list_empty(&volinfo->snap_volumes)) { + snprintf(msg, sizeof(msg), + "Volume %s has %" PRIu64 + " snapshots. " + "Changing the volume configuration will not effect snapshots." 
+ "But the snapshot brick mount should be intact to " + "make them function.", + volname, volinfo->snap_count); + gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_SNAP_WARN, "%s", msg); + msg[0] = '\0'; + } + + if (!count) { + ret = dict_get_int32n(dict, "count", SLEN("count"), &count); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get count"); + goto out; + } } - ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks); - if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, - "Unable to get bricks"); - goto out; + if (!bricks) { + ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get bricks"); + goto out; + } } if (bricks) { @@ -1478,7 +1583,7 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict) "brick path %s is " "too long", brick); - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRKPATH_TOO_LONG, "%s", + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRKPATH_TOO_LONG, "%s", msg); *op_errstr = gf_strdup(msg); @@ -1489,7 +1594,7 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict) ret = glusterd_brickinfo_new_from_brick(brick, &brickinfo, _gf_true, NULL); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_NOT_FOUND, + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_NOT_FOUND, "Add-brick: Unable" " to get brickinfo"); goto out; @@ -1559,7 +1664,7 @@ out: GF_FREE(str_ret); GF_FREE(all_bricks); - gf_msg_debug(THIS->name, 0, "Returning %d", ret); + gf_msg_debug(this->name, 0, "Returning %d", ret); return ret; } @@ -1583,6 +1688,8 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count, }; glusterd_conf_t *priv = THIS->private; int pid = -1; + xlator_t *this = THIS; + GF_ASSERT(this); /* Check whether all the nodes of the bricks to be removed are * up, if not fail the operation */ @@ -1591,6 +1698,8 @@ 
glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count, ret = dict_get_strn(dict, key, keylen, &brick); if (ret) { snprintf(msg, sizeof(msg), "Unable to get %s", key); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "key=%s", key, NULL); *errstr = gf_strdup(msg); goto out; } @@ -1602,6 +1711,8 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count, "Incorrect brick " "%s for volume %s", brick, volinfo->volname); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INCORRECT_BRICK, + "Brick=%s, Volume=%s", brick, volinfo->volname, NULL); *errstr = gf_strdup(msg); goto out; } @@ -1614,6 +1725,8 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count, "is not decommissioned. " "Use start or force option", brick); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_BRICK_NOT_DECOM, + "Use 'start' or 'force' option, Brick=%s", brick, NULL); *errstr = gf_strdup(msg); ret = -1; goto out; @@ -1640,6 +1753,10 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count, "brick %s. 
Use force option to " "remove the offline brick", brick); + gf_smsg( + this->name, GF_LOG_ERROR, errno, GD_MSG_BRICK_STOPPED, + "Use 'force' option to remove the offline brick, Brick=%s", + brick, NULL); *errstr = gf_strdup(msg); ret = -1; goto out; @@ -1650,6 +1767,8 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count, "Found dead " "brick %s", brick); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_BRICK_DEAD, + "Brick=%s", brick, NULL); *errstr = gf_strdup(msg); ret = -1; goto out; @@ -1667,6 +1786,8 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count, "Host node of the " "brick %s is not in cluster", brick); + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_BRICK_HOST_NOT_FOUND, "Brick=%s", brick, NULL); *errstr = gf_strdup(msg); ret = -1; goto out; @@ -1677,6 +1798,8 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count, "Host node of the " "brick %s is down", brick); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_HOST_DOWN, + "Brick=%s", brick, NULL); *errstr = gf_strdup(msg); ret = -1; goto out; @@ -1756,6 +1879,7 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr) errstr = gf_strdup( "Deleting all the bricks of the " "volume is not allowed"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_DELETE, NULL); ret = -1; goto out; } @@ -1764,6 +1888,8 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr) switch (cmd) { case GF_OP_CMD_NONE: errstr = gf_strdup("no remove-brick command issued"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_NO_REMOVE_CMD, + NULL); goto out; case GF_OP_CMD_STATUS: @@ -1837,6 +1963,21 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr) goto out; } + if (volinfo->snap_count > 0 || + !cds_list_empty(&volinfo->snap_volumes)) { + snprintf(msg, sizeof(msg), + "Volume %s has %" PRIu64 + " snapshots. " + "Changing the volume configuration will not effect " + "snapshots." 
+ "But the snapshot brick mount should be intact to " + "make them function.", + volname, volinfo->snap_count); + gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_SNAP_WARN, "%s", + msg); + msg[0] = '\0'; + } + ret = glusterd_remove_brick_validate_bricks( cmd, brick_count, dict, volinfo, &errstr, GF_DEFRAG_CMD_NONE); if (ret) @@ -1873,6 +2014,8 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr) errstr = gf_strdup( "use 'force' option as migration " "is in progress"); + gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_MIGRATION_PROG, + "Use 'force' option", NULL); goto out; } @@ -1880,6 +2023,8 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr) errstr = gf_strdup( "use 'force' option as migration " "has failed"); + gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_MIGRATION_FAIL, + "Use 'force' option", NULL); goto out; } @@ -1890,6 +2035,11 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr) "use 'force' option as migration " "of some files might have been skipped or " "has failed"); + gf_smsg(this->name, GF_LOG_WARNING, 0, + GD_MSG_MIGRATION_FAIL, + "Use 'force' option, some files might have been " + "skipped", + NULL); goto out; } } @@ -2084,6 +2234,119 @@ out: } int +glusterd_post_commit_add_brick(dict_t *dict, char **op_errstr) +{ + int ret = 0; + char *volname = NULL; + + ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); + + if (ret) { + gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get volume name"); + goto out; + } + ret = glusterd_replace_old_auth_allow_list(volname); +out: + return ret; +} + +int +glusterd_post_commit_replace_brick(dict_t *dict, char **op_errstr) +{ + int ret = 0; + char *volname = NULL; + + ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); + + if (ret) { + gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get volume name"); + goto out; + } + ret = glusterd_replace_old_auth_allow_list(volname); +out: + return ret; +} + 
+int +glusterd_set_rebalance_id_for_remove_brick(dict_t *req_dict, dict_t *rsp_dict) +{ + int ret = -1; + char *volname = NULL; + glusterd_volinfo_t *volinfo = NULL; + char msg[2048] = {0}; + char *task_id_str = NULL; + xlator_t *this = NULL; + int32_t cmd = 0; + + this = THIS; + GF_ASSERT(this); + + GF_ASSERT(rsp_dict); + GF_ASSERT(req_dict); + + ret = dict_get_strn(rsp_dict, "volname", SLEN("volname"), &volname); + if (ret) { + gf_msg_debug(this->name, 0, "volname not found"); + goto out; + } + + ret = glusterd_volinfo_find(volname, &volinfo); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND, + "Unable to allocate memory"); + goto out; + } + + ret = dict_get_int32n(rsp_dict, "command", SLEN("command"), &cmd); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Unable to get command"); + goto out; + } + + /* remove brick task id is generted in glusterd_op_stage_remove_brick(), + * but rsp_dict is unavailable there. So copying it to rsp_dict from + * req_dict here. 
*/ + + if (is_origin_glusterd(rsp_dict)) { + ret = dict_get_strn(req_dict, GF_REMOVE_BRICK_TID_KEY, + SLEN(GF_REMOVE_BRICK_TID_KEY), &task_id_str); + if (ret) { + snprintf(msg, sizeof(msg), "Missing rebalance id for remove-brick"); + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_REBALANCE_ID_MISSING, + "%s", msg); + ret = 0; + } else { + gf_uuid_parse(task_id_str, volinfo->rebal.rebalance_id); + + ret = glusterd_copy_uuid_to_dict(volinfo->rebal.rebalance_id, + rsp_dict, GF_REMOVE_BRICK_TID_KEY, + SLEN(GF_REMOVE_BRICK_TID_KEY)); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, + GD_MSG_REMOVE_BRICK_ID_SET_FAIL, + "Failed to set remove-brick-id"); + goto out; + } + } + } + if (!gf_uuid_is_null(volinfo->rebal.rebalance_id) && + GD_OP_REMOVE_BRICK == volinfo->rebal.op) { + ret = glusterd_copy_uuid_to_dict(volinfo->rebal.rebalance_id, rsp_dict, + GF_REMOVE_BRICK_TID_KEY, + SLEN(GF_REMOVE_BRICK_TID_KEY)); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Failed to set task-id for volume %s", volname); + goto out; + } + } +out: + return ret; +} +int glusterd_op_remove_brick(dict_t *dict, char **op_errstr) { int ret = -1; @@ -2397,7 +2660,7 @@ out: GF_FREE(brick_tmpstr); if (bricks_dict) dict_unref(bricks_dict); - + gf_msg_debug(this->name, 0, "returning %d ", ret); return ret; } diff --git a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c index 09f0a35dc45..5c01f0c70b6 100644 --- a/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c +++ b/xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c @@ -26,12 +26,17 @@ glusterd_conn_init(glusterd_conn_t *conn, char *sockpath, int frame_timeout, xlator_t *this = THIS; glusterd_svc_t *svc = NULL; - if (!this) + if (!this) { + gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_XLATOR_NOT_DEFINED, + NULL); goto out; + } options = dict_new(); - if (!options) + if (!options) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } svc = 
glusterd_conn_get_svc_object(conn); if (!svc) { @@ -46,8 +51,11 @@ glusterd_conn_init(glusterd_conn_t *conn, char *sockpath, int frame_timeout, ret = dict_set_int32n(options, "transport.socket.ignore-enoent", SLEN("transport.socket.ignore-enoent"), 1); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=transport.socket.ignore-enoent", NULL); goto out; + } /* @options is free'd by rpc_transport when destroyed */ rpc = rpc_clnt_new(options, this, (char *)svc->name, 16); @@ -61,9 +69,10 @@ glusterd_conn_init(glusterd_conn_t *conn, char *sockpath, int frame_timeout, goto out; ret = snprintf(conn->sockpath, sizeof(conn->sockpath), "%s", sockpath); - if (ret < 0) + if (ret < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); goto out; - else + } else ret = 0; conn->frame_timeout = frame_timeout; diff --git a/xlators/mgmt/glusterd/src/glusterd-ganesha.c b/xlators/mgmt/glusterd/src/glusterd-ganesha.c index cf567fa4172..f08bd6cebee 100644 --- a/xlators/mgmt/glusterd/src/glusterd-ganesha.c +++ b/xlators/mgmt/glusterd/src/glusterd-ganesha.c @@ -421,6 +421,35 @@ check_host_list(void) } int +gd_ganesha_send_dbus(char *volname, char *value) +{ + runner_t runner = { + 0, + }; + int ret = -1; + runinit(&runner); + + GF_VALIDATE_OR_GOTO("glusterd-ganesha", volname, out); + GF_VALIDATE_OR_GOTO("glusterd-ganesha", value, out); + + ret = 0; + if (check_host_list()) { + /* Check whether ganesha is running on this node */ + if (manage_service("status")) { + gf_msg("glusterd-ganesha", GF_LOG_WARNING, 0, + GD_MSG_GANESHA_NOT_RUNNING, + "Export failed, NFS-Ganesha is not running"); + } else { + runner_add_args(&runner, GANESHA_PREFIX "/dbus-send.sh", CONFDIR, + value, volname, NULL); + ret = runner_run(&runner); + } + } +out: + return ret; +} + +int manage_export_config(char *volname, char *value, char **op_errstr) { runner_t runner = { @@ -447,9 +476,6 @@ int ganesha_manage_export(dict_t *dict, char *value, gf_boolean_t 
update_cache_invalidation, char **op_errstr) { - runner_t runner = { - 0, - }; int ret = -1; glusterd_volinfo_t *volinfo = NULL; dict_t *vol_opts = NULL; @@ -458,7 +484,6 @@ ganesha_manage_export(dict_t *dict, char *value, glusterd_conf_t *priv = NULL; gf_boolean_t option = _gf_false; - runinit(&runner); this = THIS; GF_ASSERT(this); priv = this->private; @@ -538,26 +563,13 @@ ganesha_manage_export(dict_t *dict, char *value, goto out; } } - - if (check_host_list()) { - /* Check whether ganesha is running on this node */ - if (manage_service("status")) { - gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_GANESHA_NOT_RUNNING, - "Export failed, NFS-Ganesha is not running"); - } else { - runner_add_args(&runner, GANESHA_PREFIX "/dbus-send.sh", CONFDIR, - value, volname, NULL); - ret = runner_run(&runner); - if (ret) { - gf_asprintf(op_errstr, - "Dynamic export" - " addition/deletion failed." - " Please see log file for details"); - goto out; - } - } + ret = gd_ganesha_send_dbus(volname, value); + if (ret) { + gf_asprintf(op_errstr, + "Dynamic export addition/deletion failed." + " Please see log file for details"); + goto out; } - if (update_cache_invalidation) { vol_opts = volinfo->dict; ret = dict_set_dynstr_with_alloc(vol_opts, @@ -617,8 +629,9 @@ tear_down_cluster(gf_boolean_t run_teardown) goto out; } - GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch); - while (entry) { + while ((entry = sys_readdir(dir, scratch))) { + if (gf_irrelevant_entry(entry)) + continue; snprintf(path, PATH_MAX, "%s/%s", CONFDIR, entry->d_name); ret = sys_lstat(path, &st); if (ret == -1) { @@ -649,7 +662,6 @@ tear_down_cluster(gf_boolean_t run_teardown) gf_msg_debug(THIS->name, 0, "%s %s", ret ? 
"Failed to remove" : "Removed", entry->d_name); - GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch); } ret = sys_closedir(dir); diff --git a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c index 76b7684538f..bf062c87060 100644 --- a/xlators/mgmt/glusterd/src/glusterd-geo-rep.c +++ b/xlators/mgmt/glusterd/src/glusterd-geo-rep.c @@ -115,13 +115,18 @@ __glusterd_handle_sys_exec(rpcsvc_request_t *req) ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); if (ret < 0) { req->rpc_err = GARBAGE_ARGS; + snprintf(err_str, sizeof(err_str), "Garbage args received"); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL); goto out; } if (cli_req.dict.dict_len) { dict = dict_new(); - if (!dict) + if (!dict) { + gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, + NULL); goto out; + } ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len, &dict); @@ -142,13 +147,18 @@ __glusterd_handle_sys_exec(rpcsvc_request_t *req) snprintf(err_str, sizeof(err_str), "Failed to get " "the uuid of local glusterd"); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_UUID_GET_FAIL, + NULL); ret = -1; goto out; } ret = dict_set_dynstr(dict, "host-uuid", host_uuid); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=host-uuid", NULL); goto out; + } } ret = glusterd_op_begin_synctask(req, cli_op, dict); @@ -188,13 +198,18 @@ __glusterd_handle_copy_file(rpcsvc_request_t *req) ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); if (ret < 0) { req->rpc_err = GARBAGE_ARGS; + snprintf(err_str, sizeof(err_str), "Garbage args received"); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL); goto out; } if (cli_req.dict.dict_len) { dict = dict_new(); - if (!dict) + if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, + NULL); goto out; + } ret = dict_unserialize(cli_req.dict.dict_val, 
cli_req.dict.dict_len, &dict); @@ -215,6 +230,8 @@ __glusterd_handle_copy_file(rpcsvc_request_t *req) snprintf(err_str, sizeof(err_str), "Failed to get " "the uuid of local glusterd"); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_UUID_GET_FAIL, + NULL); ret = -1; goto out; } @@ -267,13 +284,18 @@ __glusterd_handle_gsync_set(rpcsvc_request_t *req) ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); if (ret < 0) { req->rpc_err = GARBAGE_ARGS; + snprintf(err_str, sizeof(err_str), "Garbage args received"); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL); goto out; } if (cli_req.dict.dict_len) { dict = dict_new(); - if (!dict) + if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, + NULL); goto out; + } ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len, &dict); @@ -294,6 +316,8 @@ __glusterd_handle_gsync_set(rpcsvc_request_t *req) snprintf(err_str, sizeof(err_str), "Failed to get " "the uuid of local glusterd"); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_UUID_GET_FAIL, + NULL); ret = -1; goto out; } @@ -2251,6 +2275,9 @@ glusterd_op_verify_gsync_running(glusterd_volinfo_t *volinfo, char *slave, "Volume %s needs to be started " "before " GEOREP " start", volinfo->volname); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_GEO_REP_START_FAILED, + "Volume is not in a started state, Volname=%s", + volinfo->volname, NULL); goto out; } @@ -2555,6 +2582,7 @@ glusterd_op_stage_copy_file(dict_t *dict, char **op_errstr) len = snprintf(abs_filename, sizeof(abs_filename), "%s/%s", priv->workdir, filename); if ((len < 0) || (len >= sizeof(abs_filename))) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_COPY_FAIL, NULL); ret = -1; goto out; } @@ -2567,6 +2595,9 @@ glusterd_op_stage_copy_file(dict_t *dict, char **op_errstr) if (len < 0) { strcpy(errmsg, "<error>"); } + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_REALPATH_GET_FAIL, + "Realpath=%s, Reason=%s", priv->workdir, strerror(errno), + 
NULL); *op_errstr = gf_strdup(errmsg); ret = -1; goto out; @@ -2577,6 +2608,8 @@ glusterd_op_stage_copy_file(dict_t *dict, char **op_errstr) "Failed to get " "realpath of %s: %s", filename, strerror(errno)); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_REALPATH_GET_FAIL, + "Filename=%s, Reason=%s", filename, strerror(errno), NULL); *op_errstr = gf_strdup(errmsg); ret = -1; goto out; @@ -2586,6 +2619,7 @@ glusterd_op_stage_copy_file(dict_t *dict, char **op_errstr) will succeed for /var/lib/glusterd_bad */ len = snprintf(workdir, sizeof(workdir), "%s/", realpath_workdir); if ((len < 0) || (len >= sizeof(workdir))) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_COPY_FAIL, NULL); ret = -1; goto out; } @@ -2599,6 +2633,8 @@ glusterd_op_stage_copy_file(dict_t *dict, char **op_errstr) if (len < 0) { strcpy(errmsg, "<error>"); } + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_SRC_FILE_ERROR, errmsg, + NULL); *op_errstr = gf_strdup(errmsg); ret = -1; goto out; @@ -2613,6 +2649,8 @@ glusterd_op_stage_copy_file(dict_t *dict, char **op_errstr) if (len < 0) { strcpy(errmsg, "<error>"); } + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_SRC_FILE_ERROR, errmsg, + NULL); *op_errstr = gf_strdup(errmsg); goto out; } @@ -2621,9 +2659,9 @@ glusterd_op_stage_copy_file(dict_t *dict, char **op_errstr) snprintf(errmsg, sizeof(errmsg), "Source file" " is not a regular file."); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_SRC_FILE_ERROR, errmsg, + NULL); *op_errstr = gf_strdup(errmsg); - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SRC_FILE_ERROR, "%s", - errmsg); ret = -1; goto out; } @@ -2842,8 +2880,11 @@ glusterd_verify_slave(char *volname, char *slave_url, char *slave_vol, */ if (strstr(slave_url, "@")) { slave_url_buf = gf_strdup(slave_url); - if (!slave_url_buf) + if (!slave_url_buf) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_STRDUP_FAILED, + "Slave_url=%s", slave_url, NULL); goto out; + } slave_user = strtok_r(slave_url_buf, "@", &save_ptr); slave_ip = strtok_r(NULL, "@", 
&save_ptr); diff --git a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c index b01fd4da24b..a0bfea41f0f 100644 --- a/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c +++ b/xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c @@ -310,7 +310,7 @@ glusterd_gfproxydsvc_start(glusterd_svc_t *svc, int flags) } runinit(&runner); - if (this->ctx->cmd_args.valgrind) { + if (this->ctx->cmd_args.vgtool != _gf_none) { len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-%s", svc->proc.logdir, svc->proc.logfile); if ((len < 0) || (len >= PATH_MAX)) { @@ -318,8 +318,13 @@ glusterd_gfproxydsvc_start(glusterd_svc_t *svc, int flags) goto out; } - runner_add_args(&runner, "valgrind", "--leak-check=full", - "--trace-children=yes", "--track-origins=yes", NULL); + if (this->ctx->cmd_args.vgtool == _gf_memcheck) + runner_add_args(&runner, "valgrind", "--leak-check=full", + "--trace-children=yes", "--track-origins=yes", + NULL); + else + runner_add_args(&runner, "valgrind", "--tool=drd", NULL); + runner_argprintf(&runner, "--log-file=%s", valgrind_logfile); } diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c index 175e73f5ae4..1b21c40596d 100644 --- a/xlators/mgmt/glusterd/src/glusterd-handler.c +++ b/xlators/mgmt/glusterd/src/glusterd-handler.c @@ -140,6 +140,7 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname, ctx->req = req; if (!dict) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); ret = -1; goto out; } @@ -147,9 +148,11 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname, ret = dict_unserialize(friend_req->vols.vols_val, friend_req->vols.vols_len, &dict); - if (ret) + if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, + NULL); goto out; - else + } else dict->extra_stdfree = friend_req->vols.vols_val; ctx->vols = dict; @@ -386,82 +389,129 @@ 
glusterd_add_volume_detail_to_dict(glusterd_volinfo_t *volinfo, dict_t *volumes, keylen = snprintf(key, sizeof(key), "volume%d.name", count); ret = dict_set_strn(volumes, key, keylen, volinfo->volname); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "volume%d.type", count); ret = dict_set_int32n(volumes, key, keylen, volinfo->type); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "volume%d.status", count); ret = dict_set_int32n(volumes, key, keylen, volinfo->status); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "volume%d.brick_count", count); ret = dict_set_int32n(volumes, key, keylen, volinfo->brick_count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "volume%d.dist_count", count); ret = dict_set_int32n(volumes, key, keylen, volinfo->dist_leaf_count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "volume%d.stripe_count", count); ret = dict_set_int32n(volumes, key, keylen, volinfo->stripe_count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "volume%d.replica_count", count); ret = dict_set_int32n(volumes, key, keylen, volinfo->replica_count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "volume%d.disperse_count", count); ret = 
dict_set_int32n(volumes, key, keylen, volinfo->disperse_count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "volume%d.redundancy_count", count); ret = dict_set_int32n(volumes, key, keylen, volinfo->redundancy_count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "volume%d.arbiter_count", count); ret = dict_set_int32n(volumes, key, keylen, volinfo->arbiter_count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "volume%d.transport", count); ret = dict_set_int32n(volumes, key, keylen, volinfo->transport_type); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "volume%d.thin_arbiter_count", count); ret = dict_set_int32n(volumes, key, keylen, volinfo->thin_arbiter_count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } volume_id_str = gf_strdup(uuid_utoa(volinfo->volume_id)); - if (!volume_id_str) + if (!volume_id_str) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "volume%d.volume_id", count); ret = dict_set_dynstrn(volumes, key, keylen, volume_id_str); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "volume%d.rebalance", count); ret = dict_set_int32n(volumes, key, keylen, volinfo->rebal.defrag_cmd); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } keylen = 
snprintf(key, sizeof(key), "volume%d.snap_count", count); ret = dict_set_int32n(volumes, key, keylen, volinfo->snap_count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list) { @@ -474,23 +524,33 @@ glusterd_add_volume_detail_to_dict(glusterd_volinfo_t *volinfo, dict_t *volumes, len = snprintf(brick, sizeof(brick), "%s:%s", brickinfo->hostname, brickinfo->path); if ((len < 0) || (len >= sizeof(brick))) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); ret = -1; goto out; } buf = gf_strdup(brick); keylen = snprintf(key, sizeof(key), "volume%d.brick%d", count, i); ret = dict_set_dynstrn(volumes, key, keylen, buf); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "volume%d.brick%d.uuid", count, i); snprintf(brick_uuid, sizeof(brick_uuid), "%s", uuid_utoa(brickinfo->uuid)); buf = gf_strdup(brick_uuid); - if (!buf) + if (!buf) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, + "brick_uuid=%s", brick_uuid, NULL); goto out; + } ret = dict_set_dynstrn(volumes, key, keylen, buf); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } i++; } @@ -500,6 +560,7 @@ glusterd_add_volume_detail_to_dict(glusterd_volinfo_t *volinfo, dict_t *volumes, len = snprintf(ta_brick, sizeof(ta_brick), "%s:%s", ta_brickinfo->hostname, ta_brickinfo->path); if ((len < 0) || (len >= sizeof(ta_brick))) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); ret = -1; goto out; } @@ -507,16 +568,23 @@ glusterd_add_volume_detail_to_dict(glusterd_volinfo_t *volinfo, dict_t *volumes, keylen = snprintf(key, sizeof(key), "volume%d.thin_arbiter_brick", count); ret = dict_set_dynstrn(volumes, key, keylen, buf); - if (ret) + if 
(ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } } ret = glusterd_add_arbiter_info_to_bricks(volinfo, volumes, count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_ARBITER_BRICK_SET_INFO_FAIL, NULL); goto out; + } dict = volinfo->dict; if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); ret = 0; goto out; } @@ -812,11 +880,14 @@ glusterd_req_ctx_create(rpcsvc_request_t *rpc_req, int op, uuid_t uuid, gf_msg_debug(this->name, 0, "Received op from uuid %s", str); dict = dict_new(); - if (!dict) + if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } req_ctx = GF_CALLOC(1, sizeof(*req_ctx), mem_type); if (!req_ctx) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); goto out; } @@ -824,8 +895,8 @@ glusterd_req_ctx_create(rpcsvc_request_t *rpc_req, int op, uuid_t uuid, req_ctx->op = op; ret = dict_unserialize(buf_val, buf_len, &dict); if (ret) { - gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, - "failed to unserialize the dictionary"); + gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, + NULL); goto out; } @@ -1399,7 +1470,7 @@ __glusterd_handle_cli_get_volume(rpcsvc_request_t *req) goto out; } - gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_GET_VOL_REQ_RCVD, + gf_msg(this->name, GF_LOG_DEBUG, 0, GD_MSG_GET_VOL_REQ_RCVD, "Received get vol req"); if (cli_req.dict.dict_len) { @@ -1601,6 +1672,8 @@ __glusterd_handle_cli_uuid_get(rpcsvc_request_t *req) if (cli_req.dict.dict_len) { dict = dict_new(); if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, + NULL); ret = -1; goto out; } @@ -1623,6 +1696,7 @@ __glusterd_handle_cli_uuid_get(rpcsvc_request_t *req) rsp_dict = dict_new(); if (!rsp_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); ret = -1; goto out; } @@ -1639,9 +1713,8 @@ 
__glusterd_handle_cli_uuid_get(rpcsvc_request_t *req) ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val, &rsp.dict.dict_len); if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL, - "Failed to serialize " - "dictionary."); + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; } ret = 0; @@ -1694,8 +1767,10 @@ __glusterd_handle_cli_list_volume(rpcsvc_request_t *req) GF_ASSERT(priv); dict = dict_new(); - if (!dict) + if (!dict) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } cds_list_for_each_entry(volinfo, &priv->volumes, vol_list) { @@ -1707,8 +1782,11 @@ __glusterd_handle_cli_list_volume(rpcsvc_request_t *req) } ret = dict_set_int32n(dict, "count", SLEN("count"), count); - if (ret) + if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=count", NULL); goto out; + } ret = dict_allocate_and_serialize(dict, &rsp.dict.dict_val, &rsp.dict.dict_len); @@ -1790,6 +1868,8 @@ __glusterd_handle_ganesha_cmd(rpcsvc_request_t *req) /* Unserialize the dictionary */ dict = dict_new(); if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, + NULL); ret = -1; goto out; } @@ -2158,9 +2238,8 @@ glusterd_fsm_log_send_resp(rpcsvc_request_t *req, int op_ret, char *op_errstr, ret = dict_allocate_and_serialize(dict, &rsp.fsm_log.fsm_log_val, &rsp.fsm_log.fsm_log_len); if (ret < 0) { - gf_msg("glusterd", GF_LOG_ERROR, 0, - GD_MSG_DICT_SERL_LENGTH_GET_FAIL, - "failed to get serialized length of dict"); + gf_smsg("glusterd", GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); return ret; } } @@ -2206,6 +2285,7 @@ __glusterd_handle_fsm_log(rpcsvc_request_t *req) dict = dict_new(); if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); ret = -1; goto out; } @@ -2432,8 +2512,8 @@ glusterd_op_stage_send_resp(rpcsvc_request_t *req, int32_t op, int32_t 
status, ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val, &rsp.dict.dict_len); if (ret < 0) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL, - "failed to get serialized length of dict"); + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); return ret; } @@ -2472,9 +2552,8 @@ glusterd_op_commit_send_resp(rpcsvc_request_t *req, int32_t op, int32_t status, ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val, &rsp.dict.dict_len); if (ret < 0) { - gf_msg(this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_SERL_LENGTH_GET_FAIL, - "failed to get serialized length of dict"); + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; } } @@ -2715,12 +2794,18 @@ __glusterd_handle_friend_update(rpcsvc_request_t *req) } ret = dict_get_int32n(dict, "count", SLEN("count"), &count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=count", NULL); goto out; + } ret = dict_get_int32n(dict, "op", SLEN("op"), &op); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=op", NULL); goto out; + } if (GD_FRIEND_UPDATE_DEL == op) { (void)glusterd_handle_friend_update_delete(dict); @@ -2979,8 +3064,11 @@ __glusterd_handle_cli_profile_volume(rpcsvc_request_t *req) if (cli_req.dict.dict_len > 0) { dict = dict_new(); - if (!dict) + if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, + NULL); goto out; + } dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len, &dict); } @@ -3207,6 +3295,7 @@ __glusterd_handle_umount(rpcsvc_request_t *req) /* check if it is allowed to umount path */ path = gf_strdup(umnt_req.path); if (!path) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, NULL); rsp.op_errno = ENOMEM; goto out; } @@ -3414,12 +3503,16 @@ glusterd_friend_rpc_create(xlator_t *this, glusterd_peerinfo_t *peerinfo, char *af = 
NULL; peerctx = GF_CALLOC(1, sizeof(*peerctx), gf_gld_mt_peerctx_t); - if (!peerctx) + if (!peerctx) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); goto out; + } options = dict_new(); - if (!options) + if (!options) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } if (args) peerctx->args = *args; @@ -3513,6 +3606,7 @@ glusterd_friend_add(const char *hoststr, int port, *friend = glusterd_peerinfo_new(state, uuid, hoststr, port); if (*friend == NULL) { ret = -1; + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_ADD_FAIL, NULL); goto out; } @@ -4090,13 +4184,15 @@ glusterd_list_friends(rpcsvc_request_t *req, dict_t *dict, int32_t flags) }; int keylen; - priv = THIS->private; + xlator_t *this = THIS; + GF_ASSERT(this); + + priv = this->private; GF_ASSERT(priv); friends = dict_new(); if (!friends) { - gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY, - "Out of Memory"); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; } @@ -4122,24 +4218,36 @@ unlock: keylen = snprintf(key, sizeof(key), "friend%d.uuid", count); uuid_utoa_r(MY_UUID, my_uuid_str); ret = dict_set_strn(friends, key, keylen, my_uuid_str); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "friend%d.hostname", count); ret = dict_set_nstrn(friends, key, keylen, "localhost", SLEN("localhost")); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "friend%d.connected", count); ret = dict_set_int32n(friends, key, keylen, 1); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } } ret = dict_set_int32n(friends, "count", SLEN("count"), count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 
errno, GD_MSG_DICT_SET_FAILED, + "Key=count", NULL); goto out; + } ret = dict_allocate_and_serialize(friends, &rsp.friends.friends_val, &rsp.friends.friends_len); @@ -4311,8 +4419,11 @@ __glusterd_handle_status_volume(rpcsvc_request_t *req) if (cli_req.dict.dict_len > 0) { dict = dict_new(); - if (!dict) + if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, + NULL); goto out; + } ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len, &dict); if (ret < 0) { @@ -4580,6 +4691,7 @@ __glusterd_handle_barrier(rpcsvc_request_t *req) dict = dict_new(); if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); ret = -1; goto out; } @@ -5114,12 +5226,17 @@ glusterd_print_gsync_status_by_vol(FILE *fp, glusterd_volinfo_t *volinfo) 0, }; + xlator_t *this = THIS; + GF_ASSERT(this); + GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out); GF_VALIDATE_OR_GOTO(THIS->name, fp, out); gsync_rsp_dict = dict_new(); - if (!gsync_rsp_dict) + if (!gsync_rsp_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } ret = gethostname(my_hostname, sizeof(my_hostname)); if (ret) { @@ -5146,7 +5263,7 @@ glusterd_print_snapinfo_by_vol(FILE *fp, glusterd_volinfo_t *volinfo, glusterd_volinfo_t *tmp_vol = NULL; glusterd_snap_t *snapinfo = NULL; int snapcount = 0; - char timestr[64] = { + char timestr[GF_TIMESTR_SIZE] = { 0, }; char snap_status_str[STATUS_STRLEN] = { @@ -5264,16 +5381,25 @@ glusterd_print_client_details(FILE *fp, dict_t *dict, ret = dict_set_strn(dict, "brick-name", SLEN("brick-name"), brickinfo->path); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=brick-name", NULL); goto out; + } ret = dict_set_int32n(dict, "cmd", SLEN("cmd"), GF_CLI_STATUS_CLIENTS); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=cmd", NULL); goto out; + } ret = dict_set_strn(dict, "volname", SLEN("volname"), 
volinfo->volname); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=volname", NULL); goto out; + } ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val, &brick_req->input.input_len); @@ -5467,7 +5593,7 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict) ret = dict_get_strn(dict, "filename", SLEN("filename"), &tmp_str); if (ret) { - now = time(NULL); + now = gf_time(); strftime(timestamp, sizeof(timestamp), "%Y%m%d_%H%M%S", localtime(&now)); gf_asprintf(&filename, "%s_%s", "glusterd_state", timestamp); @@ -5911,14 +6037,27 @@ get_brickinfo_from_brickid(char *brickid, glusterd_brickinfo_t **brickinfo) uuid_t volid = {0}; int ret = -1; + xlator_t *this = THIS; + GF_ASSERT(this); + brickid_dup = gf_strdup(brickid); - if (!brickid_dup) + if (!brickid_dup) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, + "brick_id=%s", brickid, NULL); goto out; + } volid_str = brickid_dup; brick = strchr(brickid_dup, ':'); - if (!volid_str || !brick) + if (!volid_str) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL); goto out; + } + + if (!brick) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL); + goto out; + } *brick = '\0'; brick++; diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c index 7cb70fcb4e2..d96e35503dd 100644 --- a/xlators/mgmt/glusterd/src/glusterd-handshake.c +++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c @@ -111,6 +111,8 @@ get_snap_volname_and_volinfo(const char *volpath, char **volname, volfile_token = strtok_r(NULL, "/", &save_ptr); *volname = gf_strdup(volfile_token); if (NULL == *volname) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, + "Volname=%s", volfile_token, NULL); ret = -1; goto out; } @@ -236,6 +238,7 @@ build_volfile_path(char *volume_id, char *path, size_t path_len, if (volid_ptr) { volid_ptr = strchr(volid_ptr, '/'); if (!volid_ptr) { + 
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL); ret = -1; goto out; } @@ -256,6 +259,7 @@ build_volfile_path(char *volume_id, char *path, size_t path_len, if (volid_ptr) { volid_ptr = strchr(volid_ptr, '/'); if (!volid_ptr) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL); ret = -1; goto out; } @@ -271,6 +275,7 @@ build_volfile_path(char *volume_id, char *path, size_t path_len, if (volid_ptr) { volid_ptr = strchr(volid_ptr, '/'); if (!volid_ptr) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL); ret = -1; goto out; } @@ -292,6 +297,7 @@ build_volfile_path(char *volume_id, char *path, size_t path_len, if (volid_ptr) { volid_ptr = strchr(volid_ptr, '/'); if (!volid_ptr) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL); ret = -1; goto out; } @@ -312,6 +318,7 @@ build_volfile_path(char *volume_id, char *path, size_t path_len, if (volid_ptr) { volid_ptr = strchr(volid_ptr, '/'); if (!volid_ptr) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL); ret = -1; goto out; } @@ -366,6 +373,7 @@ build_volfile_path(char *volume_id, char *path, size_t path_len, if (volid_ptr) { volid_ptr = strchr(volid_ptr, '/'); if (!volid_ptr) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL); ret = -1; goto out; } @@ -386,6 +394,7 @@ build_volfile_path(char *volume_id, char *path, size_t path_len, if (volid_ptr) { volid_ptr = strchr(volid_ptr, '/'); if (!volid_ptr) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL); ret = -1; goto out; } @@ -402,6 +411,8 @@ build_volfile_path(char *volume_id, char *path, size_t path_len, /* Split the volume name */ vol = strtok_r(dup_volname, ".", &save_ptr); if (!vol) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SPLIT_FAIL, + "Volume name=%s", dup_volname, NULL); ret = -1; goto out; } @@ -446,18 +457,25 @@ build_volfile_path(char *volume_id, char *path, size_t path_len, if (ret) { dup_volname = 
gf_strdup(volid_ptr); if (!dup_volname) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, + "Volume name=%s", volid_ptr, NULL); ret = -1; goto out; } /* Split the volume name */ vol = strtok_r(dup_volname, ".", &save_ptr); if (!vol) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SPLIT_FAIL, + "Volume name=%s", dup_volname, NULL); ret = -1; goto out; } ret = glusterd_volinfo_find(vol, &volinfo); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOLINFO_GET_FAIL, + NULL); goto out; + } } gotvolinfo: @@ -466,8 +484,10 @@ gotvolinfo: ret = snprintf(path, path_len, "%s/%s/%s.vol", path_prefix, volinfo->volname, volid_ptr); - if (ret == -1) + if (ret == -1) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); goto out; + } ret = sys_stat(path, &stbuf); @@ -522,12 +542,14 @@ glusterd_get_args_from_dict(gf_getspec_req *args, peer_info_t *peerinfo, GF_ASSERT(peerinfo); if (!args->xdata.xdata_len) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); ret = 0; goto out; } dict = dict_new(); if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); ret = -1; goto out; } @@ -561,6 +583,8 @@ glusterd_get_args_from_dict(gf_getspec_req *args, peer_info_t *peerinfo, } *brick_name = gf_strdup(name); if (*brick_name == NULL) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, + "Brick_name=%s", name, NULL); ret = -1; goto out; } @@ -976,6 +1000,7 @@ __server_getspec(rpcsvc_request_t *req) dict = dict_new(); if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); ret = -ENOMEM; goto fail; } @@ -1037,10 +1062,11 @@ __server_getspec(rpcsvc_request_t *req) } RCU_READ_UNLOCK; if (peer_cnt) { - ret = dict_set_str(dict, GLUSTERD_BRICK_SERVERS, peer_hosts); - if (ret) { + op_ret = dict_set_str(dict, GLUSTERD_BRICK_SERVERS, peer_hosts); + if (op_ret) { gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "failed to set 
peer_host in dict"); + ret = op_ret; goto fail; } } @@ -1050,9 +1076,8 @@ __server_getspec(rpcsvc_request_t *req) ret = dict_allocate_and_serialize(dict, &rsp.xdata.xdata_val, &rsp.xdata.xdata_len); if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_SERL_LENGTH_GET_FAIL, - "Failed to serialize dict to request buffer"); + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto fail; } } @@ -1073,6 +1098,7 @@ __server_getspec(rpcsvc_request_t *req) } ret = file_len = stbuf.st_size; } else { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_NOT_FOUND, NULL); op_errno = ENOENT; goto fail; } @@ -1080,6 +1106,7 @@ __server_getspec(rpcsvc_request_t *req) if (file_len) { rsp.spec = CALLOC(file_len + 1, sizeof(char)); if (!rsp.spec) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); ret = -1; op_errno = ENOMEM; goto fail; @@ -1158,13 +1185,17 @@ __server_event_notify(rpcsvc_request_t *req) (xdrproc_t)xdr_gf_event_notify_req); if (ret < 0) { req->rpc_err = GARBAGE_ARGS; + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL); goto fail; } if (args.dict.dict_len) { dict = dict_new(); - if (!dict) + if (!dict) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, + NULL); return ret; + } ret = dict_unserialize(args.dict.dict_val, args.dict.dict_len, &dict); if (ret) { gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, @@ -1357,6 +1388,7 @@ __glusterd_mgmt_hndsk_versions(rpcsvc_request_t *req) if (ret < 0) { // failed to decode msg; req->rpc_err = GARBAGE_ARGS; + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL); goto out; } @@ -1370,8 +1402,10 @@ __glusterd_mgmt_hndsk_versions(rpcsvc_request_t *req) } dict = dict_new(); - if (!dict) + if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } ret = dict_set_int32(dict, GD_OP_VERSION_KEY, conf->op_version); if (ret) { @@ -1457,6 +1491,7 @@ 
__glusterd_mgmt_hndsk_versions_ack(rpcsvc_request_t *req) if (ret < 0) { // failed to decode msg; req->rpc_err = GARBAGE_ARGS; + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL); goto out; } @@ -1529,22 +1564,25 @@ __server_get_volume_info(rpcsvc_request_t *req) char *volume_id_str = NULL; int32_t flags = 0; + xlator_t *this = THIS; + GF_ASSERT(this); + ret = xdr_to_generic(req->msg[0], &vol_info_req, (xdrproc_t)xdr_gf_get_volume_info_req); if (ret < 0) { /* failed to decode msg */ req->rpc_err = GARBAGE_ARGS; + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL); goto out; } - gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_VOL_INFO_REQ_RECVD, - "Received get volume info req"); + gf_smsg(this->name, GF_LOG_INFO, 0, GD_MSG_VOL_INFO_REQ_RECVD, NULL); if (vol_info_req.dict.dict_len) { /* Unserialize the dictionary */ dict = dict_new(); if (!dict) { - gf_msg("glusterd", GF_LOG_WARNING, ENOMEM, GD_MSG_NO_MEMORY, - "Out of Memory"); + gf_smsg(this->name, GF_LOG_WARNING, ENOMEM, GD_MSG_DICT_CREATE_FAIL, + NULL); op_errno = ENOMEM; ret = -1; goto out; @@ -1553,9 +1591,8 @@ __server_get_volume_info(rpcsvc_request_t *req) ret = dict_unserialize(vol_info_req.dict.dict_val, vol_info_req.dict.dict_len, &dict); if (ret < 0) { - gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, - "failed to " - "unserialize req-buffer to dictionary"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, + NULL); op_errno = -ret; ret = -1; goto out; @@ -1566,8 +1603,8 @@ __server_get_volume_info(rpcsvc_request_t *req) ret = dict_get_int32(dict, "flags", &flags); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED, - "failed to get flags"); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=flags", NULL); op_errno = -ret; ret = -1; goto out; @@ -1575,13 +1612,15 @@ __server_get_volume_info(rpcsvc_request_t *req) if (!flags) { /* Nothing to query about. 
Just return success */ - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_NO_FLAG_SET, "No flags set"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_FLAG_SET, NULL); ret = 0; goto out; } ret = dict_get_str(dict, "volname", &volname); if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=volname", NULL); op_errno = EINVAL; ret = -1; goto out; @@ -1589,6 +1628,8 @@ __server_get_volume_info(rpcsvc_request_t *req) ret = glusterd_volinfo_find(volname, &volinfo); if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOLINFO_GET_FAIL, + "Volname=%s", volname, NULL); op_errno = EINVAL; ret = -1; goto out; @@ -1597,6 +1638,8 @@ __server_get_volume_info(rpcsvc_request_t *req) if (flags & (int32_t)GF_GET_VOLUME_UUID) { volume_id_str = gf_strdup(uuid_utoa(volinfo->volume_id)); if (!volume_id_str) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, + NULL); op_errno = ENOMEM; ret = -1; goto out; @@ -1604,8 +1647,8 @@ __server_get_volume_info(rpcsvc_request_t *req) dict_rsp = dict_new(); if (!dict_rsp) { - gf_msg("glusterd", GF_LOG_WARNING, ENOMEM, GD_MSG_NO_MEMORY, - "Out of Memory"); + gf_smsg(this->name, GF_LOG_WARNING, ENOMEM, GD_MSG_DICT_CREATE_FAIL, + NULL); op_errno = ENOMEM; GF_FREE(volume_id_str); ret = -1; @@ -1613,6 +1656,8 @@ __server_get_volume_info(rpcsvc_request_t *req) } ret = dict_set_dynstr(dict_rsp, "volume_id", volume_id_str); if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=volume_id", NULL); op_errno = -ret; ret = -1; goto out; @@ -1621,6 +1666,8 @@ __server_get_volume_info(rpcsvc_request_t *req) ret = dict_allocate_and_serialize(dict_rsp, &vol_info_rsp.dict.dict_val, &vol_info_rsp.dict.dict_len); if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); op_errno = -ret; ret = -1; goto out; @@ -1686,6 +1733,8 @@ __server_get_snap_info(rpcsvc_request_t *req) if (snap_info_req.dict.dict_len) { dict = dict_new(); if (!dict) { 
+ gf_smsg("glusterd", GF_LOG_WARNING, ENOMEM, GD_MSG_DICT_CREATE_FAIL, + NULL); op_errno = ENOMEM; ret = -1; goto out; @@ -1716,6 +1765,8 @@ __server_get_snap_info(rpcsvc_request_t *req) dict_rsp = dict_new(); if (!dict_rsp) { + gf_smsg("glusterd", GF_LOG_WARNING, ENOMEM, GD_MSG_DICT_CREATE_FAIL, + NULL); op_errno = ENOMEM; ret = -1; goto out; @@ -1908,22 +1959,45 @@ gd_validate_peer_op_version(xlator_t *this, glusterd_peerinfo_t *peerinfo, int32_t peer_min_op_version = 0; int32_t peer_max_op_version = 0; - if (!dict || !this || !peerinfo) + if (!dict) { + gf_smsg("glusterd", GF_LOG_WARNING, ENOMEM, GD_MSG_DICT_CREATE_FAIL, + NULL); goto out; + } + + if (!this) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_XLATOR_NOT_DEFINED, + NULL); + goto out; + } + + if (!peerinfo) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); + goto out; + } conf = this->private; ret = dict_get_int32(dict, GD_OP_VERSION_KEY, &peer_op_version); - if (ret) + if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=%s", GD_OP_VERSION_KEY, NULL); goto out; + } ret = dict_get_int32(dict, GD_MAX_OP_VERSION_KEY, &peer_max_op_version); - if (ret) + if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=%s", GD_MAX_OP_VERSION_KEY, NULL); goto out; + } ret = dict_get_int32(dict, GD_MIN_OP_VERSION_KEY, &peer_min_op_version); - if (ret) + if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=%s", GD_MIN_OP_VERSION_KEY, NULL); goto out; + } ret = -1; /* Check if peer can support our op_version */ @@ -2189,14 +2263,20 @@ glusterd_mgmt_handshake(xlator_t *this, glusterd_peerctx_t *peerctx) int ret = -1; frame = create_frame(this, this->ctx->pool); - if (!frame) + if (!frame) { + gf_smsg("glusterd", GF_LOG_WARNING, errno, GD_MSG_FRAME_CREATE_FAIL, + NULL); goto out; + } frame->local = peerctx; req_dict = dict_new(); - if (!req_dict) + if (!req_dict) { + gf_smsg("glusterd", 
GF_LOG_WARNING, ENOMEM, GD_MSG_DICT_CREATE_FAIL, + NULL); goto out; + } ret = dict_set_dynstr(req_dict, GD_PEER_ID_KEY, gf_strdup(uuid_utoa(MY_UUID))); @@ -2463,12 +2543,17 @@ glusterd_peer_dump_version(xlator_t *this, struct rpc_clnt *rpc, int ret = -1; frame = create_frame(this, this->ctx->pool); - if (!frame) + if (!frame) { + gf_smsg(this->name, GF_LOG_WARNING, errno, GD_MSG_FRAME_CREATE_FAIL, + NULL); goto out; + } frame->local = peerctx; - if (!peerctx) + if (!peerctx) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } RCU_READ_LOCK; diff --git a/xlators/mgmt/glusterd/src/glusterd-hooks.c b/xlators/mgmt/glusterd/src/glusterd-hooks.c index 511a102d016..61c0f1c946f 100644 --- a/xlators/mgmt/glusterd/src/glusterd-hooks.c +++ b/xlators/mgmt/glusterd/src/glusterd-hooks.c @@ -87,21 +87,24 @@ glusterd_hooks_create_hooks_directory(char *basedir) glusterd_conf_t *priv = NULL; int32_t len = 0; - priv = THIS->private; + xlator_t *this = NULL; + this = THIS; + GF_ASSERT(this); + priv = this->private; snprintf(path, sizeof(path), "%s/hooks", basedir); ret = mkdir_p(path, 0755, _gf_true); if (ret) { - gf_msg(THIS->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED, - "Unable to create %s", path); + gf_smsg(this->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED, + "Path=%s", path, NULL); goto out; } GLUSTERD_GET_HOOKS_DIR(version_dir, GLUSTERD_HOOK_VER, priv); ret = mkdir_p(version_dir, 0755, _gf_true); if (ret) { - gf_msg(THIS->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED, - "Unable to create %s", version_dir); + gf_smsg(this->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED, + "Directory=%s", version_dir, NULL); goto out; } @@ -112,13 +115,14 @@ glusterd_hooks_create_hooks_directory(char *basedir) len = snprintf(path, sizeof(path), "%s/%s", version_dir, cmd_subdir); if ((len < 0) || (len >= sizeof(path))) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); ret = -1; goto out; } ret = 
mkdir_p(path, 0755, _gf_true); if (ret) { - gf_msg(THIS->name, GF_LOG_CRITICAL, errno, GD_MSG_CREATE_DIR_FAILED, - "Unable to create %s", path); + gf_smsg(this->name, GF_LOG_CRITICAL, errno, + GD_MSG_CREATE_DIR_FAILED, "Path=%s", path, NULL); goto out; } @@ -126,13 +130,15 @@ glusterd_hooks_create_hooks_directory(char *basedir) len = snprintf(path, sizeof(path), "%s/%s/%s", version_dir, cmd_subdir, type_subdir[type]); if ((len < 0) || (len >= sizeof(path))) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, + NULL); ret = -1; goto out; } ret = mkdir_p(path, 0755, _gf_true); if (ret) { - gf_msg(THIS->name, GF_LOG_CRITICAL, errno, - GD_MSG_CREATE_DIR_FAILED, "Unable to create %s", path); + gf_smsg(this->name, GF_LOG_CRITICAL, errno, + GD_MSG_CREATE_DIR_FAILED, "Path=%s", path, NULL); goto out; } } @@ -200,20 +206,31 @@ glusterd_hooks_set_volume_args(dict_t *dict, runner_t *runner) int i = 0; int count = 0; int ret = -1; + int flag = 0; char query[1024] = { 0, }; char *key = NULL; char *value = NULL; + char *inet_family = NULL; + xlator_t *this = NULL; + this = THIS; + GF_ASSERT(this); ret = dict_get_int32(dict, "count", &count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=count", NULL); goto out; + } /* This will not happen unless op_ctx * is corrupted*/ - if (!count) + if (!count) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ENTRY, "count", + NULL); goto out; + } runner_add_arg(runner, "-o"); for (i = 1; ret == 0; i++) { @@ -228,9 +245,23 @@ glusterd_hooks_set_volume_args(dict_t *dict, runner_t *runner) continue; runner_argprintf(runner, "%s=%s", key, value); + if ((strncmp(key, "cluster.enable-shared-storage", + SLEN("cluster.enable-shared-storage")) == 0 || + strncmp(key, "enable-shared-storage", + SLEN("enable-shared-storage")) == 0) && + strncmp(value, "enable", SLEN("enable")) == 0) + flag = 1; } glusterd_hooks_add_custom_args(dict, runner); + if (flag == 1) { + ret = 
dict_get_str_sizen(this->options, "transport.address-family", + &inet_family); + if (!ret) { + runner_argprintf(runner, "transport.address-family=%s", + inet_family); + } + } ret = 0; out: @@ -357,27 +388,31 @@ glusterd_hooks_run_hooks(char *hooks_path, glusterd_op_t op, dict_t *op_ctx, lines = GF_CALLOC(1, N * sizeof(*lines), gf_gld_mt_charptr); if (!lines) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); ret = -1; goto out; } ret = -1; line_count = 0; - GF_SKIP_IRRELEVANT_ENTRIES(entry, hookdir, scratch); - while (entry) { + + while ((entry = sys_readdir(hookdir, scratch))) { + if (gf_irrelevant_entry(entry)) + continue; if (line_count == N - 1) { N *= 2; lines = GF_REALLOC(lines, N * sizeof(char *)); - if (!lines) + if (!lines) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, + NULL); goto out; + } } if (glusterd_is_hook_enabled(entry->d_name)) { lines[line_count] = gf_strdup(entry->d_name); line_count++; } - - GF_SKIP_IRRELEVANT_ENTRIES(entry, hookdir, scratch); } lines[line_count] = NULL; @@ -461,31 +496,40 @@ glusterd_hooks_stub_init(glusterd_hooks_stub_t **stub, char *scriptdir, int ret = -1; glusterd_hooks_stub_t *hooks_stub = NULL; + xlator_t *this = NULL; + this = THIS; + GF_ASSERT(this); GF_ASSERT(stub); if (!stub) goto out; hooks_stub = GF_CALLOC(1, sizeof(*hooks_stub), gf_gld_mt_hooks_stub_t); - if (!hooks_stub) + if (!hooks_stub) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); goto out; + } CDS_INIT_LIST_HEAD(&hooks_stub->all_hooks); hooks_stub->op = op; hooks_stub->scriptdir = gf_strdup(scriptdir); - if (!hooks_stub->scriptdir) + if (!hooks_stub->scriptdir) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, + "scriptdir=%s", scriptdir, NULL); goto out; + } hooks_stub->op_ctx = dict_copy_with_ref(op_ctx, hooks_stub->op_ctx); - if (!hooks_stub->op_ctx) + if (!hooks_stub->op_ctx) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_COPY_FAIL, NULL); goto out; + } *stub = 
hooks_stub; ret = 0; out: if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_POST_HOOK_STUB_INIT_FAIL, - "Failed to initialize " - "post hooks stub"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_HOOK_STUB_INIT_FAIL, + NULL); glusterd_hooks_stub_cleanup(hooks_stub); } @@ -547,12 +591,20 @@ glusterd_hooks_priv_init(glusterd_hooks_private_t **new) int ret = -1; glusterd_hooks_private_t *hooks_priv = NULL; - if (!new) + xlator_t *this = NULL; + this = THIS; + GF_ASSERT(this); + + if (!new) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } hooks_priv = GF_CALLOC(1, sizeof(*hooks_priv), gf_gld_mt_hooks_priv_t); - if (!hooks_priv) + if (!hooks_priv) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); goto out; + } pthread_mutex_init(&hooks_priv->mutex, NULL); pthread_cond_init(&hooks_priv->cond, NULL); diff --git a/xlators/mgmt/glusterd/src/glusterd-log-ops.c b/xlators/mgmt/glusterd/src/glusterd-log-ops.c index a48923e26e1..34abf35cb00 100644 --- a/xlators/mgmt/glusterd/src/glusterd-log-ops.c +++ b/xlators/mgmt/glusterd/src/glusterd-log-ops.c @@ -43,6 +43,7 @@ __glusterd_handle_log_rotate(rpcsvc_request_t *req) if (ret < 0) { // failed to decode msg; req->rpc_err = GARBAGE_ARGS; + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL); goto out; } @@ -75,7 +76,7 @@ __glusterd_handle_log_rotate(rpcsvc_request_t *req) "for volume %s", volname); - ret = dict_set_uint64(dict, "rotate-key", (uint64_t)time(NULL)); + ret = dict_set_uint64(dict, "rotate-key", (uint64_t)gf_time()); if (ret) goto out; @@ -138,6 +139,8 @@ glusterd_op_stage_log_rotate(dict_t *dict, char **op_errstr) /* If no brick is specified, do log-rotate for all the bricks in the volume */ if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=brick", NULL); ret = 0; goto out; } @@ -204,8 +207,11 @@ glusterd_op_log_rotate(dict_t *dict) ret = dict_get_str(dict, "brick", &brick); /* If no brick is 
specified, do log-rotate for all the bricks in the volume */ - if (ret) + if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=brick", NULL); goto cont; + } ret = glusterd_brickinfo_new_from_brick(brick, &tmpbrkinfo, _gf_false, NULL); diff --git a/xlators/mgmt/glusterd/src/glusterd-mem-types.h b/xlators/mgmt/glusterd/src/glusterd-mem-types.h index 17052cee263..d7257e1a7b5 100644 --- a/xlators/mgmt/glusterd/src/glusterd-mem-types.h +++ b/xlators/mgmt/glusterd/src/glusterd-mem-types.h @@ -27,6 +27,7 @@ typedef enum gf_gld_mem_types_ { gf_gld_mt_mop_stage_req_t, gf_gld_mt_probe_ctx_t, gf_gld_mt_glusterd_volinfo_t, + gf_gld_mt_volinfo_dict_data_t, gf_gld_mt_glusterd_brickinfo_t, gf_gld_mt_peer_hostname_t, gf_gld_mt_defrag_info, diff --git a/xlators/mgmt/glusterd/src/glusterd-messages.h b/xlators/mgmt/glusterd/src/glusterd-messages.h index f75ba905372..3a1e600fb03 100644 --- a/xlators/mgmt/glusterd/src/glusterd-messages.h +++ b/xlators/mgmt/glusterd/src/glusterd-messages.h @@ -46,7 +46,7 @@ GLFS_MSGID( GD_MSG_SNAP_STATUS_FAIL, GD_MSG_SNAP_INIT_FAIL, GD_MSG_VOLINFO_SET_FAIL, GD_MSG_VOLINFO_GET_FAIL, GD_MSG_BRICK_CREATION_FAIL, GD_MSG_BRICK_GET_INFO_FAIL, GD_MSG_BRICK_NEW_INFO_FAIL, GD_MSG_LVS_FAIL, - GD_MSG_SETXATTR_FAIL, GD_MSG_UMOUNTING_SNAP_BRICK, GD_MSG_OP_UNSUPPORTED, + GD_MSG_SET_XATTR_FAIL, GD_MSG_UMOUNTING_SNAP_BRICK, GD_MSG_OP_UNSUPPORTED, GD_MSG_SNAP_NOT_FOUND, GD_MSG_FS_LABEL_UPDATE_FAIL, GD_MSG_LVM_MOUNT_FAILED, GD_MSG_DICT_SET_FAILED, GD_MSG_CANONICALIZE_FAIL, GD_MSG_DICT_GET_FAILED, GD_MSG_SNAP_INFO_FAIL, GD_MSG_SNAP_VOL_CONFIG_FAIL, @@ -78,7 +78,7 @@ GLFS_MSGID( GD_MSG_COMMIT_OP_FAIL, GD_MSG_PEER_LIST_CREATE_FAIL, GD_MSG_BRICK_OP_FAIL, GD_MSG_OPINFO_SET_FAIL, GD_MSG_OP_EVENT_UNLOCK_FAIL, GD_MSG_MGMTV3_OP_RESP_FAIL, GD_MSG_PEER_NOT_FOUND, GD_MSG_REQ_DECODE_FAIL, - GD_MSG_DICT_SERL_LENGTH_GET_FAIL, GD_MSG_ALREADY_STOPPED, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, GD_MSG_ALREADY_STOPPED, GD_MSG_PRE_VALD_RESP_FAIL, 
GD_MSG_SVC_GET_FAIL, GD_MSG_VOLFILE_NOT_FOUND, GD_MSG_OP_EVENT_LOCK_FAIL, GD_MSG_NON_STRIPE_VOL, GD_MSG_SNAPD_OBJ_GET_FAIL, GD_MSG_QUOTA_DISABLED, GD_MSG_CACHE_MINMAX_SIZE_INVALID, @@ -116,7 +116,7 @@ GLFS_MSGID( GD_MSG_PARSE_BRICKINFO_FAIL, GD_MSG_VERS_STORE_FAIL, GD_MSG_HEADER_ADD_FAIL, GD_MSG_QUOTA_CONF_WRITE_FAIL, GD_MSG_QUOTA_CONF_CORRUPT, GD_MSG_FORK_FAIL, GD_MSG_CKSUM_COMPUTE_FAIL, GD_MSG_VERS_CKSUM_STORE_FAIL, - GD_MSG_GETXATTR_FAIL, GD_MSG_CONVERSION_FAILED, GD_MSG_VOL_NOT_DISTRIBUTE, + GD_MSG_GET_XATTR_FAIL, GD_MSG_CONVERSION_FAILED, GD_MSG_VOL_NOT_DISTRIBUTE, GD_MSG_VOL_STOPPED, GD_MSG_OPCTX_GET_FAIL, GD_MSG_TASKID_GEN_FAIL, GD_MSG_REBALANCE_ID_MISSING, GD_MSG_NO_REBALANCE_PFX_IN_VOLNAME, GD_MSG_DEFRAG_STATUS_UPDATE_FAIL, GD_MSG_UUID_GEN_STORE_FAIL, @@ -302,6 +302,150 @@ GLFS_MSGID( GD_MSG_SHD_OBJ_GET_FAIL, GD_MSG_SVC_ATTACH_FAIL, GD_MSG_ATTACH_INFO, GD_MSG_DETACH_INFO, GD_MSG_SVC_DETACH_FAIL, GD_MSG_RPC_TRANSPORT_GET_PEERNAME_FAIL, GD_MSG_CLUSTER_RC_ENABLE, - GD_MSG_NFS_GANESHA_DISABLED, GD_MSG_GANESHA_NOT_RUNNING); + GD_MSG_NFS_GANESHA_DISABLED, GD_MSG_GANESHA_NOT_RUNNING, GD_MSG_SNAP_WARN, + GD_MSG_BRICK_SUBVOL_VERIFY_FAIL, GD_MSG_REMOVE_ARBITER_BRICK, + GD_MSG_BRICK_NOT_DECOM, GD_MSG_BRICK_STOPPED, GD_MSG_BRICK_DEAD, + GD_MSG_BRICK_HOST_NOT_FOUND, GD_MSG_BRICK_HOST_DOWN, GD_MSG_BRICK_DELETE, + GD_MSG_BRICK_NO_REMOVE_CMD, GD_MSG_MIGRATION_PROG, GD_MSG_MIGRATION_FAIL, + GD_MSG_COPY_FAIL, GD_MSG_REALPATH_GET_FAIL, + GD_MSG_ARBITER_BRICK_SET_INFO_FAIL, GD_MSG_STRCHR_FAIL, GD_MSG_SPLIT_FAIL, + GD_MSG_ALLOC_AND_COPY_UUID_FAIL, GD_MSG_VOL_SHD_NOT_COMP, + GD_MSG_BITROT_NOT_ENABLED, GD_MSG_CREATE_BRICK_DIR_FAILED, + GD_MSG_CREATE_GLUSTER_DIR_FAILED, GD_MSG_BRICK_CREATE_MNTPNT, + GD_MSG_BRICK_CREATE_ROOT, GD_MSG_SET_XATTR_BRICK_FAIL, + GD_MSG_REMOVE_XATTR_FAIL, GD_MSG_XLATOR_NOT_DEFINED, + GD_MSG_BRICK_NOT_RUNNING, GD_MSG_INCORRECT_BRICK, GD_MSG_UUID_GET_FAIL, + GD_MSG_INVALID_ARGUMENT, GD_MSG_FRAME_CREATE_FAIL, + GD_MSG_SNAPSHOT_NOT_THIN_PROVISIONED, 
GD_MSG_VOL_STOP_ARGS_GET_FAILED, + GD_MSG_LSTAT_FAIL, GD_MSG_VOLUME_NOT_IMPORTED, + GD_MSG_ADD_BRICK_MNT_INFO_FAIL, GD_MSG_GET_MNT_ENTRY_INFO_FAIL, + GD_MSG_QUORUM_CLUSTER_COUNT_GET_FAIL, GD_MSG_POST_COMMIT_OP_FAIL, + GD_MSG_POST_COMMIT_FROM_UUID_REJCT, GD_MSG_POST_COMMIT_REQ_SEND_FAIL); + +#define GD_MSG_INVALID_ENTRY_STR "Invalid data entry" +#define GD_MSG_INVALID_ARGUMENT_STR \ + "Invalid arguments have been given to function" +#define GD_MSG_GARBAGE_ARGS_STR "Garbage args received" +#define GD_MSG_BRICK_SUBVOL_VERIFY_FAIL_STR "Brick's subvol verification fail" +#define GD_MSG_REMOVE_ARBITER_BRICK_STR "Failed to remove arbiter bricks" +#define GD_MSG_DICT_GET_FAILED_STR "Dict get failed" +#define GD_MSG_DICT_SET_FAILED_STR "Dict set failed" +#define GD_MSG_BRICK_NOT_FOUND_STR "Brick not found in volume" +#define GD_MSG_BRICK_NOT_DECOM_STR "Brick is not decommissoned" +#define GD_MSG_BRICK_STOPPED_STR "Found stopped brick" +#define GD_MSG_BRICK_DEAD_STR "Found dead brick" +#define GD_MSG_BRICK_HOST_NOT_FOUND_STR \ + "Host node of the brick is not a part of cluster" +#define GD_MSG_BRICK_HOST_DOWN_STR "Host node of the brick is down" +#define GD_MSG_BRICK_DELETE_STR \ + "Deleting all the bricks of the volume is not allowed" +#define GD_MSG_BRICK_NO_REMOVE_CMD_STR "No remove-brick command issued" +#define GD_MSG_INCORRECT_BRICK_STR "Incorrect brick for volume" +#define GD_MSG_MIGRATION_PROG_STR "Migration is in progress" +#define GD_MSG_MIGRATION_FAIL_STR "Migration has failed" +#define GD_MSG_XLATOR_NOT_DEFINED_STR "Xlator not defined" +#define GD_MSG_DICT_CREATE_FAIL_STR "Failed to create dictionary" +#define GD_MSG_COPY_FAIL_STR "Failed to copy" +#define GD_MSG_UUID_GET_FAIL_STR "Failed to get the uuid of local glusterd" +#define GD_MSG_GEO_REP_START_FAILED_STR "Georep start failed for volume" +#define GD_MSG_REALPATH_GET_FAIL_STR "Failed to get realpath" +#define GD_MSG_FILE_NOT_FOUND_STR "File not found in directory" +#define GD_MSG_SRC_FILE_ERROR_STR "Error 
in source file" +#define GD_MSG_DICT_UNSERIALIZE_FAIL_STR "Failed to unserialize dict" +#define GD_MSG_VOL_ID_SET_FAIL_STR "Failed to set volume id" +#define GD_MSG_ARBITER_BRICK_SET_INFO_FAIL_STR \ + "Failed to add arbiter info to brick" +#define GD_MSG_NO_MEMORY_STR "Out of memory" +#define GD_MSG_GLUSTERD_UMOUNT_FAIL_STR "Failed to unmount path" +#define GD_MSG_PEER_ADD_FAIL_STR "Failed to add new peer" +#define GD_MSG_BRICK_GET_INFO_FAIL_STR "Failed to get brick info" +#define GD_MSG_STRCHR_FAIL_STR "Failed to get the character" +#define GD_MSG_SPLIT_FAIL_STR "Failed to split" +#define GD_MSG_VOLINFO_GET_FAIL_STR "Failed to get volinfo" +#define GD_MSG_PEER_NOT_FOUND_STR "Failed to find peer info" +#define GD_MSG_DICT_COPY_FAIL_STR "Failed to copy values from dictionary" +#define GD_MSG_ALLOC_AND_COPY_UUID_FAIL_STR \ + "Failed to allocate memory or copy uuid" +#define GD_MSG_VOL_NOT_FOUND_STR "Volume not found" +#define GD_MSG_PEER_DISCONNECTED_STR "Peer is disconnected" +#define GD_MSG_QUOTA_GET_STAT_FAIL_STR "Failed to get quota status" +#define GD_MSG_SNAP_STATUS_FAIL_STR "Failed to get status of snapd" +#define GD_MSG_VALIDATE_FAILED_STR "Failed to validate volume" +#define GD_MSG_VOL_NOT_STARTED_STR "Volume is not started" +#define GD_MSG_VOL_SHD_NOT_COMP_STR "Volume is not Self-heal compatible" +#define GD_MSG_SELF_HEALD_DISABLED_STR "Self-heal daemon is disabled" +#define GD_MSG_NFS_GANESHA_DISABLED_STR "NFS server is disabled" +#define GD_MSG_QUOTA_DISABLED_STR "Quota is disabled" +#define GD_MSG_BITROT_NOT_RUNNING_STR "Bitrot is not enabled" +#define GD_MSG_BITROT_NOT_ENABLED_STR "Volume does not have bitrot enabled" +#define GD_MSG_SNAPD_NOT_RUNNING_STR "Snapd is not enabled" +#define GD_MSG_STRDUP_FAILED_STR "Strdup operation failed" +#define GD_MSG_QUORUM_CLUSTER_COUNT_GET_FAIL_STR \ + "Failed to get quorum cluster counts" +#define GD_MSG_GLUSTER_SERVICE_START_FAIL_STR "Failed to start glusterd service" +#define GD_MSG_PEER_ADDRESS_GET_FAIL_STR 
"Failed to get the address of peer" +#define GD_MSG_INVALID_SLAVE_STR "Volume is not a slave volume" +#define GD_MSG_BRICK_NOT_RUNNING_STR "One or more bricks are not running" +#define GD_MSG_BRK_MNTPATH_GET_FAIL_STR "Failed to get brick mount device" +#define GD_MSG_SNAPSHOT_NOT_THIN_PROVISIONED_STR \ + "Snapshot is supported only for thin provisioned LV." +#define GD_MSG_SNAP_DEVICE_NAME_GET_FAIL_STR \ + "Failed to copy snapshot device name" +#define GD_MSG_SNAP_NOT_FOUND_STR "Snapshot does not exist" +#define GD_MSG_CREATE_BRICK_DIR_FAILED_STR "Failed to create brick directory" +#define GD_MSG_LSTAT_FAIL_STR "Lstat operation failed" +#define GD_MSG_DIR_OP_FAILED_STR \ + "The provided path is already present. It is not a directory" +#define GD_MSG_BRICK_CREATION_FAIL_STR \ + "Brick isn't allowed to be created inside glusterd's working directory." +#define GD_MSG_BRICK_CREATE_ROOT_STR \ + "The brick is being created in the root partition. It is recommended " \ + "that you don't use the system's root partition for storage backend." +#define GD_MSG_BRICK_CREATE_MNTPNT_STR \ + "The brick is a mount point. Please create a sub-directory under the " \ + "mount point and use that as the brick directory." +#define GD_MSG_CREATE_GLUSTER_DIR_FAILED_STR \ + "Failed to create glusterfs directory" +#define GD_MSG_VOLINFO_IMPORT_FAIL_STR "Volume is not yet imported" +#define GD_MSG_BRICK_SET_INFO_FAIL_STR \ + "Failed to add brick mount details to dict" +#define GD_MSG_SET_XATTR_BRICK_FAIL_STR \ + "Glusterfs is not supported on brick. 
Setting extended attribute failed" +#define GD_MSG_SET_XATTR_FAIL_STR "Failed to set extended attribute" +#define GD_MSG_REMOVE_XATTR_FAIL_STR "Failed to remove extended attribute" +#define GD_MSG_XLATOR_SET_OPT_FAIL_STR "Failed to set xlator type" +#define GD_MSG_XLATOR_LINK_FAIL_STR \ + "Failed to do the link of xlator with children" +#define GD_MSG_READ_ERROR_STR "Failed to read directory" +#define GD_MSG_INCOMPATIBLE_VALUE_STR "Incompatible transport type" +#define GD_MSG_VOL_STOP_ARGS_GET_FAILED_STR "Failed to get volume stop args" +#define GD_MSG_FRAME_CREATE_FAIL_STR "Failed to create frame" +#define GD_MSG_VOLUME_NOT_IMPORTED_STR "Volume has not been imported" +#define GD_MSG_ADD_BRICK_MNT_INFO_FAIL_STR \ + "Failed to add brick mount details to dict" +#define GD_MSG_GET_MNT_ENTRY_INFO_FAIL_STR "Failed to get mount entry details" +#define GD_MSG_BRICKPATH_ROOT_GET_FAIL_STR "failed to get brick root details" +#define GD_MSG_VOL_INFO_REQ_RECVD_STR "Received get volume info req" +#define GD_MSG_NO_FLAG_SET_STR "No flags set" +#define GD_MSG_CREATE_DIR_FAILED_STR "Failed to create directory" +#define GD_MSG_POST_HOOK_STUB_INIT_FAIL_STR \ + "Failed to initialize post hooks stub" +#define GD_MSG_FILE_OP_FAILED_STR "File operation failed" +#define GD_MSG_INODE_SIZE_GET_FAIL_STR "Failed to get inode size" +#define GD_MSG_CMD_EXEC_FAIL_STR "Command execution failed" +#define GD_MSG_XLATOR_CREATE_FAIL_STR "Failed to create xlator" +#define GD_MSG_CLRCLK_VOL_REQ_RCVD_STR "Received clear-locks request for volume" +#define GD_MSG_BRK_PORT_NUM_GET_FAIL_STR \ + "Couldn't get port number of local bricks" +#define GD_MSG_CLRLOCKS_MOUNTDIR_CREATE_FAIL_STR \ + "Creating mount directory for clear-locks failed" +#define GD_MSG_CLRLOCKS_CLNT_MOUNT_FAIL_STR \ + "Failed to mount clear-locks maintenance client" +#define GD_MSG_CLRLOCKS_CLNT_UMOUNT_FAIL_STR \ + "Failed to unmount clear-locks mount point" +#define GD_MSG_CLRCLK_SND_CMD_FAIL_STR "Failed to send command for clear-locks" 
+#define GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL_STR \ + "Failed to allocate memory or get serialized length of dict" +#define GD_MSG_GET_XATTR_FAIL_STR "Failed to get extended attribute" #endif /* !_GLUSTERD_MESSAGES_H_ */ diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c index ef8b4c38571..1069688a89d 100644 --- a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c +++ b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c @@ -165,6 +165,7 @@ glusterd_handle_mgmt_v3_lock_fn(rpcsvc_request_t *req) ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_op_lock_ctx_t); if (!ctx) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); ret = -1; goto out; } @@ -174,6 +175,7 @@ glusterd_handle_mgmt_v3_lock_fn(rpcsvc_request_t *req) ctx->dict = dict_new(); if (!ctx->dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); ret = -1; goto out; } @@ -181,8 +183,8 @@ glusterd_handle_mgmt_v3_lock_fn(rpcsvc_request_t *req) ret = dict_unserialize(lock_req.dict.dict_val, lock_req.dict.dict_len, &ctx->dict); if (ret) { - gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, - "failed to unserialize the dictionary"); + gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, + NULL); goto out; } @@ -264,8 +266,8 @@ glusterd_mgmt_v3_pre_validate_send_resp(rpcsvc_request_t *req, int32_t op, ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val, &rsp.dict.dict_len); if (ret < 0) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL, - "failed to get serialized length of dict"); + gf_smsg(this->name, GF_LOG_ERROR, 0, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; } @@ -315,20 +317,21 @@ glusterd_handle_pre_validate_fn(rpcsvc_request_t *req) } dict = dict_new(); - if (!dict) + if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } ret = dict_unserialize(op_req.dict.dict_val, 
op_req.dict.dict_len, &dict); if (ret) { - gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, - "failed to unserialize the dictionary"); + gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, + NULL); goto out; } rsp_dict = dict_new(); if (!rsp_dict) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, - "Failed to get new dictionary"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL); return -1; } @@ -391,8 +394,8 @@ glusterd_mgmt_v3_brick_op_send_resp(rpcsvc_request_t *req, int32_t op, ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val, &rsp.dict.dict_len); if (ret < 0) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL, - "failed to get serialized length of dict"); + gf_smsg(this->name, GF_LOG_ERROR, 0, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; } @@ -441,20 +444,21 @@ glusterd_handle_brick_op_fn(rpcsvc_request_t *req) } dict = dict_new(); - if (!dict) + if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } ret = dict_unserialize(op_req.dict.dict_val, op_req.dict.dict_len, &dict); if (ret) { - gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, - "failed to unserialize the dictionary"); + gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, + NULL); goto out; } rsp_dict = dict_new(); if (!rsp_dict) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, - "Failed to get new dictionary"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL); return -1; } @@ -518,8 +522,8 @@ glusterd_mgmt_v3_commit_send_resp(rpcsvc_request_t *req, int32_t op, ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val, &rsp.dict.dict_len); if (ret < 0) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL, - "failed to get serialized length of dict"); + gf_smsg(this->name, GF_LOG_ERROR, 0, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); 
goto out; } @@ -569,20 +573,21 @@ glusterd_handle_commit_fn(rpcsvc_request_t *req) } dict = dict_new(); - if (!dict) + if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } ret = dict_unserialize(op_req.dict.dict_val, op_req.dict.dict_len, &dict); if (ret) { - gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, - "failed to unserialize the dictionary"); + gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, + NULL); goto out; } rsp_dict = dict_new(); if (!rsp_dict) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, - "Failed to get new dictionary"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL); return -1; } @@ -621,6 +626,136 @@ out: } static int +glusterd_mgmt_v3_post_commit_send_resp(rpcsvc_request_t *req, int32_t op, + int32_t status, char *op_errstr, + uint32_t op_errno, dict_t *rsp_dict) +{ + gd1_mgmt_v3_post_commit_rsp rsp = { + {0}, + }; + int ret = -1; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + GF_ASSERT(req); + + rsp.op_ret = status; + glusterd_get_uuid(&rsp.uuid); + rsp.op = op; + rsp.op_errno = op_errno; + if (op_errstr) + rsp.op_errstr = op_errstr; + else + rsp.op_errstr = ""; + + ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val, + &rsp.dict.dict_len); + if (ret < 0) { + gf_smsg(this->name, GF_LOG_ERROR, 0, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); + goto out; + } + + ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL, + (xdrproc_t)xdr_gd1_mgmt_v3_post_commit_rsp); + + GF_FREE(rsp.dict.dict_val); +out: + gf_msg_debug(this->name, 0, "Responded to post commit, ret: %d", ret); + return ret; +} + +static int +glusterd_handle_post_commit_fn(rpcsvc_request_t *req) +{ + int32_t ret = -1; + gd1_mgmt_v3_post_commit_req op_req = { + {0}, + }; + xlator_t *this = NULL; + char *op_errstr = NULL; + dict_t *dict = NULL; + dict_t *rsp_dict = NULL; + uint32_t op_errno = 0; + + this = THIS; + GF_ASSERT(this); + 
GF_ASSERT(req); + + ret = xdr_to_generic(req->msg[0], &op_req, + (xdrproc_t)xdr_gd1_mgmt_v3_post_commit_req); + if (ret < 0) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, + "Failed to decode post commit " + "request received from peer"); + req->rpc_err = GARBAGE_ARGS; + goto out; + } + + if (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND, + "%s doesn't " + "belong to the cluster. Ignoring request.", + uuid_utoa(op_req.uuid)); + ret = -1; + goto out; + } + + dict = dict_new(); + if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL); + goto out; + } + + ret = dict_unserialize(op_req.dict.dict_val, op_req.dict.dict_len, &dict); + if (ret) { + gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, + NULL); + goto out; + } + + rsp_dict = dict_new(); + if (!rsp_dict) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL); + return -1; + } + + ret = gd_mgmt_v3_post_commit_fn(op_req.op, dict, &op_errstr, &op_errno, + rsp_dict); + + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_COMMIT_OP_FAIL, + "post commit failed on operation %s", gd_op_list[op_req.op]); + } + + ret = glusterd_mgmt_v3_post_commit_send_resp(req, op_req.op, ret, op_errstr, + op_errno, rsp_dict); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_OP_RESP_FAIL, + "Failed to send post commit " + "response for operation %s", + gd_op_list[op_req.op]); + goto out; + } + +out: + if (op_errstr && (strcmp(op_errstr, ""))) + GF_FREE(op_errstr); + + free(op_req.dict.dict_val); + + if (dict) + dict_unref(dict); + + if (rsp_dict) + dict_unref(rsp_dict); + + /* Return 0 from handler to avoid double deletion of req obj */ + return 0; +} + +static int glusterd_mgmt_v3_post_validate_send_resp(rpcsvc_request_t *req, int32_t op, int32_t status, char *op_errstr, dict_t *rsp_dict) @@ -646,8 +781,8 @@ 
glusterd_mgmt_v3_post_validate_send_resp(rpcsvc_request_t *req, int32_t op, ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val, &rsp.dict.dict_len); if (ret < 0) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL, - "failed to get serialized length of dict"); + gf_smsg(this->name, GF_LOG_ERROR, 0, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; } @@ -696,20 +831,21 @@ glusterd_handle_post_validate_fn(rpcsvc_request_t *req) } dict = dict_new(); - if (!dict) + if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } ret = dict_unserialize(op_req.dict.dict_val, op_req.dict.dict_len, &dict); if (ret) { - gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, - "failed to unserialize the dictionary"); + gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, + NULL); goto out; } rsp_dict = dict_new(); if (!rsp_dict) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, - "Failed to get new dictionary"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL); return -1; } @@ -867,6 +1003,7 @@ glusterd_handle_mgmt_v3_unlock_fn(rpcsvc_request_t *req) ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_op_lock_ctx_t); if (!ctx) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_MEMORY, NULL); ret = -1; goto out; } @@ -876,6 +1013,7 @@ glusterd_handle_mgmt_v3_unlock_fn(rpcsvc_request_t *req) ctx->dict = dict_new(); if (!ctx->dict) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL); ret = -1; goto out; } @@ -883,8 +1021,8 @@ glusterd_handle_mgmt_v3_unlock_fn(rpcsvc_request_t *req) ret = dict_unserialize(lock_req.dict.dict_val, lock_req.dict.dict_len, &ctx->dict); if (ret) { - gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, - "failed to unserialize the dictionary"); + gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL, + NULL); goto out; } @@ -955,6 +1093,12 @@ 
glusterd_handle_commit(rpcsvc_request_t *req) } static int +glusterd_handle_post_commit(rpcsvc_request_t *req) +{ + return glusterd_big_locked_handler(req, glusterd_handle_post_commit_fn); +} + +static int glusterd_handle_post_validate(rpcsvc_request_t *req) { return glusterd_big_locked_handler(req, glusterd_handle_post_validate_fn); @@ -978,6 +1122,9 @@ static rpcsvc_actor_t gd_svc_mgmt_v3_actors[GLUSTERD_MGMT_V3_MAXVALUE] = { GLUSTERD_MGMT_V3_BRICK_OP, DRC_NA, 0}, [GLUSTERD_MGMT_V3_COMMIT] = {"COMMIT", glusterd_handle_commit, NULL, GLUSTERD_MGMT_V3_COMMIT, DRC_NA, 0}, + [GLUSTERD_MGMT_V3_POST_COMMIT] = {"POST_COMMIT", + glusterd_handle_post_commit, NULL, + GLUSTERD_MGMT_V3_POST_COMMIT, DRC_NA, 0}, [GLUSTERD_MGMT_V3_POST_VALIDATE] = {"POST_VAL", glusterd_handle_post_validate, NULL, GLUSTERD_MGMT_V3_POST_VALIDATE, DRC_NA, diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c index bf9b5a870a0..bca7221062b 100644 --- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c +++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c @@ -86,6 +86,11 @@ gd_mgmt_v3_collate_errors(struct syncargs *args, int op_ret, int op_errno, peer_str, err_string); break; } + case GLUSTERD_MGMT_V3_POST_COMMIT: { + snprintf(op_err, sizeof(op_err), "Post commit failed on %s. %s", + peer_str, err_string); + break; + } case GLUSTERD_MGMT_V3_POST_VALIDATE: { snprintf(op_err, sizeof(op_err), "Post Validation failed on %s. 
%s", peer_str, @@ -187,6 +192,16 @@ gd_mgmt_v3_pre_validate_fn(glusterd_op_t op, dict_t *dict, char **op_errstr, goto out; } break; + case GD_OP_REMOVE_BRICK: + ret = glusterd_op_stage_remove_brick(dict, op_errstr); + if (ret) { + gf_msg(this->name, GF_LOG_WARNING, 0, + GD_MSG_PRE_VALIDATION_FAIL, + "Remove brick prevalidation failed."); + goto out; + } + break; + case GD_OP_RESET_BRICK: ret = glusterd_reset_brick_prevalidate(dict, op_errstr, rsp_dict); if (ret) { @@ -337,6 +352,15 @@ gd_mgmt_v3_commit_fn(glusterd_op_t op, dict_t *dict, char **op_errstr, } break; } + case GD_OP_REMOVE_BRICK: { + ret = glusterd_op_remove_brick(dict, op_errstr); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL, + "Remove-brick commit failed."); + goto out; + } + break; + } case GD_OP_RESET_BRICK: { ret = glusterd_op_reset_brick(dict, rsp_dict); if (ret) { @@ -386,6 +410,47 @@ out: } int32_t +gd_mgmt_v3_post_commit_fn(glusterd_op_t op, dict_t *dict, char **op_errstr, + uint32_t *op_errno, dict_t *rsp_dict) +{ + int32_t ret = -1; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); + GF_ASSERT(dict); + GF_ASSERT(op_errstr); + GF_VALIDATE_OR_GOTO(this->name, op_errno, out); + GF_ASSERT(rsp_dict); + + switch (op) { + case GD_OP_ADD_BRICK: + ret = glusterd_post_commit_add_brick(dict, op_errstr); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_COMMIT_OP_FAIL, + "Add-brick post commit failed."); + goto out; + } + break; + case GD_OP_REPLACE_BRICK: + ret = glusterd_post_commit_replace_brick(dict, op_errstr); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_COMMIT_OP_FAIL, + "Replace-brick post commit failed."); + goto out; + } + break; + default: + break; + } + + ret = 0; +out: + gf_msg_debug(this->name, 0, "OP = %d. 
Returning %d", op, ret); + return ret; +} + +int32_t gd_mgmt_v3_post_validate_fn(glusterd_op_t op, int32_t op_ret, dict_t *dict, char **op_errstr, dict_t *rsp_dict) { @@ -582,15 +647,21 @@ gd_mgmt_v3_lock(glusterd_op_t op, dict_t *op_ctx, glusterd_peerinfo_t *peerinfo, ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val, &req.dict.dict_len); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; + } gf_uuid_copy(req.uuid, my_uuid); req.op = op; GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_ALLOC_AND_COPY_UUID_FAIL, NULL); goto out; + } ret = gd_syncop_submit_request(peerinfo->rpc, &req, args, peerid, &gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_LOCK, @@ -759,6 +830,7 @@ glusterd_pre_validate_aggr_rsp_dict(glusterd_op_t op, dict_t *aggr, dict_t *rsp) goto out; } case GD_OP_STOP_VOLUME: + case GD_OP_REMOVE_BRICK: case GD_OP_PROFILE_VOLUME: case GD_OP_DEFRAG_BRICK_VOLUME: case GD_OP_REBALANCE: @@ -897,15 +969,21 @@ gd_mgmt_v3_pre_validate_req(glusterd_op_t op, dict_t *op_ctx, ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val, &req.dict.dict_len); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; + } gf_uuid_copy(req.uuid, my_uuid); req.op = op; GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_ALLOC_AND_COPY_UUID_FAIL, NULL); goto out; + } ret = gd_syncop_submit_request( peerinfo->rpc, &req, args, peerid, &gd_mgmt_v3_prog, @@ -948,7 +1026,7 @@ glusterd_mgmt_v3_pre_validate(glusterd_op_t op, dict_t *req_dict, } if (op == GD_OP_PROFILE_VOLUME || op == GD_OP_STOP_VOLUME || - op == GD_OP_REBALANCE) { + op == GD_OP_REBALANCE || op == GD_OP_REMOVE_BRICK) { ret = glusterd_validate_quorum(this, op, req_dict, op_errstr); if (ret) { gf_msg(this->name, GF_LOG_ERROR, 
0, GD_MSG_SERVER_QUORUM_NOT_MET, @@ -1076,6 +1154,7 @@ glusterd_mgmt_v3_build_payload(dict_t **req, char **op_errstr, dict_t *dict, case GD_OP_START_VOLUME: case GD_OP_STOP_VOLUME: case GD_OP_ADD_BRICK: + case GD_OP_REMOVE_BRICK: case GD_OP_DEFRAG_BRICK_VOLUME: case GD_OP_REPLACE_BRICK: case GD_OP_RESET_BRICK: @@ -1258,15 +1337,21 @@ gd_mgmt_v3_brick_op_req(glusterd_op_t op, dict_t *op_ctx, ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val, &req.dict.dict_len); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; + } gf_uuid_copy(req.uuid, my_uuid); req.op = op; GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_ALLOC_AND_COPY_UUID_FAIL, NULL); goto out; + } ret = gd_syncop_submit_request(peerinfo->rpc, &req, args, peerid, &gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_BRICK_OP, @@ -1515,15 +1600,21 @@ gd_mgmt_v3_commit_req(glusterd_op_t op, dict_t *op_ctx, ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val, &req.dict.dict_len); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; + } gf_uuid_copy(req.uuid, my_uuid); req.op = op; GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_ALLOC_AND_COPY_UUID_FAIL, NULL); goto out; + } ret = gd_syncop_submit_request(peerinfo->rpc, &req, args, peerid, &gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_COMMIT, @@ -1559,12 +1650,25 @@ glusterd_mgmt_v3_commit(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict, GF_ASSERT(op_errstr); GF_VALIDATE_OR_GOTO(this->name, op_errno, out); - if (op == GD_OP_REBALANCE || op == GD_OP_DEFRAG_BRICK_VOLUME) { - ret = glusterd_set_rebalance_id_in_rsp_dict(req_dict, op_ctx); - if (ret) { - gf_log(this->name, GF_LOG_WARNING, - "Failed to set rebalance id in dict."); - } + switch (op) { + case 
GD_OP_REBALANCE: + case GD_OP_DEFRAG_BRICK_VOLUME: + + ret = glusterd_set_rebalance_id_in_rsp_dict(req_dict, op_ctx); + if (ret) { + gf_log(this->name, GF_LOG_WARNING, + "Failed to set rebalance id in dict."); + } + break; + case GD_OP_REMOVE_BRICK: + ret = glusterd_set_rebalance_id_for_remove_brick(req_dict, op_ctx); + if (ret) { + gf_log(this->name, GF_LOG_WARNING, + "Failed to set rebalance id for remove-brick in dict."); + } + break; + default: + break; } rsp_dict = dict_new(); if (!rsp_dict) { @@ -1662,6 +1766,274 @@ out: } int32_t +gd_mgmt_v3_post_commit_cbk_fn(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + int32_t ret = -1; + struct syncargs *args = NULL; + gd1_mgmt_v3_post_commit_rsp rsp = { + {0}, + }; + call_frame_t *frame = NULL; + int32_t op_ret = -1; + int32_t op_errno = -1; + dict_t *rsp_dict = NULL; + xlator_t *this = NULL; + uuid_t *peerid = NULL; + + this = THIS; + GF_ASSERT(this); + GF_ASSERT(req); + GF_ASSERT(myframe); + + frame = myframe; + args = frame->local; + peerid = frame->cookie; + frame->local = NULL; + frame->cookie = NULL; + + if (-1 == req->rpc_status) { + op_errno = ENOTCONN; + goto out; + } + + GF_VALIDATE_OR_GOTO_WITH_ERROR(this->name, iov, out, op_errno, EINVAL); + + ret = xdr_to_generic(*iov, &rsp, + (xdrproc_t)xdr_gd1_mgmt_v3_post_commit_rsp); + if (ret < 0) + goto out; + + if (rsp.dict.dict_len) { + /* Unserialize the dictionary */ + rsp_dict = dict_new(); + + ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict); + if (ret < 0) { + free(rsp.dict.dict_val); + goto out; + } else { + rsp_dict->extra_stdfree = rsp.dict.dict_val; + } + } + + gf_uuid_copy(args->uuid, rsp.uuid); + pthread_mutex_lock(&args->lock_dict); + { + ret = glusterd_syncop_aggr_rsp_dict(rsp.op, args->dict, rsp_dict); + } + pthread_mutex_unlock(&args->lock_dict); + + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RESP_AGGR_FAIL, "%s", + "Failed to aggregate response from " + " node/brick"); + if (!rsp.op_ret) 
+ op_ret = ret; + else { + op_ret = rsp.op_ret; + op_errno = rsp.op_errno; + } + } else { + op_ret = rsp.op_ret; + op_errno = rsp.op_errno; + } + +out: + if (rsp_dict) + dict_unref(rsp_dict); + + gd_mgmt_v3_collate_errors(args, op_ret, op_errno, rsp.op_errstr, + GLUSTERD_MGMT_V3_POST_COMMIT, *peerid, rsp.uuid); + GF_FREE(peerid); + + if (rsp.op_errstr) + free(rsp.op_errstr); + + /* req->rpc_status set to -1 means, STACK_DESTROY will be called from + * the caller function. + */ + if (req->rpc_status != -1) + STACK_DESTROY(frame->root); + synctask_barrier_wake(args); + return 0; +} + +int32_t +gd_mgmt_v3_post_commit_cbk(struct rpc_req *req, struct iovec *iov, int count, + void *myframe) +{ + return glusterd_big_locked_cbk(req, iov, count, myframe, + gd_mgmt_v3_post_commit_cbk_fn); +} + +int +gd_mgmt_v3_post_commit_req(glusterd_op_t op, dict_t *op_ctx, + glusterd_peerinfo_t *peerinfo, struct syncargs *args, + uuid_t my_uuid, uuid_t recv_uuid) +{ + int32_t ret = -1; + gd1_mgmt_v3_post_commit_req req = { + {0}, + }; + xlator_t *this = NULL; + uuid_t *peerid = NULL; + + this = THIS; + GF_ASSERT(this); + GF_ASSERT(op_ctx); + GF_ASSERT(peerinfo); + GF_ASSERT(args); + + ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val, + &req.dict.dict_len); + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); + goto out; + } + + gf_uuid_copy(req.uuid, my_uuid); + req.op = op; + + GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret); + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_ALLOC_AND_COPY_UUID_FAIL, NULL); + goto out; + } + + ret = gd_syncop_submit_request( + peerinfo->rpc, &req, args, peerid, &gd_mgmt_v3_prog, + GLUSTERD_MGMT_V3_POST_COMMIT, gd_mgmt_v3_post_commit_cbk, + (xdrproc_t)xdr_gd1_mgmt_v3_post_commit_req); +out: + GF_FREE(req.dict.dict_val); + gf_msg_trace(this->name, 0, "Returning %d", ret); + return ret; +} + +int +glusterd_mgmt_v3_post_commit(glusterd_op_t op, dict_t *op_ctx, dict_t 
*req_dict, + char **op_errstr, uint32_t *op_errno, + uint32_t txn_generation) +{ + int32_t ret = -1; + int32_t peer_cnt = 0; + dict_t *rsp_dict = NULL; + glusterd_peerinfo_t *peerinfo = NULL; + struct syncargs args = {0}; + uuid_t peer_uuid = {0}; + xlator_t *this = NULL; + glusterd_conf_t *conf = NULL; + + this = THIS; + GF_ASSERT(this); + conf = this->private; + GF_ASSERT(conf); + + GF_ASSERT(op_ctx); + GF_ASSERT(req_dict); + GF_ASSERT(op_errstr); + GF_VALIDATE_OR_GOTO(this->name, op_errno, out); + + rsp_dict = dict_new(); + if (!rsp_dict) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, + "Failed to create response dictionary"); + goto out; + } + + /* Post commit on local node */ + ret = gd_mgmt_v3_post_commit_fn(op, req_dict, op_errstr, op_errno, + rsp_dict); + + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_COMMIT_OP_FAIL, + "Post commit failed for " + "operation %s on local node", + gd_op_list[op]); + + if (*op_errstr == NULL) { + ret = gf_asprintf(op_errstr, + "Post commit failed " + "on localhost. 
Please " + "check log file for details."); + if (ret == -1) + *op_errstr = NULL; + + ret = -1; + } + goto out; + } + + ret = glusterd_syncop_aggr_rsp_dict(op, op_ctx, rsp_dict); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RESP_AGGR_FAIL, "%s", + "Failed to aggregate response from " + " node/brick"); + goto out; + } + + dict_unref(rsp_dict); + rsp_dict = NULL; + + /* Sending post commit req to other nodes in the cluster */ + gd_syncargs_init(&args, op_ctx); + ret = synctask_barrier_init((&args)); + if (ret) + goto out; + peer_cnt = 0; + + RCU_READ_LOCK; + cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list) + { + /* Only send requests to peers who were available before the + * transaction started + */ + if (peerinfo->generation > txn_generation) + continue; + if (!peerinfo->connected) + continue; + + if (op != GD_OP_SYNC_VOLUME && + peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED) + continue; + + gd_mgmt_v3_post_commit_req(op, req_dict, peerinfo, &args, MY_UUID, + peer_uuid); + peer_cnt++; + } + RCU_READ_UNLOCK; + + if (0 == peer_cnt) { + ret = 0; + goto out; + } + + gd_synctask_barrier_wait((&args), peer_cnt); + + if (args.op_ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_COMMIT_OP_FAIL, + "Post commit failed on peers"); + + if (args.errstr) + *op_errstr = gf_strdup(args.errstr); + } + + ret = args.op_ret; + *op_errno = args.op_errno; + + gf_msg_debug(this->name, 0, + "Sent post commit req for %s to %d " + "peers. 
Returning %d", + gd_op_list[op], peer_cnt, ret); +out: + glusterd_op_modify_op_ctx(op, op_ctx); + return ret; +} + +int32_t gd_mgmt_v3_post_validate_cbk_fn(struct rpc_req *req, struct iovec *iov, int count, void *myframe) { @@ -1751,16 +2123,22 @@ gd_mgmt_v3_post_validate_req(glusterd_op_t op, int32_t op_ret, dict_t *op_ctx, ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val, &req.dict.dict_len); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; + } gf_uuid_copy(req.uuid, my_uuid); req.op = op; req.op_ret = op_ret; GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_ALLOC_AND_COPY_UUID_FAIL, NULL); goto out; + } ret = gd_syncop_submit_request( peerinfo->rpc, &req, args, peerid, &gd_mgmt_v3_prog, @@ -1967,15 +2345,21 @@ gd_mgmt_v3_unlock(glusterd_op_t op, dict_t *op_ctx, ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val, &req.dict.dict_len); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; + } gf_uuid_copy(req.uuid, my_uuid); req.op = op; GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_ALLOC_AND_COPY_UUID_FAIL, NULL); goto out; + } ret = gd_syncop_submit_request(peerinfo->rpc, &req, args, peerid, &gd_mgmt_v3_prog, GLUSTERD_MGMT_V3_UNLOCK, @@ -2338,6 +2722,15 @@ glusterd_mgmt_v3_initiate_all_phases(rpcsvc_request_t *req, glusterd_op_t op, goto out; } + /* POST COMMIT OP PHASE */ + ret = glusterd_mgmt_v3_post_commit(op, dict, req_dict, &op_errstr, + &op_errno, txn_generation); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_COMMIT_OP_FAIL, + "Post commit Op Failed"); + goto out; + } + /* POST-COMMIT VALIDATE PHASE */ /* As of now, post_validate is not trying to cleanup any failed commands. 
So as of now, I am sending 0 (op_ret as 0). diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-mgmt.h index 71f793d0397..27dd1849519 100644 --- a/xlators/mgmt/glusterd/src/glusterd-mgmt.h +++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.h @@ -28,6 +28,10 @@ gd_mgmt_v3_commit_fn(glusterd_op_t op, dict_t *dict, char **op_errstr, uint32_t *op_errno, dict_t *rsp_dict); int32_t +gd_mgmt_v3_post_commit_fn(glusterd_op_t op, dict_t *dict, char **op_errstr, + uint32_t *op_errno, dict_t *rsp_dict); + +int32_t gd_mgmt_v3_post_validate_fn(glusterd_op_t op, int32_t op_ret, dict_t *dict, char **op_errstr, dict_t *rsp_dict); @@ -84,4 +88,10 @@ glusterd_reset_brick_prevalidate(dict_t *dict, char **op_errstr, dict_t *rsp_dict); int glusterd_op_reset_brick(dict_t *dict, dict_t *rsp_dict); + +int +glusterd_post_commit_add_brick(dict_t *dict, char **op_errstr); + +int +glusterd_post_commit_replace_brick(dict_t *dict, char **op_errstr); #endif /* _GLUSTERD_MGMT_H_ */ diff --git a/xlators/mgmt/glusterd/src/glusterd-mountbroker.c b/xlators/mgmt/glusterd/src/glusterd-mountbroker.c index 9c4b2fb18cc..645d845ee76 100644 --- a/xlators/mgmt/glusterd/src/glusterd-mountbroker.c +++ b/xlators/mgmt/glusterd/src/glusterd-mountbroker.c @@ -81,6 +81,7 @@ parse_mount_pattern_desc(gf_mount_spec_t *mspec, char *pdesc) mspec->patterns = GF_CALLOC(mspec->len, sizeof(*mspec->patterns), gf_gld_mt_mount_pattern); if (!mspec->patterns) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); ret = -1; goto out; } @@ -261,8 +262,11 @@ make_georep_mountspec(gf_mount_spec_t *mspec, const char *volnames, char *user, int ret = 0; vols = gf_strdup((char *)volnames); - if (!vols) + if (!vols) { + gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, + "Volume name=%s", volnames, NULL); goto out; + } for (vc = 1, p = vols; *p; p++) { if (*p == ',') @@ -270,8 +274,10 @@ make_georep_mountspec(gf_mount_spec_t *mspec, const char *volnames, char *user, } siz = 
strlen(volnames) + vc * SLEN("volfile-id="); meetspec = GF_CALLOC(1, siz + 1, gf_gld_mt_georep_meet_spec); - if (!meetspec) + if (!meetspec) { + gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); goto out; + } for (p = vols;;) { vol = strtok_r(p, ",", &savetok); diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c index 14915b3fc17..c537fc33a85 100644 --- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c +++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c @@ -106,6 +106,7 @@ glusterd_txn_opinfo_dict_init() priv->glusterd_txn_opinfo = dict_new(); if (!priv->glusterd_txn_opinfo) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); ret = -1; goto out; } @@ -178,8 +179,10 @@ glusterd_generate_txn_id(dict_t *dict, uuid_t **txn_id) GF_ASSERT(dict); *txn_id = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t); - if (!*txn_id) + if (!*txn_id) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); goto out; + } if (priv->op_version < GD_OP_VERSION_3_6_0) gf_uuid_copy(**txn_id, priv->global_txn_id); @@ -541,8 +544,11 @@ glusterd_brick_op_build_payload(glusterd_op_t op, case GD_OP_STOP_VOLUME: brick_req = GF_CALLOC(1, sizeof(*brick_req), gf_gld_mt_mop_brick_req_t); - if (!brick_req) + if (!brick_req) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, + NULL); goto out; + } brick_req->op = GLUSTERD_BRICK_TERMINATE; brick_req->name = brickinfo->path; glusterd_set_brick_status(brickinfo, GF_BRICK_STOPPING); @@ -551,8 +557,11 @@ glusterd_brick_op_build_payload(glusterd_op_t op, brick_req = GF_CALLOC(1, sizeof(*brick_req), gf_gld_mt_mop_brick_req_t); - if (!brick_req) + if (!brick_req) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, + NULL); goto out; + } brick_req->op = GLUSTERD_BRICK_XLATOR_INFO; brick_req->name = brickinfo->path; @@ -561,45 +570,69 @@ glusterd_brick_op_build_payload(glusterd_op_t op, case GD_OP_HEAL_VOLUME: { brick_req = GF_CALLOC(1, 
sizeof(*brick_req), gf_gld_mt_mop_brick_req_t); - if (!brick_req) + if (!brick_req) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, + NULL); goto out; + } brick_req->op = GLUSTERD_BRICK_XLATOR_OP; brick_req->name = ""; ret = dict_get_int32n(dict, "heal-op", SLEN("heal-op"), (int32_t *)&heal_op); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=heal-op", NULL); goto out; + } ret = dict_set_int32n(dict, "xl-op", SLEN("xl-op"), heal_op); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=xl-op", NULL); goto out; + } } break; case GD_OP_STATUS_VOLUME: { brick_req = GF_CALLOC(1, sizeof(*brick_req), gf_gld_mt_mop_brick_req_t); - if (!brick_req) + if (!brick_req) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, + NULL); goto out; + } brick_req->op = GLUSTERD_BRICK_STATUS; brick_req->name = ""; ret = dict_set_strn(dict, "brick-name", SLEN("brick-name"), brickinfo->path); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=brick-name", NULL); goto out; + } } break; case GD_OP_REBALANCE: case GD_OP_DEFRAG_BRICK_VOLUME: brick_req = GF_CALLOC(1, sizeof(*brick_req), gf_gld_mt_mop_brick_req_t); - if (!brick_req) + if (!brick_req) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, + NULL); goto out; + } brick_req->op = GLUSTERD_BRICK_XLATOR_DEFRAG; ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=volname", NULL); goto out; + } ret = glusterd_volinfo_find(volname, &volinfo); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_VOLINFO_GET_FAIL, "Volume=%s", volname, NULL); goto out; + } snprintf(name, sizeof(name), "%s-dht", volname); brick_req->name = gf_strdup(name); @@ -608,8 +641,11 @@ glusterd_brick_op_build_payload(glusterd_op_t op, case GD_OP_BARRIER: 
brick_req = GF_CALLOC(1, sizeof(*brick_req), gf_gld_mt_mop_brick_req_t); - if (!brick_req) + if (!brick_req) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, + NULL); goto out; + } brick_req->op = GLUSTERD_BRICK_BARRIER; brick_req->name = brickinfo->path; break; @@ -623,8 +659,11 @@ glusterd_brick_op_build_payload(glusterd_op_t op, brick_req->dict.dict_val = NULL; ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val, &brick_req->input.input_len); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; + } *req = brick_req; ret = 0; @@ -646,13 +685,19 @@ glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req, GF_ASSERT(op < GD_OP_MAX); GF_ASSERT(op > GD_OP_NONE); GF_ASSERT(req); + xlator_t *this = NULL; + this = THIS; + GF_ASSERT(this); switch (op) { case GD_OP_PROFILE_VOLUME: brick_req = GF_CALLOC(1, sizeof(*brick_req), gf_gld_mt_mop_brick_req_t); - if (!brick_req) + if (!brick_req) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, + NULL); goto out; + } brick_req->op = GLUSTERD_NODE_PROFILE; brick_req->name = ""; @@ -662,8 +707,11 @@ glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req, case GD_OP_STATUS_VOLUME: brick_req = GF_CALLOC(1, sizeof(*brick_req), gf_gld_mt_mop_brick_req_t); - if (!brick_req) + if (!brick_req) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, + NULL); goto out; + } brick_req->op = GLUSTERD_NODE_STATUS; brick_req->name = ""; @@ -674,14 +722,20 @@ glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req, case GD_OP_SCRUB_ONDEMAND: brick_req = GF_CALLOC(1, sizeof(*brick_req), gf_gld_mt_mop_brick_req_t); - if (!brick_req) + if (!brick_req) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, + NULL); goto out; + } brick_req->op = GLUSTERD_NODE_BITROT; ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); - if (ret) + if (ret) { + 
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=volname", NULL); goto out; + } brick_req->name = gf_strdup(volname); break; @@ -694,8 +748,11 @@ glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req, ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val, &brick_req->input.input_len); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; + } *req = brick_req; ret = 0; @@ -703,7 +760,7 @@ glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req, out: if (ret && brick_req) GF_FREE(brick_req); - gf_msg_debug(THIS->name, 0, "Returning %d", ret); + gf_msg_debug(this->name, 0, "Returning %d", ret); return ret; } @@ -719,12 +776,14 @@ glusterd_validate_quorum_options(xlator_t *this, char *fullkey, char *value, goto out; key = strchr(fullkey, '.'); if (key == NULL) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL); ret = -1; goto out; } key++; opt = xlator_volume_option_get(this, key); if (!opt) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOLINFO_GET_FAIL, NULL); ret = -1; goto out; } @@ -988,8 +1047,8 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr) if (check_op_version) { ret = dict_get_uint32(dict, "new-op-version", &new_op_version); if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Failed to get new_op_version"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=new-op-version", NULL); goto out; } @@ -1043,8 +1102,8 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr) ret = dict_get_str_sizen(dict, "volname", &volname); if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Unable to get volume name"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=volname", NULL); goto out; } @@ -1069,14 +1128,19 @@ glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr) } val_dict = 
dict_new(); - if (!val_dict) + if (!val_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } for (count = 1; ret != 1; count++) { keystr_len = sprintf(keystr, "key%d", count); ret = dict_get_strn(dict, keystr, keystr_len, &key); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=%s", keystr, NULL); break; + } keystr_len = sprintf(keystr, "value%d", count); ret = dict_get_strn(dict, keystr, keystr_len, &value); @@ -1603,12 +1667,17 @@ glusterd_op_stage_sync_volume(dict_t *dict, char **op_errstr) 0, }; glusterd_volinfo_t *volinfo = NULL; + xlator_t *this = NULL; + this = THIS; + GF_ASSERT(this); ret = dict_get_strn(dict, "hostname", SLEN("hostname"), &hostname); if (ret) { snprintf(msg, sizeof(msg), "hostname couldn't be " "retrieved from msg"); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=hostname", NULL); *op_errstr = gf_strdup(msg); goto out; } @@ -1623,6 +1692,8 @@ glusterd_op_stage_sync_volume(dict_t *dict, char **op_errstr) "Volume %s " "does not exist", volname); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOL_NOT_FOUND, + "Volume=%s", volname, NULL); *op_errstr = gf_strdup(msg); goto out; } @@ -1635,6 +1706,8 @@ glusterd_op_stage_sync_volume(dict_t *dict, char **op_errstr) RCU_READ_UNLOCK; ret = -1; snprintf(msg, sizeof(msg), "%s, is not a friend", hostname); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_NOT_FOUND, + "Peer_name=%s", hostname, NULL); *op_errstr = gf_strdup(msg); goto out; @@ -1645,6 +1718,8 @@ glusterd_op_stage_sync_volume(dict_t *dict, char **op_errstr) "%s, is not connected at " "the moment", hostname); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_DISCONNECTED, + "Peer_name=%s", hostname, NULL); *op_errstr = gf_strdup(msg); goto out; } @@ -1685,8 +1760,11 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr) GF_ASSERT(priv); ret = dict_get_uint32(dict, "cmd", &cmd); - if (ret) + if 
(ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=cmd", NULL); goto out; + } if (cmd & GF_CLI_STATUS_ALL) goto out; @@ -1697,6 +1775,8 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr) "The cluster is operating at " "version 1. Getting the status of quotad is not " "allowed in this state."); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_QUOTA_GET_STAT_FAIL, + msg, NULL); ret = -1; goto out; } @@ -1708,6 +1788,8 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr) "version less than %d. Getting the " "status of snapd is not allowed in this state.", GD_OP_VERSION_3_6_0); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SNAP_STATUS_FAIL, msg, + NULL); ret = -1; goto out; } @@ -1722,17 +1804,23 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr) ret = glusterd_volinfo_find(volname, &volinfo); if (ret) { snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOLINFO_GET_FAIL, + "Volume=%s", volname, NULL); ret = -1; goto out; } ret = glusterd_validate_volume_id(dict, volinfo); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VALIDATE_FAILED, NULL); goto out; + } ret = glusterd_is_volume_started(volinfo); if (!ret) { snprintf(msg, sizeof(msg), "Volume %s is not started", volname); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOL_NOT_STARTED, + "Volume=%s", volname, NULL); ret = -1; goto out; } @@ -1746,12 +1834,16 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr) ret = -1; snprintf(msg, sizeof(msg), "Volume %s is not Self-heal compatible", volname); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOL_SHD_NOT_COMP, + "Volume=%s", volname, NULL); goto out; } if (!shd_enabled) { ret = -1; snprintf(msg, sizeof(msg), "Self-heal Daemon is disabled for volume %s", volname); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SELF_HEALD_DISABLED, + "Volume=%s", volname, NULL); goto out; } #ifdef 
BUILD_GNFS @@ -1762,6 +1854,8 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr) ret = -1; snprintf(msg, sizeof(msg), "NFS server is disabled for volume %s", volname); + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_NFS_GANESHA_DISABLED, "Volume=%s", volname, NULL); goto out; } #endif @@ -1772,6 +1866,8 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr) "Volume %s does not have " "quota enabled", volname); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_QUOTA_DISABLED, + "Volume=%s", volname, NULL); goto out; } } else if ((cmd & GF_CLI_STATUS_BITD) != 0) { @@ -1781,6 +1877,8 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr) "Volume %s does not have " "bitrot enabled", volname); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_BITROT_NOT_ENABLED, + "Volume=%s", volname, NULL); goto out; } } else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) { @@ -1791,6 +1889,10 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr) "bitrot enabled. 
Scrubber will be enabled " "automatically if bitrot is enabled", volname); + gf_smsg( + this->name, GF_LOG_ERROR, errno, GD_MSG_BITROT_NOT_ENABLED, + "Scrubber will be enabled automatically if bitrot is enabled", + "Volume=%s", volname, NULL); goto out; } } else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) { @@ -1800,12 +1902,17 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr) "Volume %s does not have " "uss enabled", volname); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SNAPD_NOT_RUNNING, + "Volume=%s", volname, NULL); goto out; } } else if ((cmd & GF_CLI_STATUS_BRICK) != 0) { ret = dict_get_strn(dict, "brick", SLEN("brick"), &brick); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=brick", NULL); goto out; + } ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo, _gf_false); @@ -1814,6 +1921,8 @@ glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr) "No brick %s in" " volume %s", brick, volname); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_BRICK_NOT_FOUND, + "Brick=%s, Volume=%s", brick, volname, NULL); ret = -1; goto out; } @@ -2100,8 +2209,10 @@ glusterd_op_reset_all_volume_options(xlator_t *this, dict_t *dict) ret = -1; dup_opt = dict_new(); - if (!dup_opt) + if (!dup_opt) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } if (!all) { dict_copy(conf->opts, dup_opt); dict_del(dup_opt, key); @@ -2112,8 +2223,11 @@ glusterd_op_reset_all_volume_options(xlator_t *this, dict_t *dict) ret = dict_set_strn(dup_opt, GLUSTERD_GLOBAL_OPT_VERSION, SLEN(GLUSTERD_GLOBAL_OPT_VERSION), next_version); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL); goto out; + } ret = glusterd_store_options(this, dup_opt); if (ret) @@ -2124,9 +2238,11 @@ glusterd_op_reset_all_volume_options(xlator_t *this, dict_t *dict) ret = dict_set_dynstrn(conf->opts, 
GLUSTERD_GLOBAL_OPT_VERSION, SLEN(GLUSTERD_GLOBAL_OPT_VERSION), next_version); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL); goto out; - else + } else next_version = NULL; if (!all) { @@ -2410,8 +2526,11 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict, conf = this->private; ret = dict_get_strn(dict, "key1", SLEN("key1"), &key); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=key1", NULL); goto out; + } ret = dict_get_strn(dict, "value1", SLEN("value1"), &value); if (ret) { @@ -2530,12 +2649,17 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict, } ret = -1; dup_opt = dict_new(); - if (!dup_opt) + if (!dup_opt) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } dict_copy(conf->opts, dup_opt); ret = dict_set_str(dup_opt, key, value); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } ret = glusterd_get_next_global_opt_version_str(conf->opts, &next_version); if (ret) @@ -2543,8 +2667,11 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict, ret = dict_set_strn(dup_opt, GLUSTERD_GLOBAL_OPT_VERSION, SLEN(GLUSTERD_GLOBAL_OPT_VERSION), next_version); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL); goto out; + } ret = glusterd_store_options(this, dup_opt); if (ret) @@ -2555,9 +2682,11 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict, ret = dict_set_dynstrn(conf->opts, GLUSTERD_GLOBAL_OPT_VERSION, SLEN(GLUSTERD_GLOBAL_OPT_VERSION), next_version); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL); goto out; - else + } else next_version = NULL; dup_value = gf_strdup(value); @@ 
-2565,9 +2694,11 @@ glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict, goto out; ret = dict_set_dynstr(conf->opts, key, dup_value); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; - else + } else dup_value = NULL; /* Protect the allocation from GF_FREE */ out: @@ -3002,6 +3133,8 @@ glusterd_op_sync_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) snprintf(msg, sizeof(msg), "hostname couldn't be " "retrieved from msg"); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=hostname", NULL); *op_errstr = gf_strdup(msg); goto out; } @@ -3026,6 +3159,7 @@ glusterd_op_sync_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) if (!rsp_dict) { // this should happen only on source + gf_smsg(this->name, GF_LOG_INFO, errno, GD_MSG_INVALID_ARGUMENT, NULL); ret = 0; goto out; } @@ -3582,27 +3716,30 @@ glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) } ret = dict_set_int32n(rsp_dict, "type", SLEN("type"), volinfo->type); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=type", NULL); goto out; + } ret = dict_set_int32n(rsp_dict, "brick-index-max", SLEN("brick-index-max"), brick_index); if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, - "Error setting brick-index-max to dict"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Key=brick-index-max", NULL); goto out; } ret = dict_set_int32n(rsp_dict, "other-count", SLEN("other-count"), other_count); if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, - "Error setting other-count to dict"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Key=other-count", NULL); goto out; } ret = dict_set_int32n(rsp_dict, "count", SLEN("count"), node_count); if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, - "Error setting node count to dict"); + 
gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Key=count", NULL); goto out; } @@ -4079,8 +4216,10 @@ glusterd_dict_set_volid(dict_t *dict, char *volname, char **op_errstr) this = THIS; GF_ASSERT(this); - if (!dict || !volname) + if (!dict || !volname) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } ret = glusterd_volinfo_find(volname, &volinfo); if (ret) { @@ -5838,13 +5977,8 @@ glusterd_op_stage_validate(glusterd_op_t op, dict_t *dict, char **op_errstr, static void glusterd_wait_for_blockers(glusterd_conf_t *priv) { - uint64_t blockers = GF_ATOMIC_GET(priv->blockers); - - while (blockers) { - synclock_unlock(&priv->big_lock); - sleep(1); - blockers = GF_ATOMIC_GET(priv->blockers); - synclock_lock(&priv->big_lock); + while (GF_ATOMIC_GET(priv->blockers)) { + synccond_wait(&priv->cond_blockers, &priv->big_lock); } } @@ -6479,6 +6613,10 @@ _select_hxlators_for_full_self_heal(xlator_t *this, glusterd_volinfo_t *volinfo, glusterd_brickinfo_t *brickinfo = NULL; int hxl_children = 0; uuid_t candidate = {0}; + int brick_index = 0; + glusterd_peerinfo_t *peerinfo = NULL; + int delta = 0; + uuid_t candidate_max = {0}; if ((*index) == 0) (*index)++; @@ -6490,13 +6628,40 @@ _select_hxlators_for_full_self_heal(xlator_t *this, glusterd_volinfo_t *volinfo, cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list) { + if (gf_uuid_compare(brickinfo->uuid, candidate_max) > 0) { + if (!gf_uuid_compare(MY_UUID, brickinfo->uuid)) { + gf_uuid_copy(candidate_max, brickinfo->uuid); + } else { + peerinfo = glusterd_peerinfo_find(brickinfo->uuid, NULL); + if (peerinfo && peerinfo->connected) { + gf_uuid_copy(candidate_max, brickinfo->uuid); + } + } + } + } + + cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list) + { if (gf_uuid_is_null(brickinfo->uuid)) (void)glusterd_resolve_brick(brickinfo); - if (gf_uuid_compare(brickinfo->uuid, candidate) > 0) - gf_uuid_copy(candidate, brickinfo->uuid); + delta %= 
hxl_children; + if ((*index + delta) == (brick_index + hxl_children)) { + if (!gf_uuid_compare(MY_UUID, brickinfo->uuid)) { + gf_uuid_copy(candidate, brickinfo->uuid); + } else { + peerinfo = glusterd_peerinfo_find(brickinfo->uuid, NULL); + if (peerinfo && peerinfo->connected) { + gf_uuid_copy(candidate, brickinfo->uuid); + } else if (peerinfo && + (!gf_uuid_compare(candidate_max, MY_UUID))) { + _add_hxlator_to_dict(dict, volinfo, + ((*index) - 1) / hxl_children, + (*hxlator_count)); + (*hxlator_count)++; + } + } - if ((*index) % hxl_children == 0) { if (!gf_uuid_compare(MY_UUID, candidate)) { _add_hxlator_to_dict(dict, volinfo, ((*index) - 1) / hxl_children, @@ -6504,6 +6669,8 @@ _select_hxlators_for_full_self_heal(xlator_t *this, glusterd_volinfo_t *volinfo, (*hxlator_count)++; } gf_uuid_clear(candidate); + brick_index += hxl_children; + delta++; } (*index)++; diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c index 82acf5bf03c..18d355cb186 100644 --- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c +++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c @@ -367,8 +367,10 @@ glusterd_peerinfo_new(glusterd_friend_sm_state_t state, uuid_t *uuid, GF_ASSERT(conf); new_peer = GF_CALLOC(1, sizeof(*new_peer), gf_gld_mt_peerinfo_t); - if (!new_peer) + if (!new_peer) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); goto out; + } CDS_INIT_LIST_HEAD(&new_peer->uuid_list); @@ -564,12 +566,16 @@ glusterd_peer_hostname_new(const char *hostname, GF_ASSERT(hostname); GF_ASSERT(name); + xlator_t *this = THIS; + GF_ASSERT(this); peer_hostname = GF_CALLOC(1, sizeof(*peer_hostname), gf_gld_mt_peer_hostname_t); - if (!peer_hostname) + if (!peer_hostname) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); goto out; + } peer_hostname->hostname = gf_strdup(hostname); CDS_INIT_LIST_HEAD(&peer_hostname->hostname_list); @@ -900,8 +906,11 @@ gd_add_peer_hostnames_to_dict(glusterd_peerinfo_t 
*peerinfo, dict_t *dict, { snprintf(key, sizeof(key), "%s.hostname%d", prefix, count); ret = dict_set_dynstr_with_alloc(dict, key, addr->hostname); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } count++; } @@ -923,41 +932,61 @@ gd_add_peer_detail_to_dict(glusterd_peerinfo_t *peerinfo, dict_t *friends, int keylen; char *peer_uuid_str = NULL; + xlator_t *this = THIS; + GF_ASSERT(this); GF_ASSERT(peerinfo); GF_ASSERT(friends); peer_uuid_str = gd_peer_uuid_str(peerinfo); keylen = snprintf(key, sizeof(key), "friend%d.uuid", count); ret = dict_set_strn(friends, key, keylen, peer_uuid_str); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s", + key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "friend%d.hostname", count); ret = dict_set_strn(friends, key, keylen, peerinfo->hostname); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s", + key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "friend%d.port", count); ret = dict_set_int32n(friends, key, keylen, peerinfo->port); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s", + key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "friend%d.stateId", count); ret = dict_set_int32n(friends, key, keylen, peerinfo->state.state); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Key=%s in dict", key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "friend%d.state", count); ret = dict_set_strn( friends, key, keylen, glusterd_friend_sm_state_name_get(peerinfo->state.state)); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "key=%s", + key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "friend%d.connected", count); ret = dict_set_int32n(friends, key, keylen, (int32_t)peerinfo->connected); - if (ret) 
+ if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s", + key, NULL); goto out; + } snprintf(key, sizeof(key), "friend%d", count); ret = gd_add_peer_hostnames_to_dict(peerinfo, friends, key); diff --git a/xlators/mgmt/glusterd/src/glusterd-pmap.c b/xlators/mgmt/glusterd/src/glusterd-pmap.c index ec5bd1137f1..16ac628ab82 100644 --- a/xlators/mgmt/glusterd/src/glusterd-pmap.c +++ b/xlators/mgmt/glusterd/src/glusterd-pmap.c @@ -433,17 +433,20 @@ __gluster_pmap_portbybrick(rpcsvc_request_t *req) char *brick = NULL; int port = 0; int ret = -1; + xlator_t *this = THIS; + GF_ASSERT(this); ret = xdr_to_generic(req->msg[0], &args, (xdrproc_t)xdr_pmap_port_by_brick_req); if (ret < 0) { req->rpc_err = GARBAGE_ARGS; + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL); goto fail; } brick = args.brick; - port = pmap_registry_search(THIS, brick, GF_PMAP_PORT_BRICKSERVER, + port = pmap_registry_search(this, brick, GF_PMAP_PORT_BRICKSERVER, _gf_false); if (!port) @@ -475,11 +478,14 @@ __gluster_pmap_brickbyport(rpcsvc_request_t *req) 0, }; int ret = -1; + xlator_t *this = THIS; + GF_ASSERT(this); ret = xdr_to_generic(req->msg[0], &args, (xdrproc_t)xdr_pmap_brick_by_port_req); if (ret < 0) { req->rpc_err = GARBAGE_ARGS; + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL); goto fail; } @@ -513,10 +519,13 @@ __gluster_pmap_signin(rpcsvc_request_t *req) }; int ret = -1; glusterd_brickinfo_t *brickinfo = NULL; + xlator_t *this = THIS; + GF_ASSERT(this); ret = xdr_to_generic(req->msg[0], &args, (xdrproc_t)xdr_pmap_signin_req); if (ret < 0) { req->rpc_err = GARBAGE_ARGS; + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL); goto fail; } @@ -570,6 +579,7 @@ __gluster_pmap_signout(rpcsvc_request_t *req) if (ret < 0) { // failed to decode msg; req->rpc_err = GARBAGE_ARGS; + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL); goto fail; } rsp.op_ret = pmap_registry_remove(THIS, args.port, 
args.brick, diff --git a/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c index d96adcae89e..a05c90d7b10 100644 --- a/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c +++ b/xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c @@ -114,7 +114,7 @@ glusterd_proc_stop(glusterd_proc_t *proc, int sig, int flags) goto out; synclock_unlock(&conf->big_lock); - sleep(1); + synctask_sleep(1); synclock_lock(&conf->big_lock); if (gf_is_service_running(proc->pidfile, &pid)) { ret = kill(pid, SIGKILL); diff --git a/xlators/mgmt/glusterd/src/glusterd-quota.c b/xlators/mgmt/glusterd/src/glusterd-quota.c index cb2d9c7c384..8370c174ce3 100644 --- a/xlators/mgmt/glusterd/src/glusterd-quota.c +++ b/xlators/mgmt/glusterd/src/glusterd-quota.c @@ -478,8 +478,9 @@ glusterd_stop_all_quota_crawl_service(glusterd_conf_t *priv, if (dir == NULL) return; - GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch); - while (entry) { + while ((entry = sys_readdir(dir, scratch))) { + if (gf_irrelevant_entry(entry)) + continue; len = snprintf(pidfile, sizeof(pidfile), "%s/%s", pid_dir, entry->d_name); if ((len >= 0) && (len < sizeof(pidfile))) { @@ -487,8 +488,6 @@ glusterd_stop_all_quota_crawl_service(glusterd_conf_t *priv, _gf_true); sys_unlink(pidfile); } - - GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch); } sys_closedir(dir); } @@ -1900,10 +1899,9 @@ glusterd_get_gfid_from_brick(dict_t *dict, glusterd_volinfo_t *volinfo, } ret = sys_lgetxattr(backend_path, GFID_XATTR_KEY, gfid, 16); if (ret < 0) { - gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_SETXATTR_FAIL, - "Failed to get " - "extended attribute %s for directory %s. 
", - GFID_XATTR_KEY, backend_path); + gf_smsg(this->name, GF_LOG_INFO, errno, GD_MSG_GET_XATTR_FAIL, + "Attribute=%s, Directory=%s", GFID_XATTR_KEY, backend_path, + NULL); ret = 0; continue; } diff --git a/xlators/mgmt/glusterd/src/glusterd-quotad-svc.c b/xlators/mgmt/glusterd/src/glusterd-quotad-svc.c index fc0aaddcbe3..f26d832a06d 100644 --- a/xlators/mgmt/glusterd/src/glusterd-quotad-svc.c +++ b/xlators/mgmt/glusterd/src/glusterd-quotad-svc.c @@ -127,8 +127,10 @@ glusterd_quotadsvc_start(glusterd_svc_t *svc, int flags) char *options[] = {svc->name, "--process-name", NULL}; cmdline = dict_new(); - if (!cmdline) + if (!cmdline) { + gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } for (i = 0; options[i]; i++) { ret = snprintf(key, sizeof(key), "arg%d", i); diff --git a/xlators/mgmt/glusterd/src/glusterd-rebalance.c b/xlators/mgmt/glusterd/src/glusterd-rebalance.c index 4ce20a9e592..458bf168ede 100644 --- a/xlators/mgmt/glusterd/src/glusterd-rebalance.c +++ b/xlators/mgmt/glusterd/src/glusterd-rebalance.c @@ -219,6 +219,9 @@ glusterd_handle_defrag_start(glusterd_volinfo_t *volinfo, char *op_errstr, char valgrind_logfile[PATH_MAX] = { 0, }; + char msg[1024] = { + 0, + }; char *volfileserver = NULL; char *localtime_logging = NULL; @@ -270,12 +273,17 @@ glusterd_handle_defrag_start(glusterd_volinfo_t *volinfo, char *op_errstr, "rebalance"); runinit(&runner); - if (this->ctx->cmd_args.valgrind) { + if (this->ctx->cmd_args.vgtool != _gf_none) { snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-%s-rebalance.log", priv->logdir, volinfo->volname); - runner_add_args(&runner, "valgrind", "--leak-check=full", - "--trace-children=yes", "--track-origins=yes", NULL); + if (this->ctx->cmd_args.vgtool == _gf_memcheck) + runner_add_args(&runner, "valgrind", "--leak-check=full", + "--trace-children=yes", "--track-origins=yes", + NULL); + else + runner_add_args(&runner, "valgrind", "--tool=drd", NULL); + runner_argprintf(&runner, "--log-file=%s", 
valgrind_logfile); } @@ -316,6 +324,10 @@ glusterd_handle_defrag_start(glusterd_volinfo_t *volinfo, char *op_errstr, runner_add_arg(&runner, "--localtime-logging"); } + snprintf(msg, sizeof(msg), "Starting the rebalance service for volume %s", + volinfo->volname); + runner_log(&runner, this->name, GF_LOG_DEBUG, msg); + ret = runner_run_nowait(&runner); if (ret) { gf_msg_debug("glusterd", 0, "rebalance command failed"); @@ -390,8 +402,10 @@ glusterd_rebalance_rpc_create(glusterd_volinfo_t *volinfo) goto out; options = dict_new(); - if (!options) + if (!options) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } GLUSTERD_GET_DEFRAG_SOCK_FILE(sockfile, volinfo); @@ -497,6 +511,7 @@ __glusterd_handle_defrag_volume(rpcsvc_request_t *req) if (ret < 0) { // failed to decode msg; req->rpc_err = GARBAGE_ARGS; + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL); goto out; } diff --git a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c index a861240da31..43c2f4373e0 100644 --- a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c +++ b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c @@ -227,6 +227,20 @@ glusterd_op_stage_replace_brick(dict_t *dict, char **op_errstr, is_force = _gf_true; } + if (volinfo->snap_count > 0 || !cds_list_empty(&volinfo->snap_volumes)) { + snprintf(msg, sizeof(msg), + "Volume %s has %" PRIu64 + " snapshots. " + "Changing the volume configuration will not effect snapshots." 
+ "But the snapshot brick mount should be intact to " + "make them function.", + volname, volinfo->snap_count); + gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_SNAP_WARN, "%s", msg); + msg[0] = '\0'; + } + + glusterd_add_peers_to_auth_list(volname); + ret = glusterd_get_dst_brick_info(&dst_brick, volname, op_errstr, &dst_brickinfo, &host, dict, &dup_dstbrick); diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c index a8e35f32a15..88662e3bbae 100644 --- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c +++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c @@ -183,10 +183,8 @@ glusterd_op_send_cli_response(glusterd_op_t op, int32_t op_ret, ret = dict_allocate_and_serialize(ctx, &rsp.dict.dict_val, &rsp.dict.dict_len); if (ret < 0) - gf_msg(this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_SERL_LENGTH_GET_FAIL, - "failed to " - "serialize buffer"); + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); else free_ptr = rsp.dict.dict_val; } @@ -1464,6 +1462,7 @@ glusterd_rpc_probe(call_frame_t *frame, xlator_t *this, void *data) dict_t *dict = NULL; if (!frame || !this || !data) { + gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); ret = -1; goto out; } @@ -1473,15 +1472,24 @@ glusterd_rpc_probe(call_frame_t *frame, xlator_t *this, void *data) GF_ASSERT(priv); ret = dict_get_strn(dict, "hostname", SLEN("hostname"), &hostname); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=hostname", NULL); goto out; + } ret = dict_get_int32n(dict, "port", SLEN("port"), &port); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_DEBUG, errno, GD_MSG_DICT_GET_FAILED, + "Key=port", NULL); port = GF_DEFAULT_BASE_PORT; + } ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo)); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=peerinfo", NULL); goto out; + } gf_uuid_copy(req.uuid, MY_UUID); 
req.hostname = gf_strdup(hostname); @@ -1510,6 +1518,7 @@ glusterd_rpc_friend_add(call_frame_t *frame, xlator_t *this, void *data) dict_t *peer_data = NULL; if (!frame || !this || !data) { + gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); ret = -1; goto out; } @@ -1540,6 +1549,8 @@ glusterd_rpc_friend_add(call_frame_t *frame, xlator_t *this, void *data) peer_data = dict_new(); if (!peer_data) { + gf_smsg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_DICT_CREATE_FAIL, + NULL); errno = ENOMEM; goto out; } @@ -1585,8 +1596,11 @@ glusterd_rpc_friend_add(call_frame_t *frame, xlator_t *this, void *data) if (!req.vols.vols_len) { ret = dict_allocate_and_serialize(peer_data, &req.vols.vols_val, &req.vols.vols_len); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; + } } ret = glusterd_submit_request( @@ -1760,8 +1774,11 @@ glusterd_mgmt_v3_lock_peers(call_frame_t *frame, xlator_t *this, void *data) GF_ASSERT(priv); ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo)); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=peerinfo", NULL); goto out; + } // peerinfo should not be in payload dict_deln(dict, "peerinfo", SLEN("peerinfo")); @@ -1771,9 +1788,8 @@ glusterd_mgmt_v3_lock_peers(call_frame_t *frame, xlator_t *this, void *data) ret = dict_allocate_and_serialize(dict, &req.dict.dict_val, &req.dict.dict_len); if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL, - "Failed to serialize dict " - "to request buffer"); + gf_smsg(this->name, GF_LOG_ERROR, 0, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; } @@ -1797,6 +1813,7 @@ glusterd_mgmt_v3_lock_peers(call_frame_t *frame, xlator_t *this, void *data) } frame->cookie = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t); if (!frame->cookie) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); ret = -1; goto out; } @@ -1836,8 
+1853,11 @@ glusterd_mgmt_v3_unlock_peers(call_frame_t *frame, xlator_t *this, void *data) GF_ASSERT(priv); ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo)); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=peerinfo", NULL); goto out; + } // peerinfo should not be in payload dict_deln(dict, "peerinfo", SLEN("peerinfo")); @@ -1847,9 +1867,8 @@ glusterd_mgmt_v3_unlock_peers(call_frame_t *frame, xlator_t *this, void *data) ret = dict_allocate_and_serialize(dict, &req.dict.dict_val, &req.dict.dict_len); if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL, - "Failed to serialize dict " - "to request buffer"); + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; } @@ -1873,6 +1892,7 @@ glusterd_mgmt_v3_unlock_peers(call_frame_t *frame, xlator_t *this, void *data) } frame->cookie = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t); if (!frame->cookie) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); ret = -1; goto out; } @@ -1954,8 +1974,11 @@ glusterd_stage_op(call_frame_t *frame, xlator_t *this, void *data) GF_ASSERT(priv); ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo)); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=peerinfo", NULL); goto out; + } // peerinfo should not be in payload dict_deln(dict, "peerinfo", SLEN("peerinfo")); @@ -1965,9 +1988,8 @@ glusterd_stage_op(call_frame_t *frame, xlator_t *this, void *data) ret = dict_allocate_and_serialize(dict, &req.buf.buf_val, &req.buf.buf_len); if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL, - "Failed to serialize dict " - "to request buffer"); + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; } /* Sending valid transaction ID to peers */ @@ -1989,6 +2011,7 @@ glusterd_stage_op(call_frame_t *frame, xlator_t *this, void 
*data) } frame->cookie = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t); if (!frame->cookie) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); ret = -1; goto out; } @@ -2030,8 +2053,11 @@ glusterd_commit_op(call_frame_t *frame, xlator_t *this, void *data) GF_ASSERT(priv); ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo)); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=peerinfo", NULL); goto out; + } // peerinfo should not be in payload dict_deln(dict, "peerinfo", SLEN("peerinfo")); @@ -2041,9 +2067,8 @@ glusterd_commit_op(call_frame_t *frame, xlator_t *this, void *data) ret = dict_allocate_and_serialize(dict, &req.buf.buf_val, &req.buf.buf_len); if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL, - "Failed to serialize dict to " - "request buffer"); + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; } /* Sending valid transaction ID to peers */ @@ -2065,6 +2090,7 @@ glusterd_commit_op(call_frame_t *frame, xlator_t *this, void *data) } frame->cookie = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t); if (!frame->cookie) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); ret = -1; goto out; } diff --git a/xlators/mgmt/glusterd/src/glusterd-scrub-svc.c b/xlators/mgmt/glusterd/src/glusterd-scrub-svc.c index eab9078eb8e..c49a0eefba5 100644 --- a/xlators/mgmt/glusterd/src/glusterd-scrub-svc.c +++ b/xlators/mgmt/glusterd/src/glusterd-scrub-svc.c @@ -117,8 +117,10 @@ glusterd_scrubsvc_start(glusterd_svc_t *svc, int flags) dict_t *cmdict = NULL; cmdict = dict_new(); - if (!cmdict) + if (!cmdict) { + gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto error_return; + } ret = dict_set_str(cmdict, "cmdarg0", "--global-timer-wheel"); if (ret) diff --git a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c index 
f3781879d99..b0b8a2e4018 100644 --- a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c +++ b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c @@ -89,12 +89,15 @@ glusterd_validate_quorum(xlator_t *this, glusterd_op_t op, dict_t *dict, ret = dict_get_str(dict, "volname", &volname); if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=volname", NULL); ret = 0; goto out; } ret = glusterd_volinfo_find(volname, &volinfo); if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOLINFO_GET_FAIL, NULL); ret = 0; goto out; } @@ -252,8 +255,11 @@ glusterd_is_volume_in_server_quorum(glusterd_volinfo_t *volinfo) int ret = 0; ret = dict_get_str(volinfo->dict, GLUSTERD_QUORUM_TYPE_KEY, &quorum_type); - if (ret) + if (ret) { + gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=%s", GLUSTERD_QUORUM_TYPE_KEY, NULL); goto out; + } if (strcmp(quorum_type, GLUSTERD_SERVER_QUORUM) == 0) res = _gf_true; @@ -287,8 +293,11 @@ does_gd_meet_server_quorum(xlator_t *this) ret = glusterd_get_quorum_cluster_counts(this, &active_count, &quorum_count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_QUORUM_CLUSTER_COUNT_GET_FAIL, NULL); goto out; + } if (!does_quorum_meet(active_count, quorum_count)) { goto out; diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c index e106398e697..1c56384a14b 100644 --- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c @@ -155,6 +155,8 @@ glusterd_shdsvc_create_volfile(glusterd_volinfo_t *volinfo) int ret = -1; dict_t *mod_dict = NULL; + xlator_t *this = THIS; + GF_ASSERT(this); glusterd_svc_build_shd_volfile_path(volinfo, filepath, PATH_MAX); if (!glusterd_is_shd_compatible_volume(volinfo)) { @@ -166,28 +168,42 @@ glusterd_shdsvc_create_volfile(glusterd_volinfo_t *volinfo) goto out; } mod_dict = dict_new(); - if (!mod_dict) + if (!mod_dict) { + gf_smsg(this->name, 
GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } ret = dict_set_uint32(mod_dict, "cluster.background-self-heal-count", 0); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=cluster.background-self-heal-count", NULL); goto out; + } ret = dict_set_str(mod_dict, "cluster.data-self-heal", "on"); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=cluster.data-self-heal", NULL); goto out; + } ret = dict_set_str(mod_dict, "cluster.metadata-self-heal", "on"); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=cluster.metadata-self-heal", NULL); goto out; + } ret = dict_set_str(mod_dict, "cluster.entry-self-heal", "on"); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=cluster.entry-self-heal", NULL); goto out; + } ret = glusterd_shdsvc_generate_volfile(volinfo, filepath, mod_dict); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL, + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL, "Failed to create volfile"); goto out; } @@ -195,7 +211,7 @@ glusterd_shdsvc_create_volfile(glusterd_volinfo_t *volinfo) out: if (mod_dict) dict_unref(mod_dict); - gf_msg_debug(THIS->name, 0, "Returning %d", ret); + gf_msg_debug(this->name, 0, "Returning %d", ret); return ret; } @@ -270,9 +286,7 @@ glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags) } while (conf->restart_shd) { - synclock_unlock(&conf->big_lock); - sleep(2); - synclock_lock(&conf->big_lock); + synccond_wait(&conf->cond_restart_shd, &conf->big_lock); } conf->restart_shd = _gf_true; shd_restart = _gf_true; @@ -328,8 +342,10 @@ glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags) } } out: - if (shd_restart) + if (shd_restart) { conf->restart_shd = _gf_false; + synccond_broadcast(&conf->cond_restart_shd); + } if (volinfo) glusterd_volinfo_unref(volinfo); 
if (ret) @@ -346,6 +362,8 @@ glusterd_new_shd_svc_start(glusterd_svc_t *svc, int flags) char glusterd_uuid_option[PATH_MAX] = {0}; char client_pid[32] = {0}; dict_t *cmdline = NULL; + xlator_t *this = THIS; + GF_ASSERT(this); cmdline = dict_new(); if (!cmdline) @@ -362,31 +380,49 @@ glusterd_new_shd_svc_start(glusterd_svc_t *svc, int flags) goto out; ret = dict_set_str(cmdline, "arg", client_pid); - if (ret < 0) + if (ret < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=arg", NULL); goto out; + } /* Pass cmdline arguments as key-value pair. The key is merely * a carrier and is not used. Since dictionary follows LIFO the value * should be put in reverse order*/ ret = dict_set_str(cmdline, "arg4", svc->name); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=arg4", NULL); goto out; + } ret = dict_set_str(cmdline, "arg3", GD_SHD_PROCESS_NAME); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=arg3", NULL); goto out; + } ret = dict_set_str(cmdline, "arg2", glusterd_uuid_option); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=arg2", NULL); goto out; + } ret = dict_set_str(cmdline, "arg1", "--xlator-option"); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=arg1", NULL); goto out; + } ret = glusterd_svc_start(svc, flags, cmdline); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_GLUSTER_SERVICE_START_FAIL, NULL); goto out; + } ret = glusterd_conn_connect(&(svc->conn)); out: @@ -539,28 +575,45 @@ glusterd_shdsvc_reconfigure(glusterd_volinfo_t *volinfo) goto out; } mod_dict = dict_new(); - if (!mod_dict) + if (!mod_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } ret = dict_set_uint32(mod_dict, "cluster.background-self-heal-count", 0); - if (ret) + if (ret) { + 
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=cluster.background-self-heal-count", NULL); goto out; + } ret = dict_set_str(mod_dict, "cluster.data-self-heal", "on"); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=cluster.data-self-heal", NULL); goto out; + } ret = dict_set_str(mod_dict, "cluster.metadata-self-heal", "on"); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=cluster.metadata-self-heal", NULL); goto out; + } ret = dict_set_int32(mod_dict, "graph-check", 1); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=graph-check", NULL); goto out; + } ret = dict_set_str(mod_dict, "cluster.entry-self-heal", "on"); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=cluster.entry-self-heal", NULL); goto out; + } ret = glusterd_volume_svc_check_volfile_identical( "glustershd", mod_dict, volinfo, glusterd_shdsvc_generate_volfile, diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c index 7b67592e27c..bf2d81b644a 100644 --- a/xlators/mgmt/glusterd/src/glusterd-sm.c +++ b/xlators/mgmt/glusterd/src/glusterd-sm.c @@ -146,22 +146,33 @@ glusterd_broadcast_friend_delete(char *hostname, uuid_t uuid) ctx.op = GD_FRIEND_UPDATE_DEL; friends = dict_new(); - if (!friends) + if (!friends) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "op"); ret = dict_set_int32n(friends, key, keylen, ctx.op); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "hostname"); ret = dict_set_strn(friends, key, keylen, hostname); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } 
ret = dict_set_int32n(friends, "count", SLEN("count"), count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } RCU_READ_LOCK; cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list) @@ -370,30 +381,45 @@ glusterd_ac_friend_probe(glusterd_friend_sm_event_t *event, void *ctx) peerinfo = glusterd_peerinfo_find(NULL, probe_ctx->hostname); if (peerinfo == NULL) { // We should not reach this state ideally + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_NOT_FOUND, NULL); ret = -1; goto unlock; } - if (!peerinfo->peer) + if (!peerinfo->peer) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_ADDRESS_GET_FAIL, + NULL); goto unlock; + } proc = &peerinfo->peer->proctable[GLUSTERD_PROBE_QUERY]; if (proc->fn) { frame = create_frame(this, this->ctx->pool); if (!frame) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_FRAME_CREATE_FAIL, + NULL); goto unlock; } frame->local = ctx; dict = dict_new(); - if (!dict) + if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, + NULL); goto unlock; + } ret = dict_set_strn(dict, "hostname", SLEN("hostname"), probe_ctx->hostname); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=hostname", NULL); goto unlock; + } ret = dict_set_int32n(dict, "port", SLEN("port"), probe_ctx->port); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=port", NULL); goto unlock; + } /* The peerinfo reference being set here is going to be used * only within this critical section, in glusterd_rpc_probe @@ -482,12 +508,17 @@ glusterd_ac_send_friend_remove_req(glusterd_friend_sm_event_t *event, goto unlock; } - if (!peerinfo->peer) + if (!peerinfo->peer) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_ADDRESS_GET_FAIL, + NULL); goto unlock; + } proc = &peerinfo->peer->proctable[GLUSTERD_FRIEND_REMOVE]; if (proc->fn) { frame = 
create_frame(this, this->ctx->pool); if (!frame) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_FRAME_CREATE_FAIL, + NULL); goto unlock; } frame->local = data; @@ -556,13 +587,18 @@ glusterd_ac_send_friend_update(glusterd_friend_sm_event_t *event, void *ctx) goto out; } - if (!friends) + if (!friends) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto unlock; + } ev_ctx.op = GD_FRIEND_UPDATE_ADD; ret = dict_set_int32n(friends, key, keylen, ev_ctx.op); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto unlock; + } cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list) { @@ -578,8 +614,11 @@ glusterd_ac_send_friend_update(glusterd_friend_sm_event_t *event, void *ctx) } ret = dict_set_int32n(friends, "count", SLEN("count"), count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=count", NULL); goto unlock; + } cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list) { @@ -665,13 +704,18 @@ glusterd_ac_update_friend(glusterd_friend_sm_event_t *event, void *ctx) goto unlock; } - if (!friends) + if (!friends) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } ev_ctx.op = GD_FRIEND_UPDATE_ADD; ret = dict_set_int32n(friends, key, keylen, ev_ctx.op); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto unlock; + } cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list) { @@ -687,8 +731,11 @@ glusterd_ac_update_friend(glusterd_friend_sm_event_t *event, void *ctx) } ret = dict_set_int32n(friends, "count", SLEN("count"), count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=count", NULL); goto unlock; + } ret = dict_set_static_ptr(friends, "peerinfo", cur_peerinfo); if (ret) { @@ -1062,6 +1109,7 @@ glusterd_friend_sm_transition_state(uuid_t peerid, 
char *peername, RCU_READ_LOCK; peerinfo = glusterd_peerinfo_find(peerid, peername); if (!peerinfo) { + gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_PEER_NOT_FOUND, NULL); goto out; } diff --git a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c index 3042789916c..d75f249b29e 100644 --- a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c +++ b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c @@ -87,8 +87,10 @@ glusterd_snapdsvc_init(void *data) svc = &(volinfo->snapd.svc); ret = snprintf(svc->name, sizeof(svc->name), "%s", snapd_svc_name); - if (ret < 0) + if (ret < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); goto out; + } notify = glusterd_snapdsvc_rpc_notify; @@ -115,6 +117,7 @@ glusterd_snapdsvc_init(void *data) glusterd_svc_build_snapd_logfile(logfile, logdir, sizeof(logfile)); len = snprintf(volfileid, sizeof(volfileid), "snapd/%s", volinfo->volname); if ((len < 0) || (len >= sizeof(volfileid))) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); ret = -1; goto out; } @@ -301,16 +304,22 @@ glusterd_snapdsvc_start(glusterd_svc_t *svc, int flags) } runinit(&runner); - if (this->ctx->cmd_args.valgrind) { + if (this->ctx->cmd_args.vgtool != _gf_none) { len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-snapd.log", svc->proc.logdir); if ((len < 0) || (len >= PATH_MAX)) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); ret = -1; goto out; } - runner_add_args(&runner, "valgrind", "--leak-check=full", - "--trace-children=yes", "--track-origins=yes", NULL); + if (this->ctx->cmd_args.vgtool == _gf_memcheck) + runner_add_args(&runner, "valgrind", "--leak-check=full", + "--trace-children=yes", "--track-origins=yes", + NULL); + else + runner_add_args(&runner, "valgrind", "--tool=drd", NULL); + runner_argprintf(&runner, "--log-file=%s", valgrind_logfile); } diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c 
b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c index 43735d33fee..995268b796d 100644 --- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c +++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c @@ -282,12 +282,10 @@ glusterd_snap_volinfo_restore(dict_t *dict, dict_t *rsp_dict, new_volinfo->volume_id, sizeof(new_volinfo->volume_id), XATTR_REPLACE); if (ret == -1) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SETXATTR_FAIL, - "Failed to " - "set extended attribute %s on %s. " - "Reason: %s, snap: %s", - GF_XATTR_VOL_ID_KEY, new_brickinfo->path, - strerror(errno), new_volinfo->volname); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_SET_XATTR_FAIL, + "Attribute=%s, Path=%s, Reason=%s, Snap=%s", + GF_XATTR_VOL_ID_KEY, new_brickinfo->path, + strerror(errno), new_volinfo->volname, NULL); goto out; } } @@ -1961,9 +1959,7 @@ glusterd_update_snaps_synctask(void *opaque) synclock_lock(&conf->big_lock); while (conf->restart_bricks) { - synclock_unlock(&conf->big_lock); - sleep(2); - synclock_lock(&conf->big_lock); + synccond_wait(&conf->cond_restart_bricks, &conf->big_lock); } conf->restart_bricks = _gf_true; @@ -2041,8 +2037,9 @@ glusterd_update_snaps_synctask(void *opaque) "Failed to remove snap %s", snap->snapname); goto out; } - if (dict) - dict_unref(dict); + + dict_unref(dict); + dict = NULL; } snprintf(buf, sizeof(buf), "%s.accept_peer_data", prefix); ret = dict_get_int32(peer_data, buf, &val); @@ -2070,6 +2067,7 @@ out: if (dict) dict_unref(dict); conf->restart_bricks = _gf_false; + synccond_broadcast(&conf->cond_restart_bricks); return ret; } @@ -2149,18 +2147,27 @@ glusterd_add_snapd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict, snprintf(base_key, sizeof(base_key), "brick%d", count); snprintf(key, sizeof(key), "%s.hostname", base_key); ret = dict_set_str(dict, key, "Snapshot Daemon"); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s", + key, NULL); goto out; + } snprintf(key, sizeof(key), "%s.path", 
base_key); ret = dict_set_dynstr(dict, key, gf_strdup(uuid_utoa(MY_UUID))); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s", + key, NULL); goto out; + } snprintf(key, sizeof(key), "%s.port", base_key); ret = dict_set_int32(dict, key, volinfo->snapd.port); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s", + key, NULL); goto out; + } glusterd_svc_build_snapd_pidfile(volinfo, pidfile, sizeof(pidfile)); @@ -2170,8 +2177,11 @@ glusterd_add_snapd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict, snprintf(key, sizeof(key), "%s.pid", base_key); ret = dict_set_int32(dict, key, pid); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s", + key, NULL); goto out; + } snprintf(key, sizeof(key), "%s.status", base_key); ret = dict_set_int32(dict, key, brick_online); @@ -2672,8 +2682,10 @@ glusterd_missed_snapinfo_new(glusterd_missed_snap_info **missed_snapinfo) new_missed_snapinfo = GF_CALLOC(1, sizeof(*new_missed_snapinfo), gf_gld_mt_missed_snapinfo_t); - if (!new_missed_snapinfo) + if (!new_missed_snapinfo) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); goto out; + } CDS_INIT_LIST_HEAD(&new_missed_snapinfo->missed_snaps); CDS_INIT_LIST_HEAD(&new_missed_snapinfo->snap_ops); @@ -2701,8 +2713,10 @@ glusterd_missed_snap_op_new(glusterd_snap_op_t **snap_op) new_snap_op = GF_CALLOC(1, sizeof(*new_snap_op), gf_gld_mt_missed_snapinfo_t); - if (!new_snap_op) + if (!new_snap_op) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); goto out; + } new_snap_op->brick_num = -1; new_snap_op->op = -1; @@ -3594,13 +3608,17 @@ glusterd_copy_folder(const char *source, const char *destination) continue; ret = snprintf(src_path, sizeof(src_path), "%s/%s", source, entry->d_name); - if (ret < 0) + if (ret < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); goto out; + } ret = snprintf(dest_path, 
sizeof(dest_path), "%s/%s", destination, entry->d_name); - if (ret < 0) + if (ret < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); goto out; + } ret = glusterd_copy_file(src_path, dest_path); if (ret) { @@ -3756,8 +3774,10 @@ glusterd_copy_quota_files(glusterd_volinfo_t *src_vol, GLUSTERD_GET_VOLUME_DIR(dest_dir, dest_vol, priv); ret = snprintf(src_path, sizeof(src_path), "%s/quota.conf", src_dir); - if (ret < 0) + if (ret < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); goto out; + } /* quota.conf is not present if quota is not enabled, Hence ignoring * the absence of this file @@ -3770,8 +3790,10 @@ glusterd_copy_quota_files(glusterd_volinfo_t *src_vol, } ret = snprintf(dest_path, sizeof(dest_path), "%s/quota.conf", dest_dir); - if (ret < 0) + if (ret < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); goto out; + } ret = glusterd_copy_file(src_path, dest_path); if (ret) { @@ -3795,8 +3817,10 @@ glusterd_copy_quota_files(glusterd_volinfo_t *src_vol, } ret = snprintf(dest_path, sizeof(dest_path), "%s/quota.cksum", dest_dir); - if (ret < 0) + if (ret < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); goto out; + } ret = glusterd_copy_file(src_path, dest_path); if (ret) { @@ -4066,8 +4090,10 @@ glusterd_restore_nfs_ganesha_file(glusterd_volinfo_t *src_vol, ret = snprintf(src_path, sizeof(src_path), "%s/export.%s.conf", snap_dir, snap->snapname); - if (ret < 0) + if (ret < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); goto out; + } ret = sys_lstat(src_path, &stbuf); if (ret) { @@ -4082,8 +4108,10 @@ glusterd_restore_nfs_ganesha_file(glusterd_volinfo_t *src_vol, ret = snprintf(dest_path, sizeof(dest_path), "%s/export.%s.conf", GANESHA_EXPORT_DIRECTORY, src_vol->volname); - if (ret < 0) + if (ret < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); goto out; + } ret = glusterd_copy_file(src_path, dest_path); if (ret) diff 
--git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c index 4703a072294..aeaa8d15214 100644 --- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c +++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c @@ -514,6 +514,7 @@ glusterd_copy_geo_rep_session_files(char *session, glusterd_volinfo_t *snap_vol) ret = snprintf(georep_session_dir, sizeof(georep_session_dir), "%s/%s/%s", priv->workdir, GEOREP, session); if (ret < 0) { /* Negative value is an error */ + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_COPY_FAIL, NULL); goto out; } @@ -521,6 +522,7 @@ glusterd_copy_geo_rep_session_files(char *session, glusterd_volinfo_t *snap_vol) priv->workdir, GLUSTERD_VOL_SNAP_DIR_PREFIX, snap_vol->snapshot->snapname, GEOREP, session); if (ret < 0) { /* Negative value is an error */ + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_COPY_FAIL, NULL); goto out; } @@ -568,12 +570,14 @@ glusterd_copy_geo_rep_session_files(char *session, glusterd_volinfo_t *snap_vol) ret = snprintf(src_path, sizeof(src_path), "%s/%s", georep_session_dir, files[i]->d_name); if (ret < 0) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_COPY_FAIL, NULL); goto out; } ret = snprintf(dest_path, sizeof(dest_path), "%s/%s", snap_session_dir, files[i]->d_name); if (ret < 0) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_COPY_FAIL, NULL); goto out; } @@ -632,12 +636,14 @@ glusterd_snapshot_backup_vol(glusterd_volinfo_t *volinfo) "%s/" GLUSTERD_TRASH "/vols-%s.deleted", priv->workdir, volinfo->volname); if ((len < 0) || (len >= sizeof(delete_path))) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_COPY_FAIL, NULL); goto out; } len = snprintf(trashdir, sizeof(trashdir), "%s/" GLUSTERD_TRASH, priv->workdir); - if ((len < 0) || (len >= sizeof(delete_path))) { + if ((len < 0) || (len >= sizeof(trashdir))) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_COPY_FAIL, NULL); goto out; } @@ -730,6 +736,7 @@ glusterd_copy_geo_rep_files(glusterd_volinfo_t *origin_vol, * is slave volume. 
*/ if (!origin_vol->gsync_slaves) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_SLAVE, NULL); ret = 0; goto out; } @@ -1418,6 +1425,8 @@ glusterd_handle_snapshot_config(rpcsvc_request_t *req, glusterd_op_t op, &config_command); if (ret) { snprintf(err_str, len, "Failed to get config-command type"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=config-command", NULL); goto out; } @@ -1976,6 +1985,13 @@ glusterd_snap_create_clone_common_prevalidate( "command or use [force] option in " "snapshot create to override this " "behavior."); + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_BRICK_NOT_RUNNING, + "Please run volume status command to see brick " + "status.Please start the stopped brick and then issue " + "snapshot create command or use 'force' option in " + "snapshot create to override this behavior.", + NULL); } else { snprintf(err_str, PATH_MAX, "One or more bricks are not running. " @@ -1984,6 +2000,12 @@ glusterd_snap_create_clone_common_prevalidate( "Please start the stopped brick " "and then issue snapshot clone " "command "); + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_BRICK_NOT_RUNNING, + "Please run snapshot status command to see brick " + "status. 
Please start the stopped brick and then issue " + "snapshot clone command.", + NULL); } *op_errno = EG_BRCKDWN; ret = -1; @@ -1999,6 +2021,10 @@ glusterd_snap_create_clone_common_prevalidate( if (len < 0) { strcpy(err_str, "<error>"); } + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_BRK_MNTPATH_GET_FAIL, + "Brick_hostname=%s, Brick_path=%s", brickinfo->hostname, + brickinfo->path, NULL); ret = -1; goto out; } @@ -2010,6 +2036,11 @@ glusterd_snap_create_clone_common_prevalidate( "all bricks of %s are thinly " "provisioned LV.", volinfo->volname); + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_SNAPSHOT_NOT_THIN_PROVISIONED, + "Ensure that all bricks of volume are thinly " + "provisioned LV, Volume=%s", + volinfo->volname, NULL); ret = -1; goto out; } @@ -2022,6 +2053,9 @@ glusterd_snap_create_clone_common_prevalidate( "cannot copy the snapshot device " "name (volname: %s, snapname: %s)", volinfo->volname, snapname); + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_SNAP_DEVICE_NAME_GET_FAIL, "Volname=%s, Snapname=%s", + volinfo->volname, snapname, NULL); *loglevel = GF_LOG_WARNING; ret = -1; goto out; @@ -2188,6 +2222,16 @@ glusterd_snapshot_clone_prevalidate(dict_t *dict, char **op_errstr, goto out; } + if (!glusterd_is_volume_started(snap_vol)) { + snprintf(err_str, sizeof(err_str), + "Snapshot %s is " + "not activated", + snap->snapname); + loglevel = GF_LOG_WARNING; + *op_errno = EG_VOLSTP; + goto out; + } + ret = dict_get_bin(dict, "vol1_volid", (void **)&snap_volid); if (ret) { gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, @@ -3211,7 +3255,7 @@ glusterd_snapshot_get_snap_detail(dict_t *dict, glusterd_snap_t *snap, int volcount = 0; char key[32] = ""; /* keyprefix is quite small, up to 16 bytes */ int keylen; - char timestr[64] = ""; + char timestr[GF_TIMESTR_SIZE] = ""; char *value = NULL; glusterd_volinfo_t *snap_vol = NULL; glusterd_volinfo_t *tmp_vol = NULL; @@ -3886,7 +3930,8 @@ glusterd_handle_snapshot_create(rpcsvc_request_t 
*req, glusterd_op_t op, goto out; } - ret = dict_set_int64(dict, "snap-time", (int64_t)time(&snap_time)); + snap_time = gf_time(); + ret = dict_set_int64(dict, "snap-time", (int64_t)snap_time); if (ret) { gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Unable to set snap-time"); @@ -4451,6 +4496,7 @@ glusterd_add_missed_snaps_to_dict(dict_t *rsp_dict, snap_uuid, snap_vol->volname, brick_number, brickinfo->path, op, GD_MISSED_SNAP_PENDING); if ((len < 0) || (len >= sizeof(missed_snap_entry))) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_COPY_FAIL, NULL); goto out; } @@ -4458,6 +4504,8 @@ glusterd_add_missed_snaps_to_dict(dict_t *rsp_dict, ret = dict_get_int32n(rsp_dict, "missed_snap_count", SLEN("missed_snap_count"), &missed_snap_count); if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=missed_snap_count", NULL); /* Initialize the missed_snap_count for the first time */ missed_snap_count = 0; } @@ -4647,7 +4695,7 @@ glusterd_snap_brick_create(glusterd_volinfo_t *snap_volinfo, ret = sys_lsetxattr(brickinfo->path, GF_XATTR_VOL_ID_KEY, snap_volinfo->volume_id, 16, XATTR_REPLACE); if (ret == -1) { - gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_SETXATTR_FAIL, + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_SET_XATTR_FAIL, "Failed to set " "extended attribute %s on %s. 
Reason: " "%s, snap: %s", @@ -5275,6 +5323,48 @@ glusterd_do_snap_vol(glusterd_volinfo_t *origin_vol, glusterd_snap_t *snap, dict_deln(snap_vol->dict, "features.barrier", SLEN("features.barrier")); gd_update_volume_op_versions(snap_vol); + /* * + * Create the export file from the node where ganesha.enable "on" + * is executed + * */ + if (glusterd_is_ganesha_cluster() && + glusterd_check_ganesha_export(snap_vol)) { + if (is_origin_glusterd(dict)) { + ret = manage_export_config(clonename, "on", NULL); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, + GD_MSG_EXPORT_FILE_CREATE_FAIL, + "Failed to create" + "export file for NFS-Ganesha\n"); + goto out; + } + } + + ret = dict_set_dynstr_with_alloc(snap_vol->dict, + "features.cache-invalidation", "on"); + ret = gd_ganesha_send_dbus(clonename, "on"); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_EXPORT_FILE_CREATE_FAIL, + "Dynamic export addition/deletion failed." + " Please see log file for details. Clone name = %s", + clonename); + goto out; + } + } + if (!glusterd_is_ganesha_cluster() && + glusterd_check_ganesha_export(snap_vol)) { + /* This happens when a snapshot was created when Ganesha was + * enabled globally. Then Ganesha disabled from the cluster. + * In such cases, we will have the volume level option set + * on dict, So we have to disable it as it doesn't make sense + * to keep the option. 
+ */ + + ret = dict_set_dynstr(snap_vol->dict, "ganesha.enable", "off"); + if (ret) + goto out; + } + ret = glusterd_store_volinfo(snap_vol, GLUSTERD_VOLINFO_VER_AC_INCREMENT); if (ret) { gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_SET_FAIL, @@ -5346,8 +5436,31 @@ out: for (i = 0; unsupported_opt[i].key; i++) GF_FREE(unsupported_opt[i].value); - if (snap_vol) + if (snap_vol) { + if (glusterd_is_ganesha_cluster() && + glusterd_check_ganesha_export(snap_vol)) { + if (is_origin_glusterd(dict)) { + ret = manage_export_config(clonename, "on", NULL); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, + GD_MSG_EXPORT_FILE_CREATE_FAIL, + "Failed to create" + "export file for NFS-Ganesha\n"); + } + } + + ret = gd_ganesha_send_dbus(clonename, "off"); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, + GD_MSG_EXPORT_FILE_CREATE_FAIL, + "Dynamic export addition/deletion failed." + " Please see log file for details. Clone name = %s", + clonename); + } + } + glusterd_snap_volume_remove(rsp_dict, snap_vol, _gf_true, _gf_true); + } snap_vol = NULL; } @@ -5399,6 +5512,8 @@ glusterd_snapshot_activate_deactivate_prevalidate(dict_t *dict, "Snapshot (%s) does not " "exist.", snapname); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAP_NOT_FOUND, + "Snapname=%s", snapname, NULL); *op_errno = EG_NOSNAP; ret = -1; goto out; @@ -7204,11 +7319,15 @@ glusterd_get_brick_lvm_details(dict_t *rsp_dict, if (token != NULL) { value = gf_strdup(token); if (!value) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, + "token=%s", token, NULL); ret = -1; goto end; } ret = snprintf(key, sizeof(key), "%s.data", key_prefix); if (ret < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, + NULL); goto end; } @@ -7223,11 +7342,15 @@ glusterd_get_brick_lvm_details(dict_t *rsp_dict, if (token != NULL) { value = gf_strdup(token); if (!value) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, + "token=%s", token, NULL); ret = -1; goto end; } ret = 
snprintf(key, sizeof(key), "%s.lvsize", key_prefix); if (ret < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, + NULL); goto end; } @@ -7287,6 +7410,7 @@ glusterd_get_single_brick_status(char **op_errstr, dict_t *rsp_dict, keylen = snprintf(key, sizeof(key), "%s.brick%d.path", keyprefix, index); if (keylen < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); ret = -1; goto out; } @@ -7294,11 +7418,14 @@ glusterd_get_single_brick_status(char **op_errstr, dict_t *rsp_dict, ret = snprintf(brick_path, sizeof(brick_path), "%s:%s", brickinfo->hostname, brickinfo->path); if (ret < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); goto out; } value = gf_strdup(brick_path); if (!value) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, + "brick_path=%s", brick_path, NULL); ret = -1; goto out; } @@ -7374,6 +7501,8 @@ glusterd_get_single_brick_status(char **op_errstr, dict_t *rsp_dict, index); if (keylen < 0) { ret = -1; + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, + NULL); goto out; } @@ -7459,6 +7588,7 @@ glusterd_get_single_snap_status(char **op_errstr, dict_t *rsp_dict, { keylen = snprintf(key, sizeof(key), "%s.vol%d", keyprefix, volcount); if (keylen < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); ret = -1; goto out; } @@ -7482,6 +7612,7 @@ glusterd_get_single_snap_status(char **op_errstr, dict_t *rsp_dict, } keylen = snprintf(brickkey, sizeof(brickkey), "%s.brickcount", key); if (keylen < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); goto out; } @@ -7496,6 +7627,7 @@ glusterd_get_single_snap_status(char **op_errstr, dict_t *rsp_dict, keylen = snprintf(key, sizeof(key), "%s.volcount", keyprefix); if (keylen < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); ret = -1; goto out; } @@ -7535,6 +7667,7 @@ glusterd_get_each_snap_object_status(char **op_errstr, dict_t *rsp_dict, */ keylen = snprintf(key, 
sizeof(key), "%s.snapname", keyprefix); if (keylen < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); ret = -1; goto out; } @@ -7556,6 +7689,7 @@ glusterd_get_each_snap_object_status(char **op_errstr, dict_t *rsp_dict, keylen = snprintf(key, sizeof(key), "%s.uuid", keyprefix); if (keylen < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); ret = -1; goto out; } @@ -7639,6 +7773,7 @@ glusterd_get_snap_status_of_volume(char **op_errstr, dict_t *rsp_dict, { ret = snprintf(key, sizeof(key), "status.snap%d.snapname", i); if (ret < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); goto out; } @@ -7690,6 +7825,7 @@ glusterd_get_all_snapshot_status(dict_t *dict, char **op_errstr, { ret = snprintf(key, sizeof(key), "status.snap%d.snapname", i); if (ret < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); goto out; } @@ -8733,6 +8869,7 @@ glusterd_snapshot_revert_partial_restored_vol(glusterd_volinfo_t *volinfo) "%s/" GLUSTERD_TRASH "/vols-%s.deleted", priv->workdir, volinfo->volname); if ((len < 0) || (len >= sizeof(trash_path))) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); ret = -1; goto out; } @@ -8793,12 +8930,10 @@ glusterd_snapshot_revert_partial_restored_vol(glusterd_volinfo_t *volinfo) snap_vol->volume_id, sizeof(snap_vol->volume_id), XATTR_REPLACE); if (ret == -1) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SETXATTR_FAIL, - "Failed to set extended " - "attribute %s on %s. 
" - "Reason: %s, snap: %s", - GF_XATTR_VOL_ID_KEY, brickinfo->path, - strerror(errno), snap_vol->volname); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_SET_XATTR_FAIL, + "Attribute=%s, Path=%s, Reason=%s, Snap=%s", + GF_XATTR_VOL_ID_KEY, brickinfo->path, + strerror(errno), snap_vol->volname, NULL); goto out; } } @@ -9178,6 +9313,7 @@ glusterd_handle_snapshot_fn(rpcsvc_request_t *req) ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); if (ret < 0) { req->rpc_err = GARBAGE_ARGS; + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL); goto out; } diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c index 458df8dbd1d..d94dceb10b7 100644 --- a/xlators/mgmt/glusterd/src/glusterd-store.c +++ b/xlators/mgmt/glusterd/src/glusterd-store.c @@ -74,7 +74,7 @@ glusterd_replace_slash_with_hyphen(char *str) while (ptr) { *ptr = '-'; - ptr = strchr(str, '/'); + ptr = strchr(ptr, '/'); } } @@ -660,85 +660,72 @@ out: } static int -_storeslaves(dict_t *this, char *key, data_t *value, void *data) -{ - int32_t ret = 0; - gf_store_handle_t *shandle = NULL; - xlator_t *xl = NULL; - - xl = THIS; - GF_ASSERT(xl); - - shandle = (gf_store_handle_t *)data; - - GF_ASSERT(shandle); - GF_ASSERT(shandle->fd > 0); - GF_ASSERT(shandle->path); - GF_ASSERT(key); - GF_ASSERT(value); - GF_ASSERT(value->data); - - gf_msg_debug(xl->name, 0, "Storing in volinfo:key= %s, val=%s", key, - value->data); - - ret = gf_store_save_value(shandle->fd, key, (char *)value->data); - if (ret) { - gf_msg(xl->name, GF_LOG_ERROR, 0, GD_MSG_STORE_HANDLE_WRITE_FAIL, - "Unable to write into store" - " handle for path: %s", - shandle->path); - return -1; - } - return 0; -} - -int -_storeopts(dict_t *this, char *key, data_t *value, void *data) +_storeopts(dict_t *dict_value, char *key, data_t *value, void *data) { int32_t ret = 0; int32_t exists = 0; + int32_t option_len = 0; gf_store_handle_t *shandle = NULL; - xlator_t *xl = NULL; + 
glusterd_volinfo_data_store_t *dict_data = NULL; + xlator_t *this = NULL; - xl = THIS; - GF_ASSERT(xl); + this = THIS; + GF_ASSERT(this); - shandle = (gf_store_handle_t *)data; + dict_data = (glusterd_volinfo_data_store_t *)data; + shandle = dict_data->shandle; GF_ASSERT(shandle); GF_ASSERT(shandle->fd > 0); - GF_ASSERT(shandle->path); GF_ASSERT(key); GF_ASSERT(value); GF_ASSERT(value->data); - if (is_key_glusterd_hooks_friendly(key)) { - exists = 1; + if (dict_data->key_check == 1) { + if (is_key_glusterd_hooks_friendly(key)) { + exists = 1; - } else { - exists = glusterd_check_option_exists(key, NULL); + } else { + exists = glusterd_check_option_exists(key, NULL); + } } - - if (1 == exists) { - gf_msg_debug(xl->name, 0, - "Storing in volinfo:key= %s, " + if (exists == 1 || dict_data->key_check == 0) { + gf_msg_debug(this->name, 0, + "Storing in buffer for volinfo:key= %s, " "val=%s", key, value->data); - } else { - gf_msg_debug(xl->name, 0, "Discarding:key= %s, val=%s", key, + gf_msg_debug(this->name, 0, "Discarding:key= %s, val=%s", key, value->data); return 0; } - ret = gf_store_save_value(shandle->fd, key, (char *)value->data); - if (ret) { - gf_msg(xl->name, GF_LOG_ERROR, 0, GD_MSG_STORE_HANDLE_WRITE_FAIL, - "Unable to write into store" - " handle for path: %s", - shandle->path); + /* + * The option_len considers the length of the key value + * pair and along with that '=' and '\n', but as value->len + * already considers a NULL at the end of the data, adding + * just 1. 
+ */ + option_len = strlen(key) + value->len + 1; + + if ((VOLINFO_BUFFER_SIZE - dict_data->buffer_len - 1) < option_len) { + ret = gf_store_save_items(shandle->fd, dict_data->buffer); + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED, NULL); + return -1; + } + dict_data->buffer_len = 0; + dict_data->buffer[0] = '\0'; + } + ret = snprintf(dict_data->buffer + dict_data->buffer_len, option_len + 1, + "%s=%s\n", key, value->data); + if (ret < 0 || ret > option_len + 1) { + gf_smsg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_COPY_FAIL, NULL); return -1; } + + dict_data->buffer_len += ret; + return 0; } @@ -1013,7 +1000,7 @@ glusterd_store_create_snap_dir(glusterd_snap_t *snap) return ret; } -int32_t +static int32_t glusterd_store_volinfo_write(int fd, glusterd_volinfo_t *volinfo) { int32_t ret = -1; @@ -1021,19 +1008,47 @@ glusterd_store_volinfo_write(int fd, glusterd_volinfo_t *volinfo) GF_ASSERT(fd > 0); GF_ASSERT(volinfo); GF_ASSERT(volinfo->shandle); + xlator_t *this = NULL; + glusterd_volinfo_data_store_t *dict_data = NULL; + + this = THIS; + GF_ASSERT(this); shandle = volinfo->shandle; + + dict_data = GF_CALLOC(1, sizeof(glusterd_volinfo_data_store_t), + gf_gld_mt_volinfo_dict_data_t); + if (dict_data == NULL) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_MEMORY, NULL); + return -1; + } + ret = glusterd_volume_exclude_options_write(fd, volinfo); - if (ret) + if (ret) { goto out; + } + + dict_data->shandle = shandle; + dict_data->key_check = 1; shandle->fd = fd; - dict_foreach(volinfo->dict, _storeopts, shandle); + dict_foreach(volinfo->dict, _storeopts, (void *)dict_data); + + dict_data->key_check = 0; + dict_foreach(volinfo->gsync_slaves, _storeopts, (void *)dict_data); + + if (dict_data->buffer_len > 0) { + ret = gf_store_save_items(fd, dict_data->buffer); + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED, NULL); + goto out; + } + } - dict_foreach(volinfo->gsync_slaves, _storeslaves, shandle); shandle->fd = 0; 
out: - gf_msg_debug(THIS->name, 0, "Returning %d", ret); + GF_FREE(dict_data); + gf_msg_debug(this->name, 0, "Returning %d", ret); return ret; } @@ -1274,14 +1289,6 @@ out: return ret; } -static int -_gd_store_rebalance_dict(dict_t *dict, char *key, data_t *value, void *data) -{ - int fd = *(int *)data; - - return gf_store_save_value(fd, key, value->data); -} - int32_t glusterd_store_node_state_write(int fd, glusterd_volinfo_t *volinfo) { @@ -1289,6 +1296,12 @@ glusterd_store_node_state_write(int fd, glusterd_volinfo_t *volinfo) char buf[PATH_MAX]; char uuid[UUID_SIZE + 1]; uint total_len = 0; + glusterd_volinfo_data_store_t *dict_data = NULL; + gf_store_handle_t shandle; + xlator_t *this = NULL; + + this = THIS; + GF_ASSERT(this); GF_ASSERT(fd > 0); GF_ASSERT(volinfo); @@ -1328,14 +1341,33 @@ glusterd_store_node_state_write(int fd, glusterd_volinfo_t *volinfo) } ret = gf_store_save_items(fd, buf); - if (ret) + if (ret) { goto out; + } if (volinfo->rebal.dict) { - dict_foreach(volinfo->rebal.dict, _gd_store_rebalance_dict, &fd); + dict_data = GF_CALLOC(1, sizeof(glusterd_volinfo_data_store_t), + gf_gld_mt_volinfo_dict_data_t); + if (dict_data == NULL) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_MEMORY, NULL); + return -1; + } + dict_data->shandle = &shandle; + shandle.fd = fd; + dict_foreach(volinfo->rebal.dict, _storeopts, (void *)dict_data); + if (dict_data->buffer_len > 0) { + ret = gf_store_save_items(fd, dict_data->buffer); + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED, + NULL); + goto out; + ; + } + } } out: - gf_msg_debug(THIS->name, 0, "Returning %d", ret); + GF_FREE(dict_data); + gf_msg_debug(this->name, 0, "Returning %d", ret); return ret; } @@ -1781,8 +1813,9 @@ glusterd_store_delete_snap(glusterd_snap_t *snap) goto out; } - GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch); - while (entry) { + while ((entry = sys_readdir(dir, scratch))) { + if (gf_irrelevant_entry(entry)) + continue; len = snprintf(path, PATH_MAX, 
"%s/%s", delete_path, entry->d_name); if ((len < 0) || (len >= PATH_MAX)) { goto stat_failed; @@ -1812,7 +1845,6 @@ glusterd_store_delete_snap(glusterd_snap_t *snap) ret ? "Failed to remove" : "Removed", entry->d_name); stat_failed: memset(path, 0, sizeof(path)); - GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch); } ret = sys_closedir(dir); @@ -2309,7 +2341,7 @@ glusterd_store_retrieve_snapd(glusterd_volinfo_t *volinfo) ret = 0; out: - if (gf_store_iter_destroy(iter)) { + if (gf_store_iter_destroy(&iter)) { gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL, "Failed to destroy store iter"); ret = -1; @@ -2642,6 +2674,13 @@ glusterd_store_retrieve_bricks(glusterd_volinfo_t *volinfo) brick_count++; } + if (gf_store_iter_destroy(&tmpiter)) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL, + "Failed to destroy store iter"); + ret = -1; + goto out; + } + ret = gf_store_iter_new(volinfo->shandle, &tmpiter); if (ret) @@ -2816,13 +2855,13 @@ glusterd_store_retrieve_bricks(glusterd_volinfo_t *volinfo) ret = 0; out: - if (gf_store_iter_destroy(tmpiter)) { + if (gf_store_iter_destroy(&tmpiter)) { gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL, "Failed to destroy store iter"); ret = -1; } - if (gf_store_iter_destroy(iter)) { + if (gf_store_iter_destroy(&iter)) { gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL, "Failed to destroy store iter"); ret = -1; @@ -2955,7 +2994,7 @@ glusterd_store_retrieve_node_state(glusterd_volinfo_t *volinfo) ret = 0; out: - if (gf_store_iter_destroy(iter)) { + if (gf_store_iter_destroy(&iter)) { gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL, "Failed to destroy store iter"); ret = -1; @@ -3231,7 +3270,7 @@ glusterd_store_update_volinfo(glusterd_volinfo_t *volinfo) ret = 0; out: - if (gf_store_iter_destroy(iter)) { + if (gf_store_iter_destroy(&iter)) { gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL, "Failed to destroy store iter"); ret 
= -1; @@ -3336,20 +3375,6 @@ glusterd_store_set_options_path(glusterd_conf_t *conf, char *path, size_t len) snprintf(path, len, "%s/options", conf->workdir); } -int -_store_global_opts(dict_t *this, char *key, data_t *value, void *data) -{ - gf_store_handle_t *shandle = data; - - if (gf_store_save_value(shandle->fd, key, (char *)value->data)) { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_STORE_HANDLE_WRITE_FAIL, - "Unable to write into store handle for key : %s, value %s", key, - (char *)value->data); - } - - return 0; -} - int32_t glusterd_store_options(xlator_t *this, dict_t *opts) { @@ -3358,13 +3383,15 @@ glusterd_store_options(xlator_t *this, dict_t *opts) char path[PATH_MAX] = {0}; int fd = -1; int32_t ret = -1; + glusterd_volinfo_data_store_t *dict_data = NULL; conf = this->private; glusterd_store_set_options_path(conf, path, sizeof(path)); ret = gf_store_handle_new(path, &shandle); - if (ret) + if (ret) { goto out; + } fd = gf_store_mkstemp(shandle); if (fd <= 0) { @@ -3372,15 +3399,30 @@ glusterd_store_options(xlator_t *this, dict_t *opts) goto out; } + dict_data = GF_CALLOC(1, sizeof(glusterd_volinfo_data_store_t), + gf_gld_mt_volinfo_dict_data_t); + if (dict_data == NULL) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_MEMORY, NULL); + return -1; + } + dict_data->shandle = shandle; shandle->fd = fd; - dict_foreach(opts, _store_global_opts, shandle); - shandle->fd = 0; + dict_foreach(opts, _storeopts, (void *)dict_data); + if (dict_data->buffer_len > 0) { + ret = gf_store_save_items(fd, dict_data->buffer); + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_FILE_OP_FAILED, NULL); + goto out; + } + } + ret = gf_store_rename_tmppath(shandle); - if (ret) - goto out; out: - if ((ret < 0) && (fd > 0)) + shandle->fd = 0; + GF_FREE(dict_data); + if ((ret < 0) && (fd > 0)) { gf_store_unlink_tmppath(shandle); + } gf_store_handle_destroy(shandle); return ret; } @@ -3426,7 +3468,7 @@ glusterd_store_retrieve_options(xlator_t *this) goto out; ret = 0; out: 
- (void)gf_store_iter_destroy(iter); + (void)gf_store_iter_destroy(&iter); gf_store_handle_destroy(shandle); return ret; } @@ -3478,28 +3520,28 @@ glusterd_store_retrieve_volumes(xlator_t *this, glusterd_snap_t *snap) goto out; } - GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch); - - while (entry) { + while ((entry = sys_readdir(dir, scratch))) { + if (gf_irrelevant_entry(entry)) + continue; if (snap && ((!strcmp(entry->d_name, "geo-replication")) || (!strcmp(entry->d_name, "info")))) - goto next; + continue; len = snprintf(entry_path, PATH_MAX, "%s/%s", path, entry->d_name); - if ((len < 0) || (len >= PATH_MAX)) { - goto next; - } + if ((len < 0) || (len >= PATH_MAX)) + continue; + ret = sys_lstat(entry_path, &st); if (ret == -1) { gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY, "Failed to stat entry %s : %s", path, strerror(errno)); - goto next; + continue; } if (!S_ISDIR(st.st_mode)) { gf_msg_debug(this->name, 0, "%s is not a valid volume", entry->d_name); - goto next; + continue; } volinfo = glusterd_store_retrieve_volume(entry->d_name, snap); @@ -3522,8 +3564,6 @@ glusterd_store_retrieve_volumes(xlator_t *this, glusterd_snap_t *snap) glusterd_store_create_nodestate_sh_on_absence(volinfo); glusterd_store_perform_node_state_store(volinfo); } - next: - GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch); } ret = 0; @@ -3878,7 +3918,7 @@ glusterd_store_update_snap(glusterd_snap_t *snap) ret = 0; out: - if (gf_store_iter_destroy(iter)) { + if (gf_store_iter_destroy(&iter)) { gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STORE_ITER_DESTROY_FAIL, "Failed to destroy store iter"); ret = -1; @@ -4073,9 +4113,9 @@ glusterd_store_retrieve_snaps(xlator_t *this) goto out; } - GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch); - - while (entry) { + while ((entry = sys_readdir(dir, scratch))) { + if (gf_irrelevant_entry(entry)) + continue; if (strcmp(entry->d_name, GLUSTERD_MISSED_SNAPS_LIST_FILE)) { ret = glusterd_store_retrieve_snap(entry->d_name); if (ret) { @@ -4084,7 
+4124,6 @@ glusterd_store_retrieve_snaps(xlator_t *this) goto out; } } - GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch); } /* Retrieve missed_snaps_list */ @@ -4399,7 +4438,7 @@ glusterd_store_create_peer_shandle(glusterd_peerinfo_t *peerinfo) static int32_t glusterd_store_peer_write(int fd, glusterd_peerinfo_t *peerinfo) { - char buf[128]; + char buf[PATH_MAX]; uint total_len = 0; int32_t ret = 0; int32_t i = 1; @@ -4408,7 +4447,7 @@ glusterd_store_peer_write(int fd, glusterd_peerinfo_t *peerinfo) ret = snprintf(buf + total_len, sizeof(buf) - total_len, "%s=%s\n%s=%d\n", GLUSTERD_STORE_KEY_PEER_UUID, uuid_utoa(peerinfo->uuid), GLUSTERD_STORE_KEY_PEER_STATE, peerinfo->state.state); - if (ret < 0 || ret >= sizeof(buf)) { + if (ret < 0 || ret >= sizeof(buf) - total_len) { ret = -1; goto out; } @@ -4419,7 +4458,7 @@ glusterd_store_peer_write(int fd, glusterd_peerinfo_t *peerinfo) ret = snprintf(buf + total_len, sizeof(buf) - total_len, GLUSTERD_STORE_KEY_PEER_HOSTNAME "%d=%s\n", i, hostname->hostname); - if (ret < 0 || ret >= sizeof(buf)) { + if (ret < 0 || ret >= sizeof(buf) - total_len) { ret = -1; goto out; } @@ -4531,11 +4570,9 @@ glusterd_store_retrieve_peers(xlator_t *this) goto out; } - for (;;) { - GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch); - if (!entry) { - break; - } + while ((entry = sys_readdir(dir, scratch))) { + if (gf_irrelevant_entry(entry)) + continue; if (gf_uuid_parse(entry->d_name, tmp_uuid) != 0) { gf_log(this->name, GF_LOG_WARNING, "skipping non-peer file %s", entry->d_name); @@ -4623,7 +4660,7 @@ glusterd_store_retrieve_peers(xlator_t *this) is_ok = _gf_true; next: - (void)gf_store_iter_destroy(iter); + (void)gf_store_iter_destroy(&iter); if (!is_ok) { gf_log(this->name, GF_LOG_WARNING, diff --git a/xlators/mgmt/glusterd/src/glusterd-store.h b/xlators/mgmt/glusterd/src/glusterd-store.h index 04070549678..83f4df0783e 100644 --- a/xlators/mgmt/glusterd/src/glusterd-store.h +++ b/xlators/mgmt/glusterd/src/glusterd-store.h @@ -29,7 +29,7 @@ 
typedef enum glusterd_store_ver_ac_ { } glusterd_volinfo_ver_ac_t; #define UUID_SIZE 36 - +#define VOLINFO_BUFFER_SIZE 4093 #define GLUSTERD_STORE_UUID_KEY "UUID" #define GLUSTERD_STORE_KEY_VOL_TYPE "type" @@ -112,6 +112,19 @@ typedef enum glusterd_store_ver_ac_ { #define GLUSTERD_STORE_KEY_GANESHA_GLOBAL "nfs-ganesha" +/* + * The structure is responsible for handling the parameter for writes into + * the buffer before it is finally written to the file. The writes will be + * of the form of key-value pairs. + */ +struct glusterd_volinfo_data_store_ { + gf_store_handle_t *shandle; /*Contains fd and path of the file */ + int16_t buffer_len; + char key_check; /* flag to check if key is to be validated before write*/ + char buffer[VOLINFO_BUFFER_SIZE]; +}; +typedef struct glusterd_volinfo_data_store_ glusterd_volinfo_data_store_t; + int32_t glusterd_store_volinfo(glusterd_volinfo_t *volinfo, glusterd_volinfo_ver_ac_t ac); diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c index 18990fe365b..ca845903c4f 100644 --- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c +++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c @@ -239,8 +239,10 @@ glusterd_svc_check_topology_identical(char *svc_name, int tmpclean = 0; int tmpfd = -1; - if ((!identical) || (!this) || (!this->private)) + if ((!identical) || (!this) || (!this->private)) { + gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } conf = this->private; GF_VALIDATE_OR_GOTO(this->name, conf, out); @@ -358,8 +360,10 @@ glusterd_volume_svc_check_topology_identical( int tmpclean = 0; int tmpfd = -1; - if ((!identical) || (!this) || (!this->private)) + if ((!identical) || (!this) || (!this->private)) { + gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } conf = this->private; GF_VALIDATE_OR_GOTO(this->name, conf, out); @@ -634,7 +638,9 @@ my_callback(struct rpc_req *req, struct iovec *iov, int count, 
void *v_frame) conf = this->private; GF_VALIDATE_OR_GOTO(this->name, conf, out); - GF_ATOMIC_DEC(conf->blockers); + if (GF_ATOMIC_DEC(conf->blockers) == 0) { + synccond_broadcast(&conf->cond_blockers); + } STACK_DESTROY(frame->root); out: @@ -722,7 +728,9 @@ out: if (volinfo) glusterd_volinfo_unref(volinfo); - GF_ATOMIC_DEC(conf->blockers); + if (GF_ATOMIC_DEC(conf->blockers) == 0) { + synccond_broadcast(&conf->cond_blockers); + } STACK_DESTROY(frame->root); return 0; } @@ -785,12 +793,16 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags, frame = create_frame(this, this->ctx->pool); if (!frame) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_FRAME_CREATE_FAIL, + NULL); goto *errlbl; } if (op == GLUSTERD_SVC_ATTACH) { dict = dict_new(); if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, + NULL); ret = -ENOMEM; goto *errlbl; } @@ -808,6 +820,7 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags, file_len = stbuf.st_size; volfile_content = GF_MALLOC(file_len + 1, gf_common_mt_char); if (!volfile_content) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); ret = -ENOMEM; goto *errlbl; } @@ -834,10 +847,8 @@ __glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags, ret = dict_allocate_and_serialize(dict, &brick_req.dict.dict_val, &brick_req.dict.dict_len); if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, - GD_MSG_DICT_SERL_LENGTH_GET_FAIL, - "Failed to serialize dict " - "to request buffer"); + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto *errlbl; } } @@ -969,7 +980,7 @@ glusterd_attach_svc(glusterd_svc_t *svc, glusterd_volinfo_t *volinfo, int flags) * TBD: see if there's a better way */ synclock_unlock(&conf->big_lock); - sleep(1); + synctask_sleep(1); synclock_lock(&conf->big_lock); } ret = -1; @@ -1023,7 +1034,7 @@ glusterd_detach_svc(glusterd_svc_t *svc, glusterd_volinfo_t *volinfo, int sig) * TBD: see if there's a 
better way */ synclock_unlock(&conf->big_lock); - sleep(1); + synctask_sleep(1); synclock_lock(&conf->big_lock); } ret = -1; diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c index 99119d69e45..18b3fb13630 100644 --- a/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c +++ b/xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c @@ -162,6 +162,9 @@ glusterd_svc_start(glusterd_svc_t *svc, int flags, dict_t *cmdline) char *localtime_logging = NULL; char *log_level = NULL; char daemon_log_level[30] = {0}; + char msg[1024] = { + 0, + }; int32_t len = 0; this = THIS; @@ -187,7 +190,7 @@ glusterd_svc_start(glusterd_svc_t *svc, int flags, dict_t *cmdline) runinit(&runner); - if (this->ctx->cmd_args.valgrind) { + if (this->ctx->cmd_args.vgtool != _gf_none) { len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-%s.log", svc->proc.logdir, svc->name); if ((len < 0) || (len >= PATH_MAX)) { @@ -195,9 +198,13 @@ glusterd_svc_start(glusterd_svc_t *svc, int flags, dict_t *cmdline) goto unlock; } - runner_add_args(&runner, "valgrind", "--leak-check=full", - "--trace-children=yes", "--track-origins=yes", - NULL); + if (this->ctx->cmd_args.vgtool == _gf_memcheck) + runner_add_args(&runner, "valgrind", "--leak-check=full", + "--trace-children=yes", "--track-origins=yes", + NULL); + else + runner_add_args(&runner, "valgrind", "--tool=drd", NULL); + runner_argprintf(&runner, "--log-file=%s", valgrind_logfile); } @@ -226,8 +233,8 @@ glusterd_svc_start(glusterd_svc_t *svc, int flags, dict_t *cmdline) if (cmdline) dict_foreach(cmdline, svc_add_args, (void *)&runner); - gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SVC_START_SUCCESS, - "Starting %s service", svc->name); + snprintf(msg, sizeof(msg), "Starting %s service", svc->name); + runner_log(&runner, this->name, GF_LOG_DEBUG, msg); if (flags == PROC_START_NO_WAIT) { ret = runner_run_nowait(&runner); diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c 
b/xlators/mgmt/glusterd/src/glusterd-syncop.c index b7039f83885..b73d37ad08e 100644 --- a/xlators/mgmt/glusterd/src/glusterd-syncop.c +++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c @@ -406,8 +406,11 @@ gd_syncop_mgmt_v3_lock(glusterd_op_t op, dict_t *op_ctx, ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val, &req.dict.dict_len); - if (ret) + if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; + } gf_uuid_copy(req.uuid, my_uuid); gf_uuid_copy(req.txn_id, txn_id); @@ -507,8 +510,11 @@ gd_syncop_mgmt_v3_unlock(dict_t *op_ctx, glusterd_peerinfo_t *peerinfo, ret = dict_allocate_and_serialize(op_ctx, &req.dict.dict_val, &req.dict.dict_len); - if (ret) + if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; + } gf_uuid_copy(req.uuid, my_uuid); gf_uuid_copy(req.txn_id, txn_id); @@ -842,16 +848,21 @@ gd_syncop_mgmt_stage_op(glusterd_peerinfo_t *peerinfo, struct syncargs *args, uuid_t *peerid = NULL; req = GF_CALLOC(1, sizeof(*req), gf_gld_mt_mop_stage_req_t); - if (!req) + if (!req) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); goto out; + } gf_uuid_copy(req->uuid, my_uuid); req->op = op; ret = dict_allocate_and_serialize(dict_out, &req->buf.buf_val, &req->buf.buf_len); - if (ret) + if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; + } GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret); if (ret) @@ -903,6 +914,8 @@ _gd_syncop_brick_op_cbk(struct rpc_req *req, struct iovec *iov, int count, if (rsp.output.output_len) { args->dict = dict_new(); if (!args->dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, + NULL); ret = -1; args->op_errno = ENOMEM; goto out; @@ -910,8 +923,11 @@ _gd_syncop_brick_op_cbk(struct rpc_req *req, struct iovec *iov, int count, ret = dict_unserialize(rsp.output.output_val, rsp.output.output_len, 
&args->dict); - if (ret < 0) + if (ret < 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, + GD_MSG_DICT_UNSERIALIZE_FAIL, NULL); goto out; + } } args->op_ret = rsp.op_ret; @@ -1152,16 +1168,21 @@ gd_syncop_mgmt_commit_op(glusterd_peerinfo_t *peerinfo, struct syncargs *args, uuid_t *peerid = NULL; req = GF_CALLOC(1, sizeof(*req), gf_gld_mt_mop_commit_req_t); - if (!req) + if (!req) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); goto out; + } gf_uuid_copy(req->uuid, my_uuid); req->op = op; ret = dict_allocate_and_serialize(dict_out, &req->buf.buf_val, &req->buf.buf_len); - if (ret) + if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, + GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL); goto out; + } GD_ALLOC_COPY_UUID(peerid, peerinfo->uuid, ret); if (ret) @@ -1278,8 +1299,10 @@ gd_stage_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict, GF_ASSERT(conf); rsp_dict = dict_new(); - if (!rsp_dict) + if (!rsp_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } if ((op == GD_OP_CREATE_VOLUME) || (op == GD_OP_ADD_BRICK) || (op == GD_OP_START_VOLUME)) @@ -1408,6 +1431,7 @@ gd_commit_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict, rsp_dict = dict_new(); if (!rsp_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); ret = -1; goto out; } @@ -1464,8 +1488,11 @@ commit_done: if (op == GD_OP_STATUS_VOLUME) { ret = dict_get_uint32(req_dict, "cmd", &cmd); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=cmd", NULL); goto out; + } if (origin_glusterd) { if ((cmd & GF_CLI_STATUS_ALL)) { @@ -1691,10 +1718,12 @@ gd_brick_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict, rpc_clnt_t *rpc = NULL; dict_t *rsp_dict = NULL; int32_t cmd = GF_OP_CMD_NONE; + glusterd_volinfo_t *volinfo = NULL; this = THIS; rsp_dict = dict_new(); if (!rsp_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, 
NULL); ret = -1; goto out; } @@ -1722,18 +1751,28 @@ gd_brick_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict, cds_list_for_each_entry_safe(pending_node, tmp, &selected, list) { rpc = glusterd_pending_node_get_rpc(pending_node); + /* In the case of rebalance if the rpc object is null, we try to + * create the rpc object. if the rebalance daemon is down, it returns + * -1. otherwise, rpc object will be created and referenced. + */ if (!rpc) { - if (pending_node->type == GD_NODE_REBALANCE) { - ret = 0; - glusterd_defrag_volume_node_rsp(req_dict, NULL, op_ctx); + if (pending_node->type == GD_NODE_REBALANCE && pending_node->node) { + volinfo = pending_node->node; + ret = glusterd_rebalance_rpc_create(volinfo); + if (ret) { + ret = 0; + glusterd_defrag_volume_node_rsp(req_dict, NULL, op_ctx); + goto out; + } else { + rpc = glusterd_defrag_rpc_get(volinfo->rebal.defrag); + } + } else { + ret = -1; + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE, + "Brick Op failed " + "due to rpc failure."); goto out; } - - ret = -1; - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE, - "Brick Op failed " - "due to rpc failure."); - goto out; } ret = gd_syncop_mgmt_brick_op(rpc, pending_node, op, req_dict, op_ctx, @@ -1759,7 +1798,7 @@ gd_brick_op_phase(glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict, pending_node = NULL; ret = 0; out: - if (pending_node) + if (pending_node && pending_node->node) glusterd_pending_node_put_rpc(pending_node); if (rsp_dict) diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.h b/xlators/mgmt/glusterd/src/glusterd-syncop.h index ce4a940c7a0..a265f2135c6 100644 --- a/xlators/mgmt/glusterd/src/glusterd-syncop.h +++ b/xlators/mgmt/glusterd/src/glusterd-syncop.h @@ -32,7 +32,7 @@ ret = gd_syncop_submit_request(rpc, req, stb, cookie, prog, procnum, \ cbk, (xdrproc_t)xdrproc); \ if (!ret) \ - synctask_yield(stb->task); \ + synctask_yield(stb->task, NULL); \ else \ gf_asprintf(&stb->errstr, \ "%s failed. 
Check log file" \ diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c index 57ff41d0595..90ef2cf4c9c 100644 --- a/xlators/mgmt/glusterd/src/glusterd-utils.c +++ b/xlators/mgmt/glusterd/src/glusterd-utils.c @@ -79,6 +79,14 @@ #include <sys/sockio.h> #endif +#ifdef __FreeBSD__ +#include <sys/sysctl.h> +#include <sys/param.h> +#include <sys/queue.h> +#include <libprocstat.h> +#include <libutil.h> +#endif + #define NFS_PROGRAM 100003 #define NFSV3_VERSION 3 @@ -443,6 +451,8 @@ glusterd_submit_request(struct rpc_clnt *rpc, void *req, call_frame_t *frame, if (!iobref) { iobref = iobref_new(); if (!iobref) { + gf_smsg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY, + NULL); goto out; } @@ -645,6 +655,7 @@ glusterd_volinfo_new(glusterd_volinfo_t **volinfo) new_volinfo->dict = dict_new(); if (!new_volinfo->dict) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); GF_FREE(new_volinfo); goto out; @@ -652,6 +663,7 @@ glusterd_volinfo_new(glusterd_volinfo_t **volinfo) new_volinfo->gsync_slaves = dict_new(); if (!new_volinfo->gsync_slaves) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); dict_unref(new_volinfo->dict); GF_FREE(new_volinfo); goto out; @@ -659,6 +671,7 @@ glusterd_volinfo_new(glusterd_volinfo_t **volinfo) new_volinfo->gsync_active_slaves = dict_new(); if (!new_volinfo->gsync_active_slaves) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); dict_unref(new_volinfo->dict); dict_unref(new_volinfo->gsync_slaves); GF_FREE(new_volinfo); @@ -675,7 +688,9 @@ glusterd_volinfo_new(glusterd_volinfo_t **volinfo) glusterd_gfproxydsvc_build(&new_volinfo->gfproxyd.svc); glusterd_shdsvc_build(&new_volinfo->shd.svc); + pthread_mutex_init(&new_volinfo->store_volinfo_lock, NULL); pthread_mutex_init(&new_volinfo->reflock, NULL); + *volinfo = glusterd_volinfo_ref(new_volinfo); ret = 0; @@ -956,7 +971,10 @@ glusterd_volinfo_delete(glusterd_volinfo_t 
*volinfo) glusterd_auth_cleanup(volinfo); glusterd_shd_svcproc_cleanup(&volinfo->shd); + pthread_mutex_destroy(&volinfo->store_volinfo_lock); pthread_mutex_destroy(&volinfo->reflock); + LOCK_DESTROY(&volinfo->lock); + GF_FREE(volinfo); ret = 0; out: @@ -1107,7 +1125,8 @@ glusterd_get_brick_mount_dir(char *brickpath, char *hostname, char *mount_dir) } brick_dir = &brickpath[strlen(mnt_pt)]; - brick_dir++; + if (brick_dir[0] == '/') + brick_dir++; snprintf(mount_dir, VALID_GLUSTERD_PATHMAX, "/%s", brick_dir); } @@ -1352,6 +1371,10 @@ glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo, "Reason : %s ", brickinfo->hostname, brickinfo->path, strerror(errno)); + gf_smsg( + "glusterd", GF_LOG_ERROR, errno, GD_MSG_CREATE_BRICK_DIR_FAILED, + "Brick_hostname=%s, Brick_path=%s, Reason=%s", + brickinfo->hostname, brickinfo->path, strerror(errno), NULL); goto out; } } else { @@ -1364,6 +1387,9 @@ glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo, "lstat failed on %s. " "Reason : %s", brickinfo->path, strerror(errno)); + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_LSTAT_FAIL, + "Failed on Brick_path=%s, Reason=%s", brickinfo->path, + strerror(errno), NULL); goto out; } @@ -1372,6 +1398,8 @@ glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo, "The provided path %s " "which is already present, is not a directory", brickinfo->path); + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DIR_OP_FAILED, + "Brick_path=%s", brickinfo->path, NULL); ret = -1; goto out; } @@ -1388,6 +1416,8 @@ glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo, "lstat failed on /. " "Reason : %s", strerror(errno)); + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_LSTAT_FAIL, + "Failed on /, Reason=%s", strerror(errno), NULL); goto out; } @@ -1397,6 +1427,9 @@ glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo, "lstat failed on %s. 
" "Reason : %s", parentdir, strerror(errno)); + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_LSTAT_FAIL, + "Failed on parentdir=%s, Reason=%s", parentdir, strerror(errno), + NULL); goto out; } if (strncmp(volname, GLUSTER_SHARED_STORAGE, @@ -1407,6 +1440,8 @@ glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo, len = snprintf(msg, sizeof(msg), "Brick isn't allowed to be " "created inside glusterd's working directory."); + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_BRICK_CREATION_FAIL, + NULL); ret = -1; goto out; } @@ -1422,6 +1457,10 @@ glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo, "command if you want to override this " "behavior.", brickinfo->hostname, brickinfo->path); + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_BRICK_CREATE_MNTPNT, + "Use 'force' at the end of the command if you want to " + "override this behavior, Brick_hostname=%s, Brick_path=%s", + brickinfo->hostname, brickinfo->path, NULL); ret = -1; goto out; } else if (parent_st.st_dev == root_st.st_dev) { @@ -1435,6 +1474,10 @@ glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo, "command if you want to override this " "behavior.", brickinfo->hostname, brickinfo->path); + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_BRICK_CREATE_ROOT, + "Use 'force' at the end of the command if you want to " + "override this behavior, Brick_hostname=%s, Brick_path=%s", + brickinfo->hostname, brickinfo->path, NULL); /* If --wignore-partition flag is used, ignore warnings * related to bricks being on root partition when 'force' @@ -1466,6 +1509,10 @@ glusterd_validate_and_create_brickpath(glusterd_brickinfo_t *brickinfo, ".glusterfs directory for brick %s:%s. 
" "Reason : %s ", brickinfo->hostname, brickinfo->path, strerror(errno)); + gf_smsg("glusterd", GF_LOG_ERROR, errno, + GD_MSG_CREATE_GLUSTER_DIR_FAILED, + "Brick_hostname=%s, Brick_path=%s, Reason=%s", + brickinfo->hostname, brickinfo->path, strerror(errno), NULL); goto out; } @@ -1608,8 +1655,10 @@ glusterd_volinfo_find_by_volume_id(uuid_t volume_id, glusterd_volinfo_t *voliter = NULL; glusterd_conf_t *priv = NULL; - if (!volume_id) + if (!volume_id) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); return -1; + } this = THIS; priv = this->private; @@ -1881,8 +1930,11 @@ glusterd_brick_connect(glusterd_volinfo_t *volinfo, * connections is too long for unix domain socket connections. */ options = dict_new(); - if (!options) + if (!options) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, + NULL); goto out; + } ret = rpc_transport_unix_options_build(options, socketpath, 600); if (ret) @@ -2025,8 +2077,8 @@ glusterd_volume_start_glusterfs(glusterd_volinfo_t *volinfo, retry: runinit(&runner); - if (this->ctx->cmd_args.valgrind) { - /* Run bricks with valgrind */ + if (this->ctx->cmd_args.vgtool != _gf_none) { + /* Run bricks with valgrind. 
*/ if (volinfo->logdir) { len = snprintf(valgrind_logfile, PATH_MAX, "%s/valgrind-%s-%s.log", volinfo->logdir, volinfo->volname, exp_path); @@ -2040,8 +2092,13 @@ retry: goto out; } - runner_add_args(&runner, "valgrind", "--leak-check=full", - "--trace-children=yes", "--track-origins=yes", NULL); + if (this->ctx->cmd_args.vgtool == _gf_memcheck) + runner_add_args(&runner, "valgrind", "--leak-check=full", + "--trace-children=yes", "--track-origins=yes", + NULL); + else + runner_add_args(&runner, "valgrind", "--tool=drd", NULL); + runner_argprintf(&runner, "--log-file=%s", valgrind_logfile); } @@ -2154,7 +2211,7 @@ retry: if (is_brick_mx_enabled()) runner_add_arg(&runner, "--brick-mux"); - runner_log(&runner, "", 0, "Starting GlusterFS"); + runner_log(&runner, "", GF_LOG_DEBUG, "Starting GlusterFS"); brickinfo->port = port; brickinfo->rdma_port = rdma_port; @@ -2163,7 +2220,10 @@ retry: if (wait) { synclock_unlock(&priv->big_lock); + errno = 0; ret = runner_run(&runner); + if (errno != 0) + ret = errno; synclock_lock(&priv->big_lock); if (ret == EADDRINUSE) { @@ -2745,6 +2805,15 @@ glusterd_volume_compute_cksum(glusterd_volinfo_t *volinfo, char *cksum_path, ret = -1; goto out; } + } else if (priv->op_version < GD_OP_VERSION_7_0) { + ret = get_checksum_for_path(filepath, &cksum, priv->op_version); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CKSUM_GET_FAIL, + "unable to get " + "checksum for path: %s", + filepath); + goto out; + } } ret = get_checksum_for_file(fd, &cksum, priv->op_version); @@ -2864,13 +2933,19 @@ glusterd_add_bricks_hname_path_to_dict(dict_t *dict, { ret = snprintf(key, sizeof(key), "%d-hostname", index); ret = dict_set_strn(dict, key, ret, brickinfo->hostname); - if (ret) + if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } ret = snprintf(key, sizeof(key), "%d-path", index); ret = dict_set_strn(dict, key, ret, brickinfo->path); - if (ret) + if (ret) { + gf_smsg("glusterd", 
GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } index++; } @@ -2998,11 +3073,16 @@ glusterd_add_volume_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict, goto out; ret = gd_add_vol_snap_details_to_dict(dict, pfx, volinfo); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "vol snap details", NULL); goto out; + } volume_id_str = gf_strdup(uuid_utoa(volinfo->volume_id)); if (!volume_id_str) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, + "volume id=%s", volinfo->volume_id, NULL); ret = -1; goto out; } @@ -3035,6 +3115,8 @@ glusterd_add_volume_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict, rebalance_id_str = gf_strdup(uuid_utoa(volinfo->rebal.rebalance_id)); if (!rebalance_id_str) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, + "rebalance_id=%s", volinfo->rebal.rebalance_id, NULL); ret = -1; goto out; } @@ -3186,6 +3268,9 @@ out: GF_FREE(rebalance_id_str); GF_FREE(rb_id_str); + if (key[0] != '\0' && ret != 0) + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); gf_msg_debug(this->name, 0, "Returning with %d", ret); return ret; } @@ -3244,29 +3329,44 @@ glusterd_vol_add_quota_conf_to_dict(glusterd_volinfo_t *volinfo, dict_t *load, snprintf(key, sizeof(key) - 1, "%s.gfid%d", key_prefix, gfid_idx); ret = dict_set_dynstr_with_alloc(load, key, uuid_utoa(buf)); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } snprintf(key, sizeof(key) - 1, "%s.gfid-type%d", key_prefix, gfid_idx); ret = dict_set_int8(load, key, type); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } } ret = snprintf(key, sizeof(key), "%s.gfid-count", key_prefix); ret = dict_set_int32n(load, key, ret, gfid_idx); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, 
GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } snprintf(key, sizeof(key), "%s.quota-cksum", key_prefix); ret = dict_set_uint32(load, key, volinfo->quota_conf_cksum); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } snprintf(key, sizeof(key), "%s.quota-version", key_prefix); ret = dict_set_uint32(load, key, volinfo->quota_conf_version); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } ret = 0; out: @@ -3599,8 +3699,11 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count, snprintf(key_prefix, sizeof(key_prefix), "volume%d", count); keylen = snprintf(key, sizeof(key), "%s.name", key_prefix); ret = dict_get_strn(peer_data, key, keylen, &volname); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=%s", key, NULL); goto out; + } ret = glusterd_volinfo_find(volname, &volinfo); if (ret) { @@ -3617,8 +3720,11 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count, keylen = snprintf(key, sizeof(key), "%s.version", key_prefix); ret = dict_get_int32n(peer_data, key, keylen, &version); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=%s", key, NULL); goto out; + } if (version > volinfo->version) { // Mismatch detected @@ -3626,6 +3732,7 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count, "Version of volume %s differ. 
local version = %d, " "remote version = %d on peer %s", volinfo->volname, volinfo->version, version, hostname); + GF_ATOMIC_INIT(volinfo->volpeerupdate, 1); *status = GLUSTERD_VOL_COMP_UPDATE_REQ; goto out; } else if (version < volinfo->version) { @@ -3637,8 +3744,11 @@ glusterd_compare_friend_volume(dict_t *peer_data, int32_t count, // snprintf(key, sizeof(key), "%s.ckusm", key_prefix); ret = dict_get_uint32(peer_data, key, &cksum); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=%s", key, NULL); goto out; + } if (cksum != volinfo->cksum) { gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CKSUM_VERS_MISMATCH, @@ -4164,8 +4274,11 @@ glusterd_import_quota_conf(dict_t *peer_data, int vol_idx, keylen = snprintf(key, sizeof(key), "%s.gfid-count", key_prefix); ret = dict_get_int32n(peer_data, key, keylen, &gfid_count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=%s", key, NULL); goto out; + } ret = glusterd_quota_conf_write_header(fd); if (ret) @@ -4175,8 +4288,11 @@ glusterd_import_quota_conf(dict_t *peer_data, int vol_idx, keylen = snprintf(key, sizeof(key) - 1, "%s.gfid%d", key_prefix, gfid_idx); ret = dict_get_strn(peer_data, key, keylen, &gfid_str); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=%s", key, NULL); goto out; + } snprintf(key, sizeof(key) - 1, "%s.gfid-type%d", key_prefix, gfid_idx); ret = dict_get_int8(peer_data, key, &gfid_type); @@ -4237,18 +4353,23 @@ gd_import_friend_volume_rebal_dict(dict_t *dict, int count, GF_ASSERT(dict); GF_ASSERT(volinfo); + xlator_t *this = THIS; + GF_ASSERT(this); snprintf(key_prefix, sizeof(key_prefix), "volume%d", count); ret = snprintf(key, sizeof(key), "%s.rebal-dict-count", key_prefix); ret = dict_get_int32n(dict, key, ret, &dict_count); if (ret) { /* Older peers will not have this dict */ + gf_smsg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_FAILED, + "Key=%s", key, 
NULL); ret = 0; goto out; } volinfo->rebal.dict = dict_new(); if (!volinfo->rebal.dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); ret = -1; goto out; } @@ -4258,7 +4379,7 @@ gd_import_friend_volume_rebal_dict(dict_t *dict, int count, out: if (ret && volinfo->rebal.dict) dict_unref(volinfo->rebal.dict); - gf_msg_debug(THIS->name, 0, "Returning with %d", ret); + gf_msg_debug(this->name, 0, "Returning with %d", ret); return ret; } @@ -4746,7 +4867,7 @@ glusterd_volinfo_stop_stale_bricks(glusterd_volinfo_t *new_volinfo, * brick multiplexing enabled, then stop the brick process */ if (ret || (new_brickinfo->snap_status == -1) || - is_brick_mx_enabled()) { + GF_ATOMIC_GET(old_volinfo->volpeerupdate)) { /*TODO: may need to switch to 'atomic' flavour of * brick_stop, once we make peer rpc program also * synctask enabled*/ @@ -4915,8 +5036,15 @@ glusterd_import_friend_volume(dict_t *peer_data, int count) ret = snprintf(key, sizeof(key), "volume%d.update", count); ret = dict_get_int32n(peer_data, key, ret, &update); - if (ret || !update) { + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=%s", key, NULL); + goto out; + } + + if (!update) { /* if update is 0 that means the volume is not imported */ + gf_smsg(this->name, GF_LOG_INFO, 0, GD_MSG_VOLUME_NOT_IMPORTED, NULL); goto out; } @@ -4964,6 +5092,13 @@ glusterd_import_friend_volume(dict_t *peer_data, int count) goto out; } + ret = glusterd_create_volfiles(new_volinfo); + if (ret) + goto out; + + glusterd_list_add_order(&new_volinfo->vol_list, &priv->volumes, + glusterd_compare_volume_name); + if (glusterd_is_volume_started(new_volinfo)) { (void)glusterd_start_bricks(new_volinfo); if (glusterd_is_snapd_enabled(new_volinfo)) { @@ -4978,19 +5113,14 @@ glusterd_import_friend_volume(dict_t *peer_data, int count) } } - ret = glusterd_create_volfiles_and_notify_services(new_volinfo); - if (ret) - goto out; - ret = glusterd_import_quota_conf(peer_data, count, 
new_volinfo, "volume"); if (ret) { gf_event(EVENT_IMPORT_QUOTA_CONF_FAILED, "volume=%s", new_volinfo->volname); goto out; } - glusterd_list_add_order(&new_volinfo->vol_list, &priv->volumes, - glusterd_compare_volume_name); + ret = glusterd_fetchspec_notify(this); out: gf_msg_debug("glusterd", 0, "Returning with ret: %d", ret); return ret; @@ -5018,18 +5148,25 @@ glusterd_import_friend_volumes_synctask(void *opaque) goto out; peer_data = dict_new(); - if (!peer_data) + if (!peer_data) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } ret = dict_unserialize(arg->dict_buf, arg->dictlen, &peer_data); if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_UNSERIALIZE_FAIL, + NULL); errno = ENOMEM; goto out; } ret = dict_get_int32n(peer_data, "count", SLEN("count"), &count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=count", NULL); goto out; + } synclock_lock(&conf->big_lock); @@ -5038,22 +5175,22 @@ glusterd_import_friend_volumes_synctask(void *opaque) * restarted (refer glusterd_restart_bricks ()) */ while (conf->restart_bricks) { - synclock_unlock(&conf->big_lock); - sleep(2); - synclock_lock(&conf->big_lock); + synccond_wait(&conf->cond_restart_bricks, &conf->big_lock); } conf->restart_bricks = _gf_true; while (i <= count) { ret = glusterd_import_friend_volume(peer_data, i); if (ret) { - conf->restart_bricks = _gf_false; - goto out; + break; } i++; } - glusterd_svcs_manager(NULL); + if (i > count) { + glusterd_svcs_manager(NULL); + } conf->restart_bricks = _gf_false; + synccond_broadcast(&conf->cond_restart_bricks); out: if (peer_data) dict_unref(peer_data); @@ -5077,8 +5214,11 @@ glusterd_import_friend_volumes(dict_t *peer_data) GF_ASSERT(peer_data); ret = dict_get_int32n(peer_data, "count", SLEN("count"), &count); - if (ret) + if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=count", NULL); goto out; + } while (i <= count) { 
ret = glusterd_import_friend_volume(peer_data, i); @@ -5097,11 +5237,16 @@ glusterd_get_global_server_quorum_ratio(dict_t *opts, double *quorum) { int ret = -1; char *quorum_str = NULL; + xlator_t *this = THIS; + GF_ASSERT(this); ret = dict_get_strn(opts, GLUSTERD_QUORUM_RATIO_KEY, SLEN(GLUSTERD_QUORUM_RATIO_KEY), &quorum_str); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=%s", GLUSTERD_QUORUM_RATIO_KEY, NULL); goto out; + } ret = gf_string2percent(quorum_str, quorum); if (ret) @@ -5116,11 +5261,16 @@ glusterd_get_global_opt_version(dict_t *opts, uint32_t *version) { int ret = -1; char *version_str = NULL; + xlator_t *this = THIS; + GF_ASSERT(this); ret = dict_get_strn(opts, GLUSTERD_GLOBAL_OPT_VERSION, SLEN(GLUSTERD_GLOBAL_OPT_VERSION), &version_str); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL); goto out; + } ret = gf_string2uint(version_str, version); if (ret) @@ -5169,13 +5319,17 @@ glusterd_import_global_opts(dict_t *friend_data) SLEN("global-opt-count"), &count); if (ret) { // old version peer + gf_smsg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_FAILED, + "Key=global-opt-count", NULL); ret = 0; goto out; } import_options = dict_new(); - if (!import_options) + if (!import_options) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } ret = import_prdict_dict(friend_data, import_options, "key", "val", count, "global"); if (ret) { @@ -5246,8 +5400,11 @@ glusterd_compare_friend_data(dict_t *peer_data, int32_t *status, char *hostname) } ret = dict_get_int32n(peer_data, "count", SLEN("count"), &count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=count", NULL); goto out; + } while (i <= count) { ret = glusterd_compare_friend_volume(peer_data, i, status, hostname); @@ -5515,13 +5672,19 @@ glusterd_add_node_to_dict(char *server, 
dict_t *dict, int count, else if (!strcmp(server, priv->scrub_svc.name)) ret = dict_set_nstrn(dict, key, keylen, "Scrubber Daemon", SLEN("Scrubber Daemon")); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "brick%d.path", count); ret = dict_set_dynstrn(dict, key, keylen, gf_strdup(uuid_utoa(MY_UUID))); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } #ifdef BUILD_GNFS /* Port is available only for the NFS server. @@ -5532,26 +5695,38 @@ glusterd_add_node_to_dict(char *server, dict_t *dict, int count, if (dict_getn(vol_opts, "nfs.port", SLEN("nfs.port"))) { ret = dict_get_int32n(vol_opts, "nfs.port", SLEN("nfs.port"), &port); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=nfs.port", NULL); goto out; + } } else port = GF_NFS3_PORT; } #endif keylen = snprintf(key, sizeof(key), "brick%d.port", count); ret = dict_set_int32n(dict, key, keylen, port); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "brick%d.pid", count); ret = dict_set_int32n(dict, key, keylen, pid); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "brick%d.status", count); ret = dict_set_int32n(dict, key, keylen, running); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } out: gf_msg_debug(THIS->name, 0, "Returning %d", ret); @@ -5733,7 +5908,9 @@ my_callback(struct rpc_req *req, struct iovec *iov, int count, void *v_frame) call_frame_t *frame = v_frame; glusterd_conf_t *conf = frame->this->private; - GF_ATOMIC_DEC(conf->blockers); + if 
(GF_ATOMIC_DEC(conf->blockers) == 0) { + synccond_broadcast(&conf->cond_blockers); + } STACK_DESTROY(frame->root); return 0; @@ -5835,7 +6012,9 @@ attach_brick_callback(struct rpc_req *req, struct iovec *iov, int count, } } out: - GF_ATOMIC_DEC(conf->blockers); + if (GF_ATOMIC_DEC(conf->blockers) == 0) { + synccond_broadcast(&conf->cond_blockers); + } STACK_DESTROY(frame->root); return 0; } @@ -5891,12 +6070,15 @@ send_attach_req(xlator_t *this, struct rpc_clnt *rpc, char *path, iobref = iobref_new(); if (!iobref) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); goto *errlbl; } errlbl = &&free_iobref; frame = create_frame(this, this->ctx->pool); if (!frame) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_FRAME_CREATE_FAIL, + NULL); goto *errlbl; } @@ -5927,7 +6109,6 @@ send_attach_req(xlator_t *this, struct rpc_clnt *rpc, char *path, GF_ATOMIC_INC(conf->blockers); ret = rpc_clnt_submit(rpc, &gd_brick_prog, op, cbkfn, &iov, 1, NULL, 0, iobref, frame, NULL, 0, NULL, 0, NULL); - return ret; free_iobref: iobref_unref(iobref); @@ -5936,7 +6117,7 @@ maybe_free_iobuf: iobuf_unref(iobuf); } err: - return -1; + return ret; } extern size_t @@ -6020,7 +6201,7 @@ attach_brick(xlator_t *this, glusterd_brickinfo_t *brickinfo, * TBD: see if there's a better way */ synclock_unlock(&conf->big_lock); - sleep(1); + synctask_sleep(1); synclock_lock(&conf->big_lock); } @@ -6160,7 +6341,7 @@ find_compat_brick_in_vol(glusterd_conf_t *conf, "brick %s is still" " starting, waiting for 2 seconds ", other_brick->path); - sleep(2); + synctask_sleep(2); synclock_lock(&conf->big_lock); retries--; } @@ -6264,7 +6445,6 @@ find_compatible_brick(glusterd_conf_t *conf, glusterd_volinfo_t *volinfo, int glusterd_get_sock_from_brick_pid(int pid, char *sockpath, size_t len) { - char fname[128] = ""; char buf[1024] = ""; char cmdline[2048] = ""; xlator_t *this = NULL; @@ -6279,6 +6459,22 @@ glusterd_get_sock_from_brick_pid(int pid, char *sockpath, size_t len) this = THIS; 
GF_ASSERT(this); +#ifdef __FreeBSD__ + blen = sizeof(buf); + int mib[4]; + + mib[0] = CTL_KERN; + mib[1] = KERN_PROC; + mib[2] = KERN_PROC_ARGS; + mib[3] = pid; + + if (sys_sysctl(mib, 4, buf, &blen, NULL, blen) != 0) { + gf_log(this->name, GF_LOG_ERROR, "brick process %d is not running", + pid); + return ret; + } +#else + char fname[128] = ""; snprintf(fname, sizeof(fname), "/proc/%d/cmdline", pid); if (sys_access(fname, R_OK) != 0) { @@ -6295,6 +6491,7 @@ glusterd_get_sock_from_brick_pid(int pid, char *sockpath, size_t len) strerror(errno), fname); return ret; } +#endif /* convert cmdline to single string */ for (i = 0, j = 0; i < blen; i++) { @@ -6343,6 +6540,43 @@ glusterd_get_sock_from_brick_pid(int pid, char *sockpath, size_t len) char * search_brick_path_from_proc(pid_t brick_pid, char *brickpath) { + char *brick_path = NULL; +#ifdef __FreeBSD__ + struct filestat *fst; + struct procstat *ps; + struct kinfo_proc *kp; + struct filestat_list *head; + + ps = procstat_open_sysctl(); + if (ps == NULL) + goto out; + + kp = kinfo_getproc(brick_pid); + if (kp == NULL) + goto out; + + head = procstat_getfiles(ps, (void *)kp, 0); + if (head == NULL) + goto out; + + STAILQ_FOREACH(fst, head, next) + { + if (fst->fs_fd < 0) + continue; + + if (!strcmp(fst->fs_path, brickpath)) { + brick_path = gf_strdup(fst->fs_path); + break; + } + } + +out: + if (head != NULL) + procstat_freefiles(ps, head); + if (kp != NULL) + free(kp); + procstat_close(ps); +#else struct dirent *dp = NULL; DIR *dirp = NULL; size_t len = 0; @@ -6353,7 +6587,6 @@ search_brick_path_from_proc(pid_t brick_pid, char *brickpath) 0, }, }; - char *brick_path = NULL; if (!brickpath) goto out; @@ -6389,7 +6622,9 @@ search_brick_path_from_proc(pid_t brick_pid, char *brickpath) } } out: - sys_closedir(dirp); + if (dirp) + sys_closedir(dirp); +#endif return brick_path; } @@ -6417,8 +6652,10 @@ glusterd_brick_start(glusterd_volinfo_t *volinfo, GF_ASSERT(this); conf = this->private; - if ((!brickinfo) || (!volinfo)) 
+ if ((!brickinfo) || (!volinfo)) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } if (gf_uuid_is_null(brickinfo->uuid)) { ret = glusterd_resolve_brick(brickinfo); @@ -6443,7 +6680,8 @@ glusterd_brick_start(glusterd_volinfo_t *volinfo, * three different triggers for an attempt to start the brick process * due to the quorum handling code in glusterd_friend_sm. */ - if (brickinfo->status == GF_BRICK_STARTING || brickinfo->start_triggered) { + if (brickinfo->status == GF_BRICK_STARTING || brickinfo->start_triggered || + GF_ATOMIC_GET(volinfo->volpeerupdate)) { gf_msg_debug(this->name, 0, "brick %s is already in starting " "phase", @@ -6649,9 +6887,7 @@ glusterd_restart_bricks(void *opaque) * glusterd_compare_friend_data ()) */ while (conf->restart_bricks) { - synclock_unlock(&conf->big_lock); - sleep(2); - synclock_lock(&conf->big_lock); + synccond_wait(&conf->cond_restart_bricks, &conf->big_lock); } conf->restart_bricks = _gf_true; @@ -6765,9 +7001,12 @@ glusterd_restart_bricks(void *opaque) ret = 0; out: - GF_ATOMIC_DEC(conf->blockers); conf->restart_done = _gf_true; conf->restart_bricks = _gf_false; + if (GF_ATOMIC_DEC(conf->blockers) == 0) { + synccond_broadcast(&conf->cond_blockers); + } + synccond_broadcast(&conf->cond_restart_bricks); return_block: return ret; @@ -7105,22 +7344,26 @@ glusterd_get_brick_root(char *path, char **mount_point) char *mnt_pt = NULL; struct stat brickstat = {0}; struct stat buf = {0}; + xlator_t *this = THIS; + GF_ASSERT(this); - if (!path) + if (!path) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto err; + } mnt_pt = gf_strdup(path); - if (!mnt_pt) + if (!mnt_pt) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto err; + } if (sys_stat(mnt_pt, &brickstat)) goto err; while ((ptr = strrchr(mnt_pt, '/')) && ptr != mnt_pt) { *ptr = '\0'; if (sys_stat(mnt_pt, &buf)) { - gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED, - 
"error in " - "stat: %s", - strerror(errno)); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED, + "Error in stat=%s", strerror(errno), NULL); goto err; } @@ -7132,10 +7375,8 @@ glusterd_get_brick_root(char *path, char **mount_point) if (ptr == mnt_pt) { if (sys_stat("/", &buf)) { - gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED, - "error in " - "stat: %s", - strerror(errno)); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED, + "Error in stat=%s", strerror(errno), NULL); goto err; } if (brickstat.st_dev == buf.st_dev) @@ -7200,11 +7441,16 @@ glusterd_add_inode_size_to_dict(dict_t *dict, int count) }; struct fs_info *fs = NULL; static dict_t *cached_fs = NULL; + xlator_t *this = THIS; + GF_ASSERT(this); ret = snprintf(key, sizeof(key), "brick%d.device", count); ret = dict_get_strn(dict, key, ret, &device); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=%s", key, NULL); goto out; + } if (cached_fs) { if (dict_get_str(cached_fs, device, &cur_word) == 0) { @@ -7216,8 +7462,11 @@ glusterd_add_inode_size_to_dict(dict_t *dict, int count) ret = snprintf(key, sizeof(key), "brick%d.fs_name", count); ret = dict_get_strn(dict, key, ret, &fs_name); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=%s", key, NULL); goto out; + } runinit(&runner); runner_redir(&runner, STDOUT_FILENO, RUN_PIPE); @@ -7226,11 +7475,9 @@ glusterd_add_inode_size_to_dict(dict_t *dict, int count) if (strcmp(fs_name, fs->fs_type_name) == 0) { if (!fs->fs_tool_name) { /* dynamic inodes */ - gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_INODE_SIZE_GET_FAIL, - "the " - "brick on %s (%s) uses dynamic inode " - "sizes", - device, fs_name); + gf_smsg(this->name, GF_LOG_INFO, 0, GD_MSG_INODE_SIZE_GET_FAIL, + "The brick on device uses dynamic inode sizes", + "Device=%s (%s)", device, fs_name, NULL); cur_word = "N/A"; goto cached; } @@ -7244,19 +7491,17 @@ 
glusterd_add_inode_size_to_dict(dict_t *dict, int count) runner_add_arg(&runner, fs->fs_tool_arg); runner_add_arg(&runner, device); } else { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_INODE_SIZE_GET_FAIL, - "could not find %s to get" - "inode size for %s (%s): %s package missing?", - fs->fs_tool_name, device, fs_name, fs->fs_tool_pkg); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_INODE_SIZE_GET_FAIL, + "Could not find tool to get inode size for device", "Tool=%s", + fs->fs_tool_name, "Device=%s (%s)", device, fs_name, + "Missing package=%s ?", fs->fs_tool_pkg, NULL); goto out; } ret = runner_start(&runner); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_CMD_EXEC_FAIL, - "failed to execute " - "\"%s\"", - fs->fs_tool_name); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_CMD_EXEC_FAIL, + "Failed to execute \"%s\"", fs->fs_tool_name, NULL); /* * Runner_start might return an error after the child has * been forked, e.g. if the program isn't there. In that @@ -7284,21 +7529,22 @@ glusterd_add_inode_size_to_dict(dict_t *dict, int count) ret = runner_end(&runner); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_CMD_EXEC_FAIL, - "%s exited with non-zero exit status", fs->fs_tool_name); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_CMD_EXEC_FAIL, + "Tool exited with non-zero exit status", "Tool=%s", + fs->fs_tool_name, NULL); goto out; } if (!cur_word) { ret = -1; - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_INODE_SIZE_GET_FAIL, - "Unable to retrieve inode size using %s", fs->fs_tool_name); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_INODE_SIZE_GET_FAIL, + "Using Tool=%s", fs->fs_tool_name, NULL); goto out; } if (dict_set_dynstr_with_alloc(cached_fs, device, cur_word)) { /* not fatal if not entered into the cache */ - gf_msg_debug(THIS->name, 0, "failed to cache fs inode size for %s", + gf_msg_debug(this->name, 0, "failed to cache fs inode size for %s", device); } @@ -7309,8 +7555,7 @@ cached: out: if (ret) - gf_msg(THIS->name, 
GF_LOG_ERROR, 0, GD_MSG_INODE_SIZE_GET_FAIL, - "failed to get inode size"); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INODE_SIZE_GET_FAIL, NULL); return ret; } @@ -7366,16 +7611,23 @@ glusterd_add_brick_mount_details(glusterd_brickinfo_t *brickinfo, dict_t *dict, struct mntent save_entry = {0}; char *mnt_pt = NULL; struct mntent *entry = NULL; + xlator_t *this = THIS; + GF_ASSERT(this); snprintf(base_key, sizeof(base_key), "brick%d", count); ret = glusterd_get_brick_root(brickinfo->path, &mnt_pt); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_BRICKPATH_ROOT_GET_FAIL, + NULL); goto out; + } entry = glusterd_get_mnt_entry_info(mnt_pt, buff, sizeof(buff), &save_entry); if (!entry) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GET_MNT_ENTRY_INFO_FAIL, + NULL); ret = -1; goto out; } @@ -7384,15 +7636,21 @@ glusterd_add_brick_mount_details(glusterd_brickinfo_t *brickinfo, dict_t *dict, snprintf(key, sizeof(key), "%s.device", base_key); ret = dict_set_dynstr_with_alloc(dict, key, entry->mnt_fsname); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } /* fs type */ snprintf(key, sizeof(key), "%s.fs_name", base_key); ret = dict_set_dynstr_with_alloc(dict, key, entry->mnt_type); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } /* mount options */ snprintf(key, sizeof(key), "%s.mnt_options", base_key); @@ -7486,43 +7744,61 @@ glusterd_add_brick_detail_to_dict(glusterd_volinfo_t *volinfo, block_size = brickstat.f_bsize; snprintf(key, sizeof(key), "%s.block_size", base_key); ret = dict_set_uint64(dict, key, block_size); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } /* free space in brick */ memfree = brickstat.f_bfree * brickstat.f_bsize; snprintf(key, sizeof(key), "%s.free", base_key); ret = 
dict_set_uint64(dict, key, memfree); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } /* total space of brick */ memtotal = brickstat.f_blocks * brickstat.f_bsize; snprintf(key, sizeof(key), "%s.total", base_key); ret = dict_set_uint64(dict, key, memtotal); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } /* inodes: total and free counts only for ext2/3/4 and xfs */ inodes_total = brickstat.f_files; if (inodes_total) { snprintf(key, sizeof(key), "%s.total_inodes", base_key); ret = dict_set_uint64(dict, key, inodes_total); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } } inodes_free = brickstat.f_ffree; if (inodes_free) { snprintf(key, sizeof(key), "%s.free_inodes", base_key); ret = dict_set_uint64(dict, key, inodes_free); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } } ret = glusterd_add_brick_mount_details(brickinfo, dict, count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_ADD_BRICK_MNT_INFO_FAIL, + NULL); goto out; + } ret = glusterd_add_inode_size_to_dict(dict, count); out: @@ -7628,8 +7904,11 @@ glusterd_add_brick_to_dict(glusterd_volinfo_t *volinfo, ret = dict_set_int32n(dict, key, keylen, brick_online); out: - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); gf_msg_debug(this->name, 0, "Returning %d", ret); + } return ret; } @@ -7710,8 +7989,10 @@ glusterd_brick_stop(glusterd_volinfo_t *volinfo, conf = this->private; GF_ASSERT(conf); - if ((!brickinfo) || (!volinfo)) + if ((!brickinfo) || (!volinfo)) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } if (gf_uuid_is_null(brickinfo->uuid)) { ret = 
glusterd_resolve_brick(brickinfo); @@ -7854,8 +8135,10 @@ glusterd_rb_check_bricks(glusterd_volinfo_t *volinfo, glusterd_brickinfo_t *src, rb = &volinfo->rep_brick; - if (!rb->src_brick || !rb->dst_brick) + if (!rb->src_brick || !rb->dst_brick) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); return -1; + } if (strcmp(rb->src_brick->hostname, src->hostname) || strcmp(rb->src_brick->path, src->path)) { @@ -8001,6 +8284,8 @@ glusterd_check_and_set_brick_xattr(char *host, char *path, uuid_t uuid, char msg[2048] = ""; gf_boolean_t in_use = _gf_false; int flags = 0; + xlator_t *this = THIS; + GF_ASSERT(this); /* Check for xattr support in backend fs */ ret = sys_lsetxattr(path, "trusted.glusterfs.test", "working", 8, 0); @@ -8011,6 +8296,8 @@ glusterd_check_and_set_brick_xattr(char *host, char *path, uuid_t uuid, " extended attributes failed, reason:" " %s.", host, path, strerror(errno)); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SET_XATTR_BRICK_FAIL, + "Host=%s, Path=%s", host, path, NULL); goto out; } else { @@ -8020,6 +8307,8 @@ glusterd_check_and_set_brick_xattr(char *host, char *path, uuid_t uuid, "Removing test extended" " attribute failed, reason: %s", strerror(errno)); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_REMOVE_XATTR_FAIL, + NULL); goto out; } } @@ -8042,6 +8331,8 @@ glusterd_check_and_set_brick_xattr(char *host, char *path, uuid_t uuid, "Failed to set extended " "attributes %s, reason: %s", GF_XATTR_VOL_ID_KEY, strerror(errno)); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SET_XATTR_FAIL, + "Attriutes=%s", GF_XATTR_VOL_ID_KEY, NULL); goto out; } @@ -8061,7 +8352,7 @@ glusterd_sm_tr_log_transition_add_to_dict(dict_t *dict, int ret = -1; char key[64] = ""; int keylen; - char timestr[64] = ""; + char timestr[GF_TIMESTR_SIZE] = ""; char *str = NULL; GF_ASSERT(dict); @@ -8093,6 +8384,9 @@ glusterd_sm_tr_log_transition_add_to_dict(dict_t *dict, goto out; out: + if (key[0] != '\0' && ret != 0) + 
gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); gf_msg_debug("glusterd", 0, "returning %d", ret); return ret; } @@ -8201,7 +8495,8 @@ glusterd_sm_tr_log_transition_add(glusterd_sm_tr_log_t *log, int old_state, transitions[next].old_state = old_state; transitions[next].new_state = new_state; transitions[next].event = event; - time(&transitions[next].time); + transitions[next].time = gf_time(); + log->current = next; if (log->count < log->size) log->count++; @@ -8317,8 +8612,10 @@ glusterd_get_local_brickpaths(glusterd_volinfo_t *volinfo, char **pathlist) int i = 0; glusterd_brickinfo_t *brickinfo = NULL; - if ((!volinfo) || (!pathlist)) + if ((!volinfo) || (!pathlist)) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } path_tokens = GF_CALLOC(sizeof(char *), volinfo->brick_count, gf_gld_mt_charptr); @@ -8772,6 +9069,8 @@ glusterd_nfs_statedump(char *options, int option_cnt, char **op_errstr) snprintf(msg, sizeof(msg), "for nfs statedump, options should" " be after the key nfs"); + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ENTRY, + "Options misplaced", NULL); *op_errstr = gf_strdup(msg); ret = -1; goto out; @@ -8839,6 +9138,8 @@ glusterd_client_statedump(char *volname, char *options, int option_cnt, dup_options = gf_strdup(options); if (!dup_options) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, + "options=%s", options, NULL); goto out; } option = strtok_r(dup_options, " ", &tmpptr); @@ -8846,6 +9147,8 @@ glusterd_client_statedump(char *volname, char *options, int option_cnt, snprintf(msg, sizeof(msg), "for gluster client statedump, options " "should be after the key 'client'"); + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ENTRY, + "Options misplaced", NULL); *op_errstr = gf_strdup(msg); ret = -1; goto out; @@ -8853,6 +9156,8 @@ glusterd_client_statedump(char *volname, char *options, int option_cnt, target_ip = strtok_r(NULL, " ", &tmpptr); 
if (target_ip == NULL) { snprintf(msg, sizeof(msg), "ip address not specified"); + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ENTRY, msg, + NULL); *op_errstr = gf_strdup(msg); ret = -1; goto out; @@ -8861,6 +9166,8 @@ glusterd_client_statedump(char *volname, char *options, int option_cnt, pid = strtok_r(NULL, " ", &tmpptr); if (pid == NULL) { snprintf(msg, sizeof(msg), "pid not specified"); + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ENTRY, msg, + NULL); *op_errstr = gf_strdup(msg); ret = -1; goto out; @@ -8901,6 +9208,8 @@ glusterd_quotad_statedump(char *options, int option_cnt, char **op_errstr) snprintf(msg, sizeof(msg), "for quotad statedump, options " "should be after the key 'quotad'"); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ENTRY, + "Options misplaced", NULL); *op_errstr = gf_strdup(msg); ret = -1; goto out; @@ -9763,6 +10072,8 @@ glusterd_append_gsync_status(dict_t *dst, dict_t *src) ret = dict_get_strn(src, "gsync-status", SLEN("gsync-status"), &stop_msg); if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=gsync-status", NULL); ret = 0; goto out; } @@ -10017,8 +10328,11 @@ glusterd_sync_use_rsp_dict(dict_t *aggr, dict_t *rsp_dict) int ret = 0; GF_ASSERT(rsp_dict); + xlator_t *this = THIS; + GF_ASSERT(this); if (!rsp_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; } @@ -10066,6 +10380,8 @@ glusterd_profile_volume_use_rsp_dict(dict_t *aggr, dict_t *rsp_dict) ret = dict_get_int32n(rsp_dict, "count", SLEN("count"), &brick_count); if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=count", NULL); ret = 0; // no bricks in the rsp goto out; } @@ -10345,6 +10661,8 @@ glusterd_volume_status_copy_to_op_ctx_dict(dict_t *aggr, dict_t *rsp_dict) glusterd_volinfo_t *volinfo = NULL; GF_ASSERT(rsp_dict); + xlator_t *this = THIS; + GF_ASSERT(this); if (aggr) { ctx_dict = aggr; @@ -10354,8 +10672,11 @@ 
glusterd_volume_status_copy_to_op_ctx_dict(dict_t *aggr, dict_t *rsp_dict) } ret = dict_get_int32n(ctx_dict, "cmd", SLEN("cmd"), &cmd); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "Key=cmd", + NULL); goto out; + } if (cmd & GF_CLI_STATUS_ALL && is_origin_glusterd(ctx_dict)) { ret = dict_get_int32n(rsp_dict, "vol_count", SLEN("vol_count"), @@ -10363,18 +10684,27 @@ glusterd_volume_status_copy_to_op_ctx_dict(dict_t *aggr, dict_t *rsp_dict) if (ret == 0) { ret = dict_set_int32n(ctx_dict, "vol_count", SLEN("vol_count"), vol_count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Key=vol_count", NULL); goto out; + } for (i = 0; i < vol_count; i++) { keylen = snprintf(key, sizeof(key), "vol%d", i); ret = dict_get_strn(rsp_dict, key, keylen, &volname); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=%s", key, NULL); goto out; + } ret = dict_set_strn(ctx_dict, key, keylen, volname); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Key=%s", key, NULL); goto out; + } } } else { /* Ignore the error as still the aggregation applies in @@ -10388,6 +10718,8 @@ glusterd_volume_status_copy_to_op_ctx_dict(dict_t *aggr, dict_t *rsp_dict) ret = dict_get_int32n(rsp_dict, "count", SLEN("count"), &rsp_node_count); if (ret) { + gf_smsg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED, "Key=count", + NULL); ret = 0; // no bricks in the rsp goto out; } @@ -10395,8 +10727,8 @@ glusterd_volume_status_copy_to_op_ctx_dict(dict_t *aggr, dict_t *rsp_dict) ret = dict_get_int32n(rsp_dict, "other-count", SLEN("other-count"), &rsp_other_count); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Failed to get other count from rsp_dict"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=other-count", NULL); goto out; } @@ -10406,18 +10738,27 @@ 
glusterd_volume_status_copy_to_op_ctx_dict(dict_t *aggr, dict_t *rsp_dict) if (!dict_getn(ctx_dict, "brick-index-max", SLEN("brick-index-max"))) { ret = dict_get_int32n(rsp_dict, "brick-index-max", SLEN("brick-index-max"), &brick_index_max); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=brick-index-max", NULL); goto out; + } ret = dict_set_int32n(ctx_dict, "brick-index-max", SLEN("brick-index-max"), brick_index_max); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Key=brick-index-max", NULL); goto out; + } } else { ret = dict_get_int32n(ctx_dict, "brick-index-max", SLEN("brick-index-max"), &brick_index_max); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=brick-index-max", NULL); goto out; + } } rsp_ctx.count = node_count; @@ -10430,45 +10771,45 @@ glusterd_volume_status_copy_to_op_ctx_dict(dict_t *aggr, dict_t *rsp_dict) ret = dict_set_int32n(ctx_dict, "count", SLEN("count"), node_count + rsp_node_count); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, - "Failed to update node count"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Key=count", NULL); goto out; } ret = dict_set_int32n(ctx_dict, "other-count", SLEN("other-count"), (other_count + rsp_other_count)); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, - "Failed to update other-count"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Key=other-count", NULL); goto out; } ret = dict_get_strn(ctx_dict, "volname", SLEN("volname"), &volname); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, - "Failed to get volname"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Key=volname", NULL); goto out; } ret = glusterd_volinfo_find(volname, &volinfo); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, - "Failed to get volinfo for 
volume: %s", volname); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, + "Volume=%s", volname, NULL); goto out; } ret = dict_set_int32n(ctx_dict, "hot_brick_count", SLEN("hot_brick_count"), hot_brick_count); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, - "Failed to update hot_brick_count"); + gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=hot_brick_count", NULL); goto out; } ret = dict_set_int32n(ctx_dict, "type", SLEN("type"), type); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, - "Failed to update type"); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=type", NULL); goto out; } @@ -12910,7 +13251,7 @@ glusterd_get_value_for_vme_entry(struct volopt_map_entry *vme, char **def_val) ret = xlator_option_info_list(&vol_opt_handle, key, &local_def_val, &descr); if (ret) { /*Swallow Error if option not found*/ - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GET_KEY_FAILED, + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_GET_KEY_FAILED, "Failed to get option for %s " "key", key); @@ -13067,7 +13408,9 @@ glusterd_get_global_options_for_all_vols(rpcsvc_request_t *req, dict_t *ctx, gf_asprintf(&def_val, "%d", priv->op_version); need_free = _gf_true; } else { - def_val = valid_all_vol_opts[i].dflt_val; + gf_asprintf(&def_val, "%s (DEFAULT)", + valid_all_vol_opts[i].dflt_val); + need_free = _gf_true; } } @@ -13153,6 +13496,7 @@ glusterd_get_default_val_for_volopt(dict_t *ctx, gf_boolean_t all_opts, int count = 0; xlator_t *this = NULL; char *def_val = NULL; + char *def_val_str = NULL; char dict_key[50] = ""; int keylen; gf_boolean_t key_found = _gf_false; @@ -13213,7 +13557,13 @@ glusterd_get_default_val_for_volopt(dict_t *ctx, gf_boolean_t all_opts, goto out; } sprintf(dict_key, "value%d", count); - ret = dict_set_dynstr_with_alloc(ctx, dict_key, def_val); + if (get_value_vme) { // the value was never changed - DEFAULT is used + 
gf_asprintf(&def_val_str, "%s (DEFAULT)", def_val); + ret = dict_set_dynstr_with_alloc(ctx, dict_key, def_val_str); + GF_FREE(def_val_str); + def_val_str = NULL; + } else + ret = dict_set_dynstr_with_alloc(ctx, dict_key, def_val); if (ret) { gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Failed to " @@ -13738,23 +14088,24 @@ glusterd_handle_replicate_brick_ops(glusterd_volinfo_t *volinfo, char vpath[PATH_MAX] = ""; char *volfileserver = NULL; - priv = THIS->private; - GF_VALIDATE_OR_GOTO(THIS->name, priv, out); + xlator_t *this = THIS; + GF_ASSERT(this); + priv = this->private; + GF_VALIDATE_OR_GOTO(this->name, priv, out); dirty[2] = hton32(1); ret = sys_lsetxattr(brickinfo->path, GF_AFR_DIRTY, dirty, sizeof(dirty), 0); if (ret == -1) { - gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_SETXATTR_FAIL, - "Failed to set extended" - " attribute %s : %s.", - GF_AFR_DIRTY, strerror(errno)); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SET_XATTR_FAIL, + "Attribute=%s", GF_AFR_DIRTY, "Reason=%s", strerror(errno), + NULL); goto out; } if (mkdtemp(tmpmount) == NULL) { - gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DIR_OP_FAILED, - "failed to create a temporary mount directory."); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED, + NULL); ret = -1; goto out; } @@ -13765,7 +14116,7 @@ glusterd_handle_replicate_brick_ops(glusterd_volinfo_t *volinfo, switch (op) { case GD_OP_REPLACE_BRICK: - if (dict_get_strn(THIS->options, "transport.socket.bind-address", + if (dict_get_strn(this->options, "transport.socket.bind-address", SLEN("transport.socket.bind-address"), &volfileserver) != 0) volfileserver = "localhost"; @@ -13808,7 +14159,7 @@ glusterd_handle_replicate_brick_ops(glusterd_volinfo_t *volinfo, ret = runner_run(&runner); if (ret) { - gf_log(THIS->name, GF_LOG_ERROR, + gf_log(this->name, GF_LOG_ERROR, "mount command" " failed."); goto lock; @@ -13818,19 +14169,18 @@ glusterd_handle_replicate_brick_ops(glusterd_volinfo_t *volinfo, (op == 
GD_OP_REPLACE_BRICK) ? GF_AFR_REPLACE_BRICK : GF_AFR_ADD_BRICK, brickinfo->brick_id, sizeof(brickinfo->brick_id), 0); if (ret == -1) - gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_SETXATTR_FAIL, - "Failed to set extended" - " attribute %s : %s", - (op == GD_OP_REPLACE_BRICK) ? GF_AFR_REPLACE_BRICK - : GF_AFR_ADD_BRICK, - strerror(errno)); - gf_umount_lazy(THIS->name, tmpmount, 1); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SET_XATTR_FAIL, + "Attribute=%s, Reason=%s", + (op == GD_OP_REPLACE_BRICK) ? GF_AFR_REPLACE_BRICK + : GF_AFR_ADD_BRICK, + strerror(errno), NULL); + gf_umount_lazy(this->name, tmpmount, 1); lock: synclock_lock(&priv->big_lock); out: if (pid) GF_FREE(pid); - gf_msg_debug("glusterd", 0, "Returning with ret"); + gf_msg_debug(this->name, 0, "Returning with ret"); return ret; } @@ -14039,6 +14389,8 @@ glusterd_brick_op_prerequisites(dict_t *dict, char **op, glusterd_op_t *gd_op, "brick: %s does not exist in " "volume: %s", *src_brick, *volname); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_NOT_FOUND, + "Brick=%s, Volume=%s", *src_brick, *volname, NULL); *op_errstr = gf_strdup(msg); goto out; } @@ -14263,8 +14615,11 @@ glusterd_add_shd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict, keylen = snprintf(key, sizeof(key), "brick%d.hostname", count); ret = dict_set_nstrn(dict, key, keylen, "Self-heal Daemon", SLEN("Self-heal Daemon")); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s", + key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "brick%d.path", count); uuid_str = gf_strdup(uuid_utoa(MY_UUID)); @@ -14273,8 +14628,11 @@ glusterd_add_shd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict, goto out; } ret = dict_set_dynstrn(dict, key, keylen, uuid_str); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s", + key, NULL); goto out; + } uuid_str = NULL; /* shd doesn't have a port. 
but the cli needs a port key with @@ -14283,8 +14641,11 @@ glusterd_add_shd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict, keylen = snprintf(key, sizeof(key), "brick%d.port", count); ret = dict_set_int32n(dict, key, keylen, 0); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s", + key, NULL); goto out; + } pidfile = volinfo->shd.svc.proc.pidfile; @@ -14295,8 +14656,11 @@ pid = -1; keylen = snprintf(key, sizeof(key), "brick%d.pid", count); ret = dict_set_int32n(dict, key, keylen, pid); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s", + key, NULL); goto out; + } keylen = snprintf(key, sizeof(key), "brick%d.status", count); ret = dict_set_int32n(dict, key, keylen, brick_online); @@ -14311,3 +14675,372 @@ out: return ret; } + +static gf_ai_compare_t +glusterd_compare_addrinfo(struct addrinfo *first, struct addrinfo *next) +{ + int ret = -1; + struct addrinfo *tmp1 = NULL; + struct addrinfo *tmp2 = NULL; + char firstip[NI_MAXHOST] = {0,}; + char nextip[NI_MAXHOST] = { + 0, + }; + + for (tmp1 = first; tmp1 != NULL; tmp1 = tmp1->ai_next) { + ret = getnameinfo(tmp1->ai_addr, tmp1->ai_addrlen, firstip, NI_MAXHOST, + NULL, 0, NI_NUMERICHOST); + if (ret) + return GF_AI_COMPARE_ERROR; + for (tmp2 = next; tmp2 != NULL; tmp2 = tmp2->ai_next) { + ret = getnameinfo(tmp2->ai_addr, tmp2->ai_addrlen, nextip, + NI_MAXHOST, NULL, 0, NI_NUMERICHOST); + if (ret) + return GF_AI_COMPARE_ERROR; + if (!strcmp(firstip, nextip)) { + return GF_AI_COMPARE_MATCH; + } + } + } + return GF_AI_COMPARE_NO_MATCH; +} + +/* Check for non optimal brick order for Replicate/Disperse : + * Checks if bricks belonging to a replicate or disperse + * volume are present on the same server + */ +int32_t +glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type, + char **volname, char **brick_list, + int32_t *brick_count, int32_t sub_count)
+{ + int ret = -1; + int i = 0; + int j = 0; + int k = 0; + xlator_t *this = NULL; + addrinfo_list_t *ai_list = NULL; + addrinfo_list_t *ai_list_tmp1 = NULL; + addrinfo_list_t *ai_list_tmp2 = NULL; + char *brick = NULL; + char *brick_list_dup = NULL; + char *brick_list_ptr = NULL; + char *tmpptr = NULL; + struct addrinfo *ai_info = NULL; + char brick_addr[128] = { + 0, + }; + int addrlen = 0; + + const char failed_string[2048] = + "Failed to perform brick order " + "check. Use 'force' at the end of the command" + " if you want to override this behavior. "; + const char found_string[2048] = + "Multiple bricks of a %s " + "volume are present on the same server. This " + "setup is not optimal. Bricks should be on " + "different nodes to have best fault tolerant " + "configuration. Use 'force' at the end of the " + "command if you want to override this " + "behavior. "; + + this = THIS; + + GF_ASSERT(this); + + ai_list = MALLOC(sizeof(addrinfo_list_t)); + ai_list->info = NULL; + CDS_INIT_LIST_HEAD(&ai_list->list); + + if (!(*volname)) { + ret = dict_get_strn(dict, "volname", SLEN("volname"), &(*volname)); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Unable to get volume name"); + goto out; + } + } + + if (!(*brick_list)) { + ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &(*brick_list)); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Bricks check : Could not " + "retrieve bricks list"); + goto out; + } + } + + if (!(*brick_count)) { + ret = dict_get_int32n(dict, "count", SLEN("count"), &(*brick_count)); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Bricks check : Could not " + "retrieve brick count"); + goto out; + } + } + + brick_list_dup = brick_list_ptr = gf_strdup(*brick_list); + /* Resolve hostnames and get addrinfo */ + while (i < *brick_count) { + ++i; + brick = strtok_r(brick_list_dup, " \n", &tmpptr); + brick_list_dup = tmpptr; + if (brick == NULL) + goto 
check_failed; + tmpptr = strrchr(brick, ':'); + if (tmpptr == NULL) + goto check_failed; + addrlen = strlen(brick) - strlen(tmpptr); + strncpy(brick_addr, brick, addrlen); + brick_addr[addrlen] = '\0'; + ret = getaddrinfo(brick_addr, NULL, NULL, &ai_info); + if (ret != 0) { + ret = 0; + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_RESOLVE_FAIL, + "unable to resolve host name for addr %s", brick_addr); + goto out; + } + ai_list_tmp1 = MALLOC(sizeof(addrinfo_list_t)); + if (ai_list_tmp1 == NULL) { + ret = 0; + gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY, + "failed to allocate " + "memory"); + freeaddrinfo(ai_info); + goto out; + } + ai_list_tmp1->info = ai_info; + cds_list_add_tail(&ai_list_tmp1->list, &ai_list->list); + ai_list_tmp1 = NULL; + } + + i = 0; + ai_list_tmp1 = cds_list_entry(ai_list->list.next, addrinfo_list_t, list); + + if (*brick_count < sub_count) { + sub_count = *brick_count; + } + + /* Check for bad brick order */ + while (i < *brick_count) { + ++i; + ai_info = ai_list_tmp1->info; + ai_list_tmp1 = cds_list_entry(ai_list_tmp1->list.next, addrinfo_list_t, + list); + if (0 == i % sub_count) { + j = 0; + continue; + } + ai_list_tmp2 = ai_list_tmp1; + k = j; + while (k < sub_count - 1) { + ++k; + ret = glusterd_compare_addrinfo(ai_info, ai_list_tmp2->info); + if (GF_AI_COMPARE_ERROR == ret) + goto check_failed; + if (GF_AI_COMPARE_MATCH == ret) + goto found_bad_brick_order; + ai_list_tmp2 = cds_list_entry(ai_list_tmp2->list.next, + addrinfo_list_t, list); + } + ++j; + } + gf_msg_debug(this->name, 0, "Brick order okay"); + ret = 0; + goto out; + +check_failed: + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER_CHECK_FAIL, + "Failed bad brick order check"); + snprintf(err_str, sizeof(failed_string), failed_string); + ret = -1; + goto out; + +found_bad_brick_order: + gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_BAD_BRKORDER, + "Bad brick order found"); + if (type == GF_CLUSTER_TYPE_DISPERSE) { + snprintf(err_str, 
sizeof(found_string), found_string, "disperse"); + } else { + snprintf(err_str, sizeof(found_string), found_string, "replicate"); + } + + ret = -1; +out: + ai_list_tmp2 = NULL; + GF_FREE(brick_list_ptr); + cds_list_for_each_entry(ai_list_tmp1, &ai_list->list, list) + { + if (ai_list_tmp1->info) + freeaddrinfo(ai_list_tmp1->info); + free(ai_list_tmp2); + ai_list_tmp2 = ai_list_tmp1; + } + free(ai_list); + free(ai_list_tmp2); + gf_msg_debug("glusterd", 0, "Returning %d", ret); + return ret; +} + +static gf_boolean_t +search_peer_in_auth_list(char *peer_hostname, char *auth_allow_list) +{ + if (strstr(auth_allow_list, peer_hostname)) { + return _gf_true; + } + + return _gf_false; +} + +/* glusterd_add_peers_to_auth_list() adds peers into auth.allow list + * if auth.allow list is not empty. This is called for add-brick and + * replica brick operations to avoid failing the temporary mount. New + * volfiles will be generated and clients are notified reg new volfiles. + */ +void +glusterd_add_peers_to_auth_list(char *volname) +{ + int ret = 0; + glusterd_volinfo_t *volinfo = NULL; + glusterd_peerinfo_t *peerinfo = NULL; + xlator_t *this = NULL; + glusterd_conf_t *conf = NULL; + int32_t len = 0; + char *auth_allow_list = NULL; + char *new_auth_allow_list = NULL; + + this = THIS; + GF_ASSERT(this); + conf = this->private; + GF_ASSERT(conf); + + GF_VALIDATE_OR_GOTO(this->name, volname, out); + + ret = glusterd_volinfo_find(volname, &volinfo); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, + "Unable to find volume: %s", volname); + goto out; + } + + ret = dict_get_str_sizen(volinfo->dict, "auth.allow", &auth_allow_list); + if (ret) { + gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_FAILED, + "auth allow list is not set"); + goto out; + } + cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list) + { + len += strlen(peerinfo->hostname); + } + len += strlen(auth_allow_list) + 1; + + new_auth_allow_list = GF_CALLOC(1, len, 
gf_common_mt_char); + + new_auth_allow_list = strncat(new_auth_allow_list, auth_allow_list, + strlen(auth_allow_list)); + cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list) + { + ret = search_peer_in_auth_list(peerinfo->hostname, new_auth_allow_list); + if (!ret) { + gf_log(this->name, GF_LOG_DEBUG, + "peer %s not found in auth.allow list", peerinfo->hostname); + new_auth_allow_list = strcat(new_auth_allow_list, ","); + new_auth_allow_list = strncat(new_auth_allow_list, + peerinfo->hostname, + strlen(peerinfo->hostname)); + } + } + if (strcmp(new_auth_allow_list, auth_allow_list) != 0) { + /* In case, new_auth_allow_list is not same as auth_allow_list, + * we need to update the volinfo->dict with new_auth_allow_list. + * we delete the auth_allow_list and replace it with + * new_auth_allow_list. for reverting the changes in post commit, we + * keep the copy of auth_allow_list as old_auth_allow_list in + * volinfo->dict. + */ + dict_del_sizen(volinfo->dict, "auth.allow"); + ret = dict_set_strn(volinfo->dict, "auth.allow", SLEN("auth.allow"), + new_auth_allow_list); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Unable to set new auth.allow list"); + goto out; + } + ret = dict_set_strn(volinfo->dict, "old.auth.allow", + SLEN("old.auth.allow"), auth_allow_list); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Unable to set old auth.allow list"); + goto out; + } + ret = glusterd_create_volfiles_and_notify_services(volinfo); + if (ret) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLFILE_CREATE_FAIL, + "failed to create volfiles"); + goto out; + } + } +out: + GF_FREE(new_auth_allow_list); + return; +} + +int +glusterd_replace_old_auth_allow_list(char *volname) +{ + int ret = 0; + glusterd_volinfo_t *volinfo = NULL; + xlator_t *this = NULL; + char *old_auth_allow_list = NULL; + + this = THIS; + GF_ASSERT(this); + + GF_VALIDATE_OR_GOTO(this->name, volname, out); + + ret = 
glusterd_volinfo_find(volname, &volinfo); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, + "Unable to find volume: %s", volname); + goto out; + } + + ret = dict_get_str_sizen(volinfo->dict, "old.auth.allow", + &old_auth_allow_list); + if (ret) { + gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_FAILED, + "old auth allow list is not set, no need to replace the list"); + ret = 0; + goto out; + } + + dict_del_sizen(volinfo->dict, "auth.allow"); + ret = dict_set_strn(volinfo->dict, "auth.allow", SLEN("auth.allow"), + old_auth_allow_list); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Unable to replace auth.allow list"); + goto out; + } + + dict_del_sizen(volinfo->dict, "old.auth.allow"); + + ret = glusterd_create_volfiles_and_notify_services(volinfo); + if (ret) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLFILE_CREATE_FAIL, + "failed to create volfiles"); + goto out; + } + ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT); + if (ret) { + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLINFO_STORE_FAIL, + "failed to store volinfo"); + goto out; + } +out: + return ret; +} diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h index b58f158fd14..bf6ac295e26 100644 --- a/xlators/mgmt/glusterd/src/glusterd-utils.h +++ b/xlators/mgmt/glusterd/src/glusterd-utils.h @@ -857,4 +857,9 @@ search_brick_path_from_proc(pid_t brick_pid, char *brickpath); int32_t glusterd_add_shd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict, int32_t count); +int32_t +glusterd_check_brick_order(dict_t *dict, char *err_str, int32_t type, + char **volname, char **bricks, int32_t *brick_count, + int32_t sub_count); + #endif diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c index d9e5e865fbd..8d6fb5e0fac 100644 --- a/xlators/mgmt/glusterd/src/glusterd-volgen.c +++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c 
@@ -90,6 +90,8 @@ xlator_instantiate_va(const char *type, const char *format, va_list arg) xlator_t *xl = NULL; char *volname = NULL; int ret = 0; + xlator_t *this = THIS; + GF_ASSERT(this); ret = gf_vasprintf(&volname, format, arg); if (ret < 0) { @@ -99,14 +101,21 @@ xlator_instantiate_va(const char *type, const char *format, va_list arg) } xl = GF_CALLOC(1, sizeof(*xl), gf_common_mt_xlator_t); - if (!xl) + if (!xl) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); goto error; + } ret = xlator_set_type_virtual(xl, type); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_XLATOR_SET_OPT_FAIL, + NULL); goto error; + } xl->options = dict_new(); - if (!xl->options) + if (!xl->options) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto error; + } xl->name = volname; CDS_INIT_LIST_HEAD(&xl->volume_options); @@ -115,8 +124,8 @@ xlator_instantiate_va(const char *type, const char *format, va_list arg) return xl; error: - gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_XLATOR_CREATE_FAIL, - "creating xlator of type %s failed", type); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_XLATOR_CREATE_FAIL, "Type=%s", + type, NULL); GF_FREE(volname); if (xl) xlator_destroy(xl); @@ -865,6 +874,8 @@ _xl_link_children(xlator_t *parent, xlator_t *children, size_t child_count) xlator_t *trav = NULL; size_t seek = 0; int ret = -1; + xlator_t *this = THIS; + GF_ASSERT(this); if (child_count == 0) goto out; @@ -873,9 +884,12 @@ _xl_link_children(xlator_t *parent, xlator_t *children, size_t child_count) ; for (; child_count--; trav = trav->prev) { ret = volgen_xlator_link(parent, trav); - gf_msg_debug(THIS->name, 0, "%s:%s", parent->name, trav->name); - if (ret) + gf_msg_debug(this->name, 0, "%s:%s", parent->name, trav->name); + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_XLATOR_LINK_FAIL, + NULL); goto out; + } } ret = 0; out: @@ -933,8 +947,10 @@ volgen_apply_filters(char *orig_volfile) entry = 
sys_readdir(filterdir, scratch); - if (!entry || errno != 0) + if (!entry || errno != 0) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_READ_ERROR, NULL); break; + } if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) continue; @@ -1472,14 +1488,22 @@ volgen_graph_set_xl_options(volgen_graph_t *graph, dict_t *dict) }; /* for posix* -> *posix* */ char *loglevel = NULL; xlator_t *trav = NULL; + xlator_t *this = THIS; + GF_ASSERT(this); ret = dict_get_str_sizen(dict, "xlator", &xlator); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=xlator", NULL); goto out; + } ret = dict_get_str_sizen(dict, "loglevel", &loglevel); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=loglevel", NULL); goto out; + } snprintf(xlator_match, 1024, "*%s", xlator); @@ -1579,14 +1603,22 @@ gfproxy_server_graph_builder(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, char *password = NULL; /*int rclusters = 0;*/ + xlator_t *this = THIS; + GF_ASSERT(this); /* We are a trusted client */ ret = dict_set_uint32(set_dict, "trusted-client", GF_CLIENT_TRUSTED); - if (ret != 0) + if (ret != 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=trusted-client", NULL); goto out; + } ret = dict_set_int32_sizen(set_dict, "gfproxy-server", 1); - if (ret != 0) + if (ret != 0) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=gfproxy-server", NULL); goto out; + } /* Build the client section of the graph first */ build_client_graph(graph, volinfo, set_dict); @@ -1647,11 +1679,13 @@ brick_graph_add_posix(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, xlator_t *this = NULL; glusterd_conf_t *priv = NULL; - if (!graph || !volinfo || !set_dict || !brickinfo) + this = THIS; + + if (!graph || !volinfo || !set_dict || !brickinfo) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } - this = THIS; - 
GF_VALIDATE_OR_GOTO("glusterd", this, out); priv = this->private; GF_VALIDATE_OR_GOTO("glusterd", priv, out); @@ -1716,9 +1750,13 @@ brick_graph_add_selinux(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, { xlator_t *xl = NULL; int ret = -1; + xlator_t *this = THIS; + GF_ASSERT(this); - if (!graph || !volinfo) + if (!graph || !volinfo) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } xl = volgen_graph_add(graph, "features/selinux", volinfo->volname); if (!xl) @@ -1785,8 +1823,10 @@ brick_graph_add_bitrot_stub(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, char *value = NULL; xlator_t *this = THIS; - if (!graph || !volinfo || !set_dict || !brickinfo) + if (!graph || !volinfo || !set_dict || !brickinfo) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } xl = volgen_graph_add(graph, "features/bitrot-stub", volinfo->volname); if (!xl) @@ -1821,9 +1861,13 @@ brick_graph_add_changelog(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, }; int ret = -1; int32_t len = 0; + xlator_t *this = THIS; + GF_ASSERT(this); - if (!graph || !volinfo || !set_dict || !brickinfo) + if (!graph || !volinfo || !set_dict || !brickinfo) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } xl = volgen_graph_add(graph, "features/changelog", volinfo->volname); if (!xl) @@ -1836,12 +1880,26 @@ brick_graph_add_changelog(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, len = snprintf(changelog_basepath, sizeof(changelog_basepath), "%s/%s", brickinfo->path, ".glusterfs/changelogs"); if ((len < 0) || (len >= sizeof(changelog_basepath))) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); ret = -1; goto out; } ret = xlator_set_fixed_option(xl, "changelog-dir", changelog_basepath); if (ret) goto out; + + ret = glusterd_is_bitrot_enabled(volinfo); + if (ret == -1) { + goto out; + } else if (ret) { + ret = xlator_set_fixed_option(xl, 
"changelog-notification", "on"); + if (ret) + goto out; + } else { + ret = xlator_set_fixed_option(xl, "changelog-notification", "off"); + if (ret) + goto out; + } out: return ret; } @@ -1852,14 +1910,31 @@ brick_graph_add_acl(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, { xlator_t *xl = NULL; int ret = -1; + xlator_t *this = THIS; + GF_ASSERT(this); - if (!graph || !volinfo || !set_dict) + if (!graph || !volinfo || !set_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } - xl = volgen_graph_add(graph, "features/access-control", volinfo->volname); - if (!xl) + ret = dict_get_str_boolean(set_dict, "features.acl", 1); + if (!ret) { + /* Skip creating this volume if option is disabled */ + /* By default, this is 'true' */ goto out; + } else if (ret < 0) { + /* lets not treat this as error, as this option is not critical, + and implemented for debug help */ + gf_log(THIS->name, GF_LOG_INFO, + "failed to get 'features.acl' flag from dict"); + } + xl = volgen_graph_add(graph, "features/access-control", volinfo->volname); + if (!xl) { + ret = -1; + goto out; + } ret = 0; out: return ret; @@ -1871,9 +1946,13 @@ brick_graph_add_locks(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, { xlator_t *xl = NULL; int ret = -1; + xlator_t *this = THIS; + GF_ASSERT(this); - if (!graph || !volinfo || !set_dict) + if (!graph || !volinfo || !set_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } xl = volgen_graph_add(graph, "features/locks", volinfo->volname); if (!xl) @@ -1890,9 +1969,13 @@ brick_graph_add_iot(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, { xlator_t *xl = NULL; int ret = -1; + xlator_t *this = THIS; + GF_ASSERT(this); - if (!graph || !volinfo || !set_dict) + if (!graph || !volinfo || !set_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } xl = volgen_graph_add(graph, "performance/io-threads", volinfo->volname); if (!xl) 
@@ -1908,9 +1991,12 @@ brick_graph_add_barrier(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, { xlator_t *xl = NULL; int ret = -1; + xlator_t *this = THIS; - if (!graph || !volinfo) + if (!graph || !volinfo) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } xl = volgen_graph_add(graph, "features/barrier", volinfo->volname); if (!xl) @@ -1927,9 +2013,13 @@ brick_graph_add_sdfs(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, { xlator_t *xl = NULL; int ret = -1; + xlator_t *this = THIS; + GF_ASSERT(this); - if (!graph || !volinfo) + if (!graph || !volinfo) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } if (!dict_get_str_boolean(set_dict, "features.sdfs", 0)) { /* update only if option is enabled */ @@ -1957,9 +2047,13 @@ brick_graph_add_namespace(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, { xlator_t *xl = NULL; int ret = -1; + xlator_t *this = THIS; + GF_ASSERT(this); - if (!graph || !volinfo || !set_dict) + if (!graph || !volinfo || !set_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } ret = dict_get_str_boolean(set_dict, "features.tag-namespaces", 0); if (ret == -1) @@ -2012,9 +2106,13 @@ brick_graph_add_index(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, char index_basepath[PATH_MAX] = {0}; int ret = -1; int32_t len = 0; + xlator_t *this = THIS; + GF_ASSERT(this); - if (!graph || !volinfo || !brickinfo || !set_dict) + if (!graph || !volinfo || !brickinfo || !set_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } xl = volgen_graph_add(graph, "features/index", volinfo->volname); if (!xl) @@ -2023,6 +2121,7 @@ brick_graph_add_index(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, len = snprintf(index_basepath, sizeof(index_basepath), "%s/%s", brickinfo->path, ".glusterfs/indices"); if ((len < 0) || (len >= sizeof(index_basepath))) { + gf_smsg(this->name, 
GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); goto out; } @@ -2069,9 +2168,13 @@ brick_graph_add_marker(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, char buf[32] = { 0, }; + xlator_t *this = THIS; + GF_ASSERT(this); - if (!graph || !volinfo || !set_dict) + if (!graph || !volinfo || !set_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } xl = volgen_graph_add(graph, "features/marker", volinfo->volname); if (!xl) @@ -2102,9 +2205,13 @@ brick_graph_add_quota(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, int ret = -1; xlator_t *xl = NULL; char *value = NULL; + xlator_t *this = THIS; + GF_ASSERT(this); - if (!graph || !volinfo || !set_dict) + if (!graph || !volinfo || !set_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } xl = volgen_graph_add(graph, "features/quota", volinfo->volname); if (!xl) @@ -2130,9 +2237,13 @@ brick_graph_add_ro(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, { int ret = -1; xlator_t *xl = NULL; + xlator_t *this = THIS; + GF_ASSERT(this); - if (!graph || !volinfo || !set_dict) + if (!graph || !volinfo || !set_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } if (dict_get_str_boolean(set_dict, "features.read-only", 0) && (dict_get_str_boolean(set_dict, "features.worm", 0) || @@ -2162,9 +2273,13 @@ brick_graph_add_worm(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, { int ret = -1; xlator_t *xl = NULL; + xlator_t *this = THIS; + GF_ASSERT(this); - if (!graph || !volinfo || !set_dict) + if (!graph || !volinfo || !set_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } if (dict_get_str_boolean(set_dict, "features.read-only", 0) && (dict_get_str_boolean(set_dict, "features.worm", 0) || @@ -2191,9 +2306,13 @@ brick_graph_add_cdc(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, { int ret = -1; xlator_t *xl = NULL; + xlator_t *this = THIS; + 
GF_ASSERT(this); - if (!graph || !volinfo || !set_dict) + if (!graph || !volinfo || !set_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } /* Check for compress volume option, and add it to the graph on * server side */ @@ -2223,8 +2342,10 @@ brick_graph_add_io_stats(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, xlator_t *this = THIS; glusterd_conf_t *priv = this->private; - if (!graph || !set_dict || !brickinfo) + if (!graph || !set_dict || !brickinfo) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } xl = volgen_graph_add_as(graph, "debug/io-stats", brickinfo->path); if (!xl) @@ -2252,9 +2373,13 @@ brick_graph_add_upcall(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, { xlator_t *xl = NULL; int ret = -1; + xlator_t *this = THIS; + GF_ASSERT(this); - if (!graph || !volinfo || !set_dict) + if (!graph || !volinfo || !set_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } xl = volgen_graph_add(graph, "features/upcall", volinfo->volname); if (!xl) { @@ -2274,9 +2399,13 @@ brick_graph_add_leases(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, { xlator_t *xl = NULL; int ret = -1; + xlator_t *this = THIS; + GF_ASSERT(this); - if (!graph || !volinfo || !set_dict) + if (!graph || !volinfo || !set_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } xl = volgen_graph_add(graph, "features/leases", volinfo->volname); if (!xl) { @@ -2306,9 +2435,13 @@ brick_graph_add_server(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, char *volname = NULL; char *address_family_data = NULL; int32_t len = 0; + xlator_t *this = THIS; + GF_ASSERT(this); - if (!graph || !volinfo || !set_dict || !brickinfo) + if (!graph || !volinfo || !set_dict || !brickinfo) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } get_vol_transport_type(volinfo, transt); @@ -2417,13 
+2550,20 @@ brick_graph_add_pump(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, char *password = NULL; char *ptranst = NULL; char *address_family_data = NULL; + xlator_t *this = THIS; + GF_ASSERT(this); - if (!graph || !volinfo || !set_dict) + if (!graph || !volinfo || !set_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } ret = dict_get_int32(volinfo->dict, "enable-pump", &pump); - if (ret == -ENOENT) + if (ret == -ENOENT) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=enable-pump", NULL); ret = pump = 0; + } if (ret) return -1; @@ -3221,11 +3361,20 @@ volgen_link_bricks(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, j); j++; } + if (!xl) { ret = -1; goto out; } + if (strncmp(xl_type, "performance/readdir-ahead", + SLEN("performance/readdir-ahead")) == 0) { + ret = xlator_set_fixed_option(xl, "performance.readdir-ahead", + "on"); + if (ret) + goto out; + } + ret = volgen_xlator_link(xl, trav); if (ret) goto out; @@ -3453,13 +3602,13 @@ volgen_graph_build_readdir_ahead(volgen_graph_t *graph, int32_t clusters = 0; if (graph->type == GF_QUOTAD || graph->type == GF_SNAPD || - !glusterd_volinfo_get_boolean(volinfo, VKEY_PARALLEL_READDIR) || - !glusterd_volinfo_get_boolean(volinfo, VKEY_READDIR_AHEAD)) + !glusterd_volinfo_get_boolean(volinfo, VKEY_PARALLEL_READDIR)) goto out; clusters = volgen_link_bricks_from_list_tail( graph, volinfo, "performance/readdir-ahead", "%s-readdir-ahead-%d", child_count, 1); + out: return clusters; } @@ -3613,6 +3762,8 @@ set_afr_pending_xattrs_option(volgen_graph_t *graph, * for client xlators in volfile. * ta client xlator indexes are - 2, 5, 8 depending on the index of * subvol. e.g- For first subvol ta client xlator id is volname-ta-2 + * For pending-xattr, ta name would be + * 'volname-ta-2.{{volume-uuid}}' from GD_OP_VERSION_7_3. 
*/ ta_brick_index = 0; if (volinfo->thin_arbiter_count == 1) { @@ -3625,8 +3776,16 @@ set_afr_pending_xattrs_option(volgen_graph_t *graph, } ta_brick_index++; } - - strncat(ptr, ta_brick->brick_id, strlen(ta_brick->brick_id)); + if (conf->op_version < GD_OP_VERSION_7_3) { + strncat(ptr, ta_brick->brick_id, + strlen(ta_brick->brick_id)); + } else { + char ta_volname[PATH_MAX] = ""; + int len = snprintf(ta_volname, PATH_MAX, "%s.%s", + ta_brick->brick_id, + uuid_utoa(volinfo->volume_id)); + strncat(ptr, ta_volname, len); + } } ret = xlator_set_fixed_option(afr_xlators_list[index++], @@ -3651,6 +3810,38 @@ out: } static int +set_volfile_id_option(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, + int clusters) +{ + xlator_t *xlator = NULL; + int i = 0; + int ret = -1; + glusterd_conf_t *conf = NULL; + xlator_t *this = NULL; + + this = THIS; + GF_VALIDATE_OR_GOTO("glusterd", this, out); + conf = this->private; + GF_VALIDATE_OR_GOTO(this->name, conf, out); + + if (conf->op_version < GD_OP_VERSION_9_0) + return 0; + xlator = first_of(graph); + + for (i = 0; i < clusters; i++) { + ret = xlator_set_fixed_option(xlator, "volume-id", + uuid_utoa(volinfo->volume_id)); + if (ret) + goto out; + + xlator = xlator->next; + } + +out: + return ret; +} + +static int volgen_graph_build_afr_clusters(volgen_graph_t *graph, glusterd_volinfo_t *volinfo) { @@ -3692,6 +3883,13 @@ volgen_graph_build_afr_clusters(volgen_graph_t *graph, clusters = -1; goto out; } + + ret = set_volfile_id_option(graph, volinfo, clusters); + if (ret) { + clusters = -1; + goto out; + } + if (!volinfo->arbiter_count && !volinfo->thin_arbiter_count) goto out; @@ -4308,6 +4506,12 @@ bitrot_option_handler(volgen_graph_t *graph, struct volopt_map_entry *vme, return -1; } + if (!strcmp(vme->option, "signer-threads")) { + ret = xlator_set_fixed_option(xl, "signer-threads", vme->value); + if (ret) + return -1; + } + return ret; } @@ -4390,11 +4594,15 @@ nfs_option_handler(volgen_graph_t *graph, struct 
volopt_map_entry *vme, volinfo = param; - if (!volinfo || (volinfo->volname[0] == '\0')) + if (!volinfo || (volinfo->volname[0] == '\0')) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); return 0; + } - if (!vme || !(vme->option)) + if (!vme || !(vme->option)) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); return 0; + } xl = first_of(graph); @@ -4525,8 +4733,11 @@ prepare_shd_volume_options(glusterd_volinfo_t *volinfo, dict_t *mod_dict, goto out; ret = dict_set_uint32(set_dict, "trusted-client", GF_CLIENT_TRUSTED); - if (ret) + if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=trusted-client", NULL); goto out; + } dict_copy(volinfo->dict, set_dict); if (mod_dict) @@ -4713,6 +4924,7 @@ build_shd_graph(glusterd_volinfo_t *volinfo, volgen_graph_t *graph, set_dict = dict_new(); if (!set_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); ret = -ENOMEM; goto out; } @@ -4851,25 +5063,40 @@ build_nfs_graph(volgen_graph_t *graph, dict_t *mod_dict) ret = dict_set_sizen_str_sizen(set_dict, "performance.stat-prefetch", "off"); - if (ret) + if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=performance.stat-prefetch", NULL); goto out; + } ret = dict_set_sizen_str_sizen(set_dict, "performance.client-io-threads", "off"); - if (ret) + if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=performance.client-io-threads", NULL); goto out; + } ret = dict_set_str_sizen(set_dict, "client-transport-type", nfs_xprt); - if (ret) + if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=client-transport-type", NULL); goto out; + } ret = dict_set_uint32(set_dict, "trusted-client", GF_CLIENT_TRUSTED); - if (ret) + if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=trusted-client", NULL); goto out; + } ret = dict_set_sizen_str_sizen(set_dict, 
"nfs-volume-file", "yes"); - if (ret) + if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=nfs-volume-file", NULL); goto out; + } if (mod_dict && (data = dict_get_sizen(mod_dict, "volume-name"))) { volname = data->data; @@ -5111,8 +5338,11 @@ build_quotad_graph(volgen_graph_t *graph, dict_t *mod_dict) continue; ret = dict_set_uint32(set_dict, "trusted-client", GF_CLIENT_TRUSTED); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=trusted-client", NULL); goto out; + } dict_copy(voliter->dict, set_dict); if (mod_dict) @@ -5308,14 +5538,21 @@ glusterd_generate_client_per_brick_volfile(glusterd_volinfo_t *volinfo) int ret = -1; char *ssl_str = NULL; gf_boolean_t ssl_bool = _gf_false; + xlator_t *this = THIS; + GF_ASSERT(this); dict = dict_new(); - if (!dict) + if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } ret = dict_set_uint32(dict, "trusted-client", GF_CLIENT_TRUSTED); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=trusted-client", NULL); goto free_dict; + } if (dict_get_str_sizen(volinfo->dict, "client.ssl", &ssl_str) == 0) { if (gf_string2boolean(ssl_str, &ssl_bool) == 0) { @@ -5397,17 +5634,25 @@ generate_dummy_client_volfiles(glusterd_volinfo_t *volinfo) enumerate_transport_reqs(volinfo->transport_type, types); dict = dict_new(); - if (!dict) + if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } for (i = 0; types[i]; i++) { ret = dict_set_str(dict, "client-transport-type", types[i]); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=client-transport-type", NULL); goto out; + } type = transport_str_to_type(types[i]); ret = dict_set_uint32(dict, "trusted-client", GF_CLIENT_OTHER); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + 
"Key=trusted-client", NULL); goto out; + } ret = glusterd_get_dummy_client_filepath(filepath, volinfo, type); if (ret) { @@ -5468,17 +5713,25 @@ generate_client_volfiles(glusterd_volinfo_t *volinfo, enumerate_transport_reqs(volinfo->transport_type, types); dict = dict_new(); - if (!dict) + if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } for (i = 0; types[i]; i++) { ret = dict_set_str(dict, "client-transport-type", types[i]); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=client-transport-type", NULL); goto out; + } type = transport_str_to_type(types[i]); ret = dict_set_uint32(dict, "trusted-client", client_type); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=trusted-client", NULL); goto out; + } if (client_type == GF_CLIENT_TRUSTED) { ret = glusterd_get_trusted_client_filepath(filepath, volinfo, type); @@ -5628,10 +5881,15 @@ prepare_bitrot_scrub_volume_options(glusterd_volinfo_t *volinfo, dict_t *mod_dict, dict_t *set_dict) { int ret = 0; + xlator_t *this = THIS; + GF_ASSERT(this); ret = dict_set_uint32(set_dict, "trusted-client", GF_CLIENT_TRUSTED); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=trusted-client", NULL); goto out; + } dict_copy(volinfo->dict, set_dict); if (mod_dict) @@ -5699,6 +5957,7 @@ build_bitd_volume_graph(volgen_graph_t *graph, glusterd_volinfo_t *volinfo, set_dict = dict_new(); if (!set_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); ret = -1; goto out; } @@ -6105,8 +6364,11 @@ validate_shdopts(glusterd_volinfo_t *volinfo, dict_t *val_dict, goto out; } ret = dict_set_int32_sizen(val_dict, "graph-check", 1); - if (ret) + if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=graph-check", NULL); goto out; + } ret = build_shd_graph(volinfo, &graph, val_dict); if (!ret) ret = 
graph_reconf_validateopt(&graph.graph, op_errstr); @@ -6160,6 +6422,8 @@ validate_nfsopts(glusterd_volinfo_t *volinfo, dict_t *val_dict, "wrong transport " "type %s", tt); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_INCOMPATIBLE_VALUE, + "Type=%s", tt, NULL); *op_errstr = gf_strdup(err_str); ret = -1; goto out; @@ -6228,6 +6492,7 @@ validate_brickopts(glusterd_volinfo_t *volinfo, glusterd_brickinfo_t *brickinfo, graph.errstr = op_errstr; full_dict = dict_new(); if (!full_dict) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); ret = -1; goto out; } diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c index 849cac4f0d9..814ab14fb27 100644 --- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c +++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c @@ -37,240 +37,6 @@ #define glusterd_op_start_volume_args_get(dict, volname, flags) \ glusterd_op_stop_volume_args_get(dict, volname, flags) -gf_ai_compare_t -glusterd_compare_addrinfo(struct addrinfo *first, struct addrinfo *next) -{ - int ret = -1; - struct addrinfo *tmp1 = NULL; - struct addrinfo *tmp2 = NULL; - char firstip[NI_MAXHOST] = {0.}; - char nextip[NI_MAXHOST] = { - 0, - }; - - for (tmp1 = first; tmp1 != NULL; tmp1 = tmp1->ai_next) { - ret = getnameinfo(tmp1->ai_addr, tmp1->ai_addrlen, firstip, NI_MAXHOST, - NULL, 0, NI_NUMERICHOST); - if (ret) - return GF_AI_COMPARE_ERROR; - for (tmp2 = next; tmp2 != NULL; tmp2 = tmp2->ai_next) { - ret = getnameinfo(tmp2->ai_addr, tmp2->ai_addrlen, nextip, - NI_MAXHOST, NULL, 0, NI_NUMERICHOST); - if (ret) - return GF_AI_COMPARE_ERROR; - if (!strcmp(firstip, nextip)) { - return GF_AI_COMPARE_MATCH; - } - } - } - return GF_AI_COMPARE_NO_MATCH; -} - -/* Check for non optimal brick order for replicate : - * Checks if bricks belonging to a replicate volume - * are present on the same server - */ -int32_t -glusterd_check_brick_order(dict_t *dict, char *err_str) -{ - int ret = -1; - int i = 0; - int 
j = 0; - int k = 0; - xlator_t *this = NULL; - addrinfo_list_t *ai_list = NULL; - addrinfo_list_t *ai_list_tmp1 = NULL; - addrinfo_list_t *ai_list_tmp2 = NULL; - char *brick = NULL; - char *brick_list = NULL; - char *brick_list_dup = NULL; - char *brick_list_ptr = NULL; - char *tmpptr = NULL; - char *volname = NULL; - int32_t brick_count = 0; - int32_t type = GF_CLUSTER_TYPE_NONE; - int32_t sub_count = 0; - struct addrinfo *ai_info = NULL; - char brick_addr[128] = { - 0, - }; - int addrlen = 0; - - const char failed_string[2048] = - "Failed to perform brick order " - "check. Use 'force' at the end of the command" - " if you want to override this behavior. "; - const char found_string[2048] = - "Multiple bricks of a %s " - "volume are present on the same server. This " - "setup is not optimal. Bricks should be on " - "different nodes to have best fault tolerant " - "configuration. Use 'force' at the end of the " - "command if you want to override this " - "behavior. "; - - this = THIS; - - GF_ASSERT(this); - - ai_list = MALLOC(sizeof(addrinfo_list_t)); - ai_list->info = NULL; - CDS_INIT_LIST_HEAD(&ai_list->list); - - ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); - if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Unable to get volume name"); - goto out; - } - - ret = dict_get_int32n(dict, "type", SLEN("type"), &type); - if (ret) { - snprintf(err_str, 512, "Unable to get type of volume %s", volname); - gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, "%s", - err_str); - goto out; - } - - ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &brick_list); - if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Bricks check : Could not " - "retrieve bricks list"); - goto out; - } - - ret = dict_get_int32n(dict, "count", SLEN("count"), &brick_count); - if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Bricks check : Could not " - "retrieve brick count"); - goto out; - 
} - - if (type != GF_CLUSTER_TYPE_DISPERSE) { - ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"), - &sub_count); - if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Bricks check : Could" - " not retrieve replica count"); - goto out; - } - gf_msg_debug(this->name, 0, - "Replicate cluster type " - "found. Checking brick order."); - } else { - ret = dict_get_int32n(dict, "disperse-count", SLEN("disperse-count"), - &sub_count); - if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Bricks check : Could" - " not retrieve disperse count"); - goto out; - } - gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DISPERSE_CLUSTER_FOUND, - "Disperse cluster type" - " found. Checking brick order."); - } - - brick_list_dup = brick_list_ptr = gf_strdup(brick_list); - /* Resolve hostnames and get addrinfo */ - while (i < brick_count) { - ++i; - brick = strtok_r(brick_list_dup, " \n", &tmpptr); - brick_list_dup = tmpptr; - if (brick == NULL) - goto check_failed; - tmpptr = strrchr(brick, ':'); - if (tmpptr == NULL) - goto check_failed; - addrlen = strlen(brick) - strlen(tmpptr); - strncpy(brick_addr, brick, addrlen); - brick_addr[addrlen] = '\0'; - ret = getaddrinfo(brick_addr, NULL, NULL, &ai_info); - if (ret != 0) { - ret = 0; - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_RESOLVE_FAIL, - "unable to resolve host name for addr %s", brick_addr); - goto out; - } - ai_list_tmp1 = MALLOC(sizeof(addrinfo_list_t)); - if (ai_list_tmp1 == NULL) { - ret = 0; - gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY, - "failed to allocate " - "memory"); - freeaddrinfo(ai_info); - goto out; - } - ai_list_tmp1->info = ai_info; - cds_list_add_tail(&ai_list_tmp1->list, &ai_list->list); - ai_list_tmp1 = NULL; - } - - i = 0; - ai_list_tmp1 = cds_list_entry(ai_list->list.next, addrinfo_list_t, list); - - /* Check for bad brick order */ - while (i < brick_count) { - ++i; - ai_info = ai_list_tmp1->info; - ai_list_tmp1 = 
cds_list_entry(ai_list_tmp1->list.next, addrinfo_list_t, - list); - if (0 == i % sub_count) { - j = 0; - continue; - } - ai_list_tmp2 = ai_list_tmp1; - k = j; - while (k < sub_count - 1) { - ++k; - ret = glusterd_compare_addrinfo(ai_info, ai_list_tmp2->info); - if (GF_AI_COMPARE_ERROR == ret) - goto check_failed; - if (GF_AI_COMPARE_MATCH == ret) - goto found_bad_brick_order; - ai_list_tmp2 = cds_list_entry(ai_list_tmp2->list.next, - addrinfo_list_t, list); - } - ++j; - } - gf_msg_debug(this->name, 0, "Brick order okay"); - ret = 0; - goto out; - -check_failed: - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER_CHECK_FAIL, - "Failed bad brick order check"); - snprintf(err_str, sizeof(failed_string), failed_string); - ret = -1; - goto out; - -found_bad_brick_order: - gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_BAD_BRKORDER, - "Bad brick order found"); - if (type == GF_CLUSTER_TYPE_DISPERSE) { - snprintf(err_str, sizeof(found_string), found_string, "disperse"); - } else { - snprintf(err_str, sizeof(found_string), found_string, "replicate"); - } - - ret = -1; -out: - ai_list_tmp2 = NULL; - GF_FREE(brick_list_ptr); - cds_list_for_each_entry(ai_list_tmp1, &ai_list->list, list) - { - if (ai_list_tmp1->info) - freeaddrinfo(ai_list_tmp1->info); - free(ai_list_tmp2); - ai_list_tmp2 = ai_list_tmp1; - } - free(ai_list_tmp2); - return ret; -} - int __glusterd_handle_create_volume(rpcsvc_request_t *req) { @@ -809,10 +575,14 @@ glusterd_handle_heal_options_enable_disable(rpcsvc_request_t *req, dict_t *dict, int ret = 0; char *key = NULL; char *value = NULL; + xlator_t *this = THIS; + GF_ASSERT(this); ret = dict_get_int32n(dict, "heal-op", SLEN("heal-op"), (int32_t *)&heal_op); if (ret || (heal_op == GF_SHD_OP_INVALID)) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=heal-op", NULL); ret = -1; goto out; } @@ -850,21 +620,33 @@ glusterd_handle_heal_options_enable_disable(rpcsvc_request_t *req, dict_t *dict, } else { key = 
"cluster.granular-entry-heal"; ret = dict_set_int8(dict, "is-special-key", 1); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=is-special-key", NULL); goto out; + } } ret = dict_set_strn(dict, "key1", SLEN("key1"), key); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=key1", NULL); goto out; + } ret = dict_set_strn(dict, "value1", SLEN("value1"), value); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=value1", NULL); goto out; + } ret = dict_set_int32n(dict, "count", SLEN("count"), 1); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=count", NULL); goto out; + } ret = glusterd_op_begin_synctask(req, GD_OP_SET_VOLUME, dict); @@ -888,18 +670,19 @@ __glusterd_handle_cli_heal_volume(rpcsvc_request_t *req) 0, }; + this = THIS; + GF_ASSERT(this); + GF_ASSERT(req); ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); if (ret < 0) { // failed to decode msg; req->rpc_err = GARBAGE_ARGS; + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL); goto out; } - this = THIS; - GF_ASSERT(this); - if (cli_req.dict.dict_len) { /* Unserialize the dictionary */ dict = dict_new(); @@ -960,8 +743,11 @@ __glusterd_handle_cli_heal_volume(rpcsvc_request_t *req) goto out; ret = dict_set_int32n(dict, "count", SLEN("count"), volinfo->brick_count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=count", NULL); goto out; + } ret = glusterd_op_begin_synctask(req, GD_OP_HEAL_VOLUME, dict); @@ -1013,6 +799,7 @@ __glusterd_handle_cli_statedump_volume(rpcsvc_request_t *req) ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); if (ret < 0) { req->rpc_err = GARBAGE_ARGS; + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL); goto out; } if (cli_req.dict.dict_len) { @@ -1107,6 +894,8 @@ 
glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr, int32_t local_brick_count = 0; int32_t i = 0; int32_t type = 0; + int32_t replica_count = 0; + int32_t disperse_count = 0; char *brick = NULL; char *tmpptr = NULL; xlator_t *this = NULL; @@ -1201,16 +990,44 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr, } if (!is_force) { - if ((type == GF_CLUSTER_TYPE_REPLICATE) || - (type == GF_CLUSTER_TYPE_DISPERSE)) { - ret = glusterd_check_brick_order(dict, msg); + if (type == GF_CLUSTER_TYPE_REPLICATE) { + ret = dict_get_int32n(dict, "replica-count", + SLEN("replica-count"), &replica_count); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Bricks check : Could" + " not retrieve replica count"); + goto out; + } + gf_msg_debug(this->name, 0, + "Replicate cluster type " + "found. Checking brick order."); + ret = glusterd_check_brick_order(dict, msg, type, &volname, + &bricks, &brick_count, + replica_count); + } else if (type == GF_CLUSTER_TYPE_DISPERSE) { + ret = dict_get_int32n(dict, "disperse-count", + SLEN("disperse-count"), &disperse_count); if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER, - "Not " - "creating volume because of " - "bad brick order"); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Bricks check : Could" + " not retrieve disperse count"); goto out; } + gf_msg_debug(this->name, 0, + "Disperse cluster type" + " found. Checking brick order."); + ret = glusterd_check_brick_order(dict, msg, type, &volname, + &bricks, &brick_count, + disperse_count); + } + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER, + "Not creating the volume because of " + "bad brick order. 
%s", + msg); + *op_errstr = gf_strdup(msg); + goto out; } } } @@ -1325,20 +1142,32 @@ glusterd_op_stop_volume_args_get(dict_t *dict, char **volname, int *flags) this = THIS; GF_ASSERT(this); - if (!dict || !volname || !flags) + if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } + + if (!volname) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ARGUMENT, NULL); + goto out; + } + + if (!flags) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ARGUMENT, NULL); + goto out; + } ret = dict_get_strn(dict, "volname", SLEN("volname"), volname); if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Unable to get volume name"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=volname", NULL); goto out; } ret = dict_get_int32n(dict, "flags", SLEN("flags"), flags); if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Unable to get flags"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=flags", NULL); goto out; } out: @@ -1351,27 +1180,29 @@ glusterd_op_statedump_volume_args_get(dict_t *dict, char **volname, { int ret = -1; - if (!dict || !volname || !options || !option_cnt) + if (!dict || !volname || !options || !option_cnt) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } ret = dict_get_strn(dict, "volname", SLEN("volname"), volname); if (ret) { - gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Unable to get volname"); + gf_smsg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=volname", NULL); goto out; } ret = dict_get_strn(dict, "options", SLEN("options"), options); if (ret) { - gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Unable to get options"); + gf_smsg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=options", NULL); goto out; } ret = dict_get_int32n(dict, "option_cnt", SLEN("option_cnt"), option_cnt); if (ret) { - 
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Unable to get option count"); + gf_smsg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=option_cnt", NULL); goto out; } @@ -1598,8 +1429,13 @@ glusterd_op_stage_stop_volume(dict_t *dict, char **op_errstr) GF_ASSERT(this); ret = glusterd_op_stop_volume_args_get(dict, &volname, &flags); - if (ret) + if (ret) { + snprintf(msg, sizeof(msg), "Failed to get details of volume %s", + volname); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_STOP_ARGS_GET_FAILED, + "Volume name=%s", volname, NULL); goto out; + } ret = glusterd_volinfo_find(volname, &volinfo); if (ret) { @@ -1803,7 +1639,9 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo, if (!glusterd_is_volume_replicate(volinfo)) { ret = -1; snprintf(msg, sizeof(msg), - "Volume %s is not of type " + "This command is supported " + "for only volume of replicated " + "type. Volume %s is not of type " "replicate", volinfo->volname); *op_errstr = gf_strdup(msg); @@ -1879,14 +1717,15 @@ glusterd_op_stage_heal_volume(dict_t *dict, char **op_errstr) if (!glusterd_is_volume_started(volinfo)) { ret = -1; snprintf(msg, sizeof(msg), "Volume %s is not started.", volname); - gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_STARTED, "%s", - msg); + gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_STARTED, + "Volume=%s", volname, NULL); *op_errstr = gf_strdup(msg); goto out; } opt_dict = volinfo->dict; if (!opt_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, NULL); ret = 0; goto out; } @@ -1942,6 +1781,8 @@ glusterd_op_stage_statedump_volume(dict_t *dict, char **op_errstr) ret = glusterd_volinfo_find(volname, &volinfo); if (ret) { snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOLINFO_GET_FAIL, + "Volume=%s", volname, NULL); goto out; } @@ -2105,8 +1946,6 @@ glusterd_op_create_volume(dict_t *dict, char **op_errstr) goto out; } - 
pthread_mutex_init(&volinfo->store_volinfo_lock, NULL); - ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); if (ret) { @@ -3058,33 +2897,35 @@ glusterd_op_clearlocks_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) char *mntpt = NULL; char **xl_opts = NULL; glusterd_volinfo_t *volinfo = NULL; + xlator_t *this = THIS; + GF_ASSERT(this); ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Failed to get volume name"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=volname", NULL); goto out; } gf_msg_debug("glusterd", 0, "Performing clearlocks on volume %s", volname); ret = dict_get_strn(dict, "path", SLEN("path"), &path); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Failed to get path"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "Key=path", + NULL); goto out; } ret = dict_get_strn(dict, "kind", SLEN("kind"), &kind); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Failed to get kind"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "Key=kind", + NULL); goto out; } ret = dict_get_strn(dict, "type", SLEN("type"), &type); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Failed to get type"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "Key=type", + NULL); goto out; } @@ -3092,10 +2933,9 @@ glusterd_op_clearlocks_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) if (ret) ret = 0; - gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_CLRCLK_VOL_REQ_RCVD, - "Received clear-locks request for " - "volume %s with kind %s type %s and options %s", - volname, kind, type, opts); + gf_smsg(this->name, GF_LOG_INFO, 0, GD_MSG_CLRCLK_VOL_REQ_RCVD, + "Volume=%s, Kind=%s, Type=%s, Options=%s", volname, kind, type, + opts, NULL); if (opts) ret = gf_asprintf(&cmd_str, GF_XATTR_CLRLK_CMD ".t%s.k%s.%s", type, @@ -3108,22 
+2948,25 @@ glusterd_op_clearlocks_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) ret = glusterd_volinfo_find(volname, &volinfo); if (ret) { snprintf(msg, sizeof(msg), "Volume %s doesn't exist.", volname); - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "Volume=%s", + volname, NULL); goto out; } xl_opts = GF_CALLOC(volinfo->brick_count + 1, sizeof(char *), gf_gld_mt_charptr); - if (!xl_opts) + if (!xl_opts) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); goto out; + } ret = glusterd_clearlocks_get_local_client_ports(volinfo, xl_opts); if (ret) { snprintf(msg, sizeof(msg), "Couldn't get port numbers of " "local bricks"); - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRK_PORT_NUM_GET_FAIL, "%s", - msg); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRK_PORT_NUM_GET_FAIL, + NULL); goto out; } @@ -3132,8 +2975,8 @@ glusterd_op_clearlocks_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) snprintf(msg, sizeof(msg), "Creating mount directory " "for clear-locks failed."); - gf_msg(THIS->name, GF_LOG_ERROR, 0, - GD_MSG_CLRLOCKS_MOUNTDIR_CREATE_FAIL, "%s", msg); + gf_smsg(this->name, GF_LOG_ERROR, 0, + GD_MSG_CLRLOCKS_MOUNTDIR_CREATE_FAIL, NULL); goto out; } @@ -3142,16 +2985,15 @@ glusterd_op_clearlocks_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) snprintf(msg, sizeof(msg), "Failed to mount clear-locks " "maintenance client."); - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_CLRLOCKS_CLNT_MOUNT_FAIL, - "%s", msg); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLRLOCKS_CLNT_MOUNT_FAIL, + NULL); goto out; } ret = glusterd_clearlocks_send_cmd(volinfo, cmd_str, path, result, msg, sizeof(msg), mntpt); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_CLRCLK_SND_CMD_FAIL, "%s", - msg); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLRCLK_SND_CMD_FAIL, NULL); goto umount; } @@ -3162,16 +3004,16 @@ glusterd_op_clearlocks_volume(dict_t *dict, char 
**op_errstr, dict_t *rsp_dict) snprintf(msg, sizeof(msg), "Failed to set clear-locks " "result"); - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "%s", msg); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Key=lk-summary", NULL); } umount: glusterd_clearlocks_unmount(volinfo, mntpt); if (glusterd_clearlocks_rmdir_mount(volinfo, mntpt)) - gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_CLRLOCKS_CLNT_UMOUNT_FAIL, - "Couldn't unmount " - "clear-locks mount point"); + gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_CLRLOCKS_CLNT_UMOUNT_FAIL, + NULL); out: if (ret) diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-set.c b/xlators/mgmt/glusterd/src/glusterd-volume-set.c index cec5b1a78d3..398b4d76f52 100644 --- a/xlators/mgmt/glusterd/src/glusterd-volume-set.c +++ b/xlators/mgmt/glusterd/src/glusterd-volume-set.c @@ -8,6 +8,7 @@ later), or the GNU General Public License, version 2 (GPLv2), in all cases as published by the Free Software Foundation. */ +#include <glusterfs/syscall.h> #include "glusterd-volgen.h" #include "glusterd-utils.h" @@ -786,6 +787,32 @@ out: return ret; } +static int +is_directory(const char *path) +{ + struct stat statbuf; + if (sys_stat(path, &statbuf) != 0) + return 0; + return S_ISDIR(statbuf.st_mode); +} +static int +validate_statedump_path(glusterd_volinfo_t *volinfo, dict_t *dict, char *key, + char *value, char **op_errstr) +{ + xlator_t *this = NULL; + this = THIS; + GF_ASSERT(this); + + int ret = 0; + if (!is_directory(value)) { + gf_asprintf(op_errstr, "Failed: %s is not a directory", value); + ret = -1; + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY, "%s", + *op_errstr); + } + + return ret; +} /* dispatch table for VOLUME SET * ----------------------------- @@ -1239,10 +1266,21 @@ struct volopt_map_entry glusterd_volopt_map[] = { .option = "priority", .op_version = 1, .flags = VOLOPT_FLAG_CLIENT_OPT}, - {.key = "performance.cache-size", + {.key = "performance.io-cache-size", .voltype = 
"performance/io-cache", - .op_version = 1, + .option = "cache-size", + .op_version = GD_OP_VERSION_8_0, .flags = VOLOPT_FLAG_CLIENT_OPT}, + { + .key = "performance.cache-size", + .voltype = "performance/io-cache", + .op_version = 1, + .flags = VOLOPT_FLAG_CLIENT_OPT, + .description = "Deprecated option. Use performance.io-cache-size " + "to adjust the cache size of the io-cache translator, " + "and use performance.quick-read-cache-size to adjust " + "the cache size of the quick-read translator.", + }, /* IO-threads xlator options */ {.key = "performance.io-thread-count", @@ -1282,16 +1320,29 @@ struct volopt_map_entry glusterd_volopt_map[] = { .voltype = "performance/io-cache", .option = "pass-through", .op_version = GD_OP_VERSION_4_1_0}, + {.key = "performance.quick-read-cache-size", + .voltype = "performance/quick-read", + .option = "cache-size", + .op_version = GD_OP_VERSION_8_0, + .flags = VOLOPT_FLAG_CLIENT_OPT}, {.key = "performance.cache-size", .voltype = "performance/quick-read", .type = NO_DOC, .op_version = 1, .flags = VOLOPT_FLAG_CLIENT_OPT}, + {.key = "performance.quick-read-cache-timeout", + .voltype = "performance/quick-read", + .option = "cache-timeout", + .op_version = GD_OP_VERSION_8_0, + .flags = VOLOPT_FLAG_CLIENT_OPT}, {.key = "performance.qr-cache-timeout", .voltype = "performance/quick-read", .option = "cache-timeout", .op_version = 1, - .flags = VOLOPT_FLAG_CLIENT_OPT}, + .flags = VOLOPT_FLAG_CLIENT_OPT, + .description = + "Deprecated option. 
Use performance.quick-read-cache-timeout " + "instead."}, {.key = "performance.quick-read-cache-invalidation", .voltype = "performance/quick-read", .option = "quick-read-cache-invalidation", @@ -1588,7 +1639,8 @@ struct volopt_map_entry glusterd_volopt_map[] = { {.key = "server.statedump-path", .voltype = "protocol/server", .option = "statedump-path", - .op_version = 1}, + .op_version = 1, + .validate_fn = validate_statedump_path}, {.key = "server.outstanding-rpc-limit", .voltype = "protocol/server", .option = "rpc.outstanding-rpc-limit", @@ -1761,7 +1813,7 @@ struct volopt_map_entry glusterd_volopt_map[] = { {.key = "performance.readdir-ahead", .voltype = "performance/readdir-ahead", .option = "!perf", - .value = "on", + .value = "off", .op_version = 3, .description = "enable/disable readdir-ahead translator in the volume.", .flags = VOLOPT_FLAG_CLIENT_OPT | VOLOPT_FLAG_XLATOR_OPT}, @@ -2434,7 +2486,6 @@ struct volopt_map_entry glusterd_volopt_map[] = { .voltype = "storage/posix", .op_version = GD_OP_VERSION_4_1_0, }, - {.key = "storage.bd-aio", .voltype = "storage/bd", .op_version = 3}, {.key = "config.memory-accounting", .voltype = "mgmt/glusterd", .option = "!config", @@ -2651,6 +2702,15 @@ struct volopt_map_entry glusterd_volopt_map[] = { .op_version = GD_OP_VERSION_3_7_0, .type = NO_DOC, }, + { + .key = "features.signer-threads", + .voltype = "features/bit-rot", + .value = BR_WORKERS, + .option = "signer-threads", + .op_version = GD_OP_VERSION_8_0, + .type = NO_DOC, + }, + /* Upcall translator options */ /* Upcall translator options */ { .key = "features.cache-invalidation", @@ -3067,4 +3127,20 @@ struct volopt_map_entry glusterd_volopt_map[] = { .voltype = "features/cloudsync", .op_version = GD_OP_VERSION_7_0, .flags = VOLOPT_FLAG_CLIENT_OPT}, + { + .key = "features.acl", + .voltype = "features/access-control", + .value = "enable", + .option = "!features", + .op_version = GD_OP_VERSION_8_0, + .description = "(WARNING: for debug purpose only) enable/disable " 
+ "access-control xlator in volume", + .type = NO_DOC, + }, + + {.key = "cluster.use-anonymous-inode", + .voltype = "cluster/replicate", + .op_version = GD_OP_VERSION_9_0, + .value = "yes", + .flags = VOLOPT_FLAG_CLIENT_OPT}, {.key = NULL}}; diff --git a/xlators/mgmt/glusterd/src/glusterd.c b/xlators/mgmt/glusterd/src/glusterd.c index cd2c5da628b..7a86c2997b1 100644 --- a/xlators/mgmt/glusterd/src/glusterd.c +++ b/xlators/mgmt/glusterd/src/glusterd.c @@ -67,7 +67,7 @@ extern struct rpcsvc_program gd_svc_cli_trusted_progs; extern struct rpc_clnt_program gd_brick_prog; extern struct rpcsvc_program glusterd_mgmt_hndsk_prog; -extern char snap_mount_dir[PATH_MAX]; +extern char snap_mount_dir[VALID_GLUSTERD_PATHMAX]; rpcsvc_cbk_program_t glusterd_cbk_prog = { .progname = "Gluster Callback", @@ -202,8 +202,10 @@ glusterd_options_init(xlator_t *this) priv = this->private; priv->opts = dict_new(); - if (!priv->opts) + if (!priv->opts) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } ret = glusterd_store_retrieve_options(this); if (ret == 0) { @@ -247,6 +249,7 @@ glusterd_client_statedump_submit_req(char *volname, char *target_ip, char *pid) GF_ASSERT(conf); if (target_ip == NULL || pid == NULL) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); ret = -1; goto out; } @@ -447,14 +450,19 @@ glusterd_rpcsvc_options_build(dict_t *options) { int ret = 0; uint32_t backlog = 0; + xlator_t *this = THIS; + GF_ASSERT(this); ret = dict_get_uint32(options, "transport.listen-backlog", &backlog); if (ret) { backlog = GLUSTERFS_SOCKET_LISTEN_BACKLOG; ret = dict_set_uint32(options, "transport.listen-backlog", backlog); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=transport.listen-backlog", NULL); goto out; + } } gf_msg_debug("glusterd", 0, "listen-backlog value: %d", backlog); @@ -574,6 +582,7 @@ glusterd_crt_georep_folders(char *georepdir, glusterd_conf_t *conf) len = 
snprintf(georepdir, PATH_MAX, "%s/" GEOREP, conf->workdir); if ((len < 0) || (len >= PATH_MAX)) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); ret = -1; goto out; } @@ -585,9 +594,11 @@ glusterd_crt_georep_folders(char *georepdir, glusterd_conf_t *conf) } ret = dict_get_str(THIS->options, GEOREP "-log-group", &greplg_s); - if (ret) + if (ret) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=log-group", NULL); ret = 0; - else { + } else { gr = getgrnam(greplg_s); if (!gr) { gf_msg("glusterd", GF_LOG_CRITICAL, 0, GD_MSG_LOGGROUP_INVALID, @@ -628,6 +639,7 @@ glusterd_crt_georep_folders(char *georepdir, glusterd_conf_t *conf) } len = snprintf(logdir, PATH_MAX, "%s/" GEOREP "-slaves", conf->logdir); if ((len < 0) || (len >= PATH_MAX)) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); ret = -1; goto out; } @@ -654,6 +666,7 @@ glusterd_crt_georep_folders(char *georepdir, glusterd_conf_t *conf) len = snprintf(logdir, PATH_MAX, "%s/" GEOREP "-slaves/mbr", conf->logdir); if ((len < 0) || (len >= PATH_MAX)) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL); ret = -1; goto out; } @@ -1045,6 +1058,8 @@ _install_mount_spec(dict_t *opts, char *key, data_t *value, void *data) int rv = 0; gf_mount_spec_t *mspec = NULL; char *user = NULL; + xlator_t *this = THIS; + GF_ASSERT(this); label = strtail(key, "mountbroker."); @@ -1059,8 +1074,10 @@ _install_mount_spec(dict_t *opts, char *key, data_t *value, void *data) return 0; mspec = GF_CALLOC(1, sizeof(*mspec), gf_gld_mt_mount_spec); - if (!mspec) + if (!mspec) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); goto err; + } mspec->label = label; if (georep) { @@ -1116,8 +1133,10 @@ glusterd_init_uds_listener(xlator_t *this) GF_ASSERT(this); options = dict_new(); - if (!options) + if (!options) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL); goto out; + } sock_data = dict_get(this->options, 
"glusterd-sockfile"); (void)snprintf(sockfile, sizeof(sockfile), "%s", @@ -1404,7 +1423,7 @@ init(xlator_t *this) char *mountbroker_root = NULL; int i = 0; int total_transport = 0; - gf_boolean_t valgrind = _gf_false; + gf_valgrind_tool vgtool; char *valgrind_str = NULL; char *transport_type = NULL; char var_run_dir[PATH_MAX] = { @@ -1417,6 +1436,14 @@ init(xlator_t *this) int32_t len = 0; int op_version = 0; +#if defined(RUN_WITH_MEMCHECK) + vgtool = _gf_memcheck; +#elif defined(RUN_WITH_DRD) + vgtool = _gf_drd; +#else + vgtool = _gf_none; +#endif + #ifndef GF_DARWIN_HOST_OS { struct rlimit lim; @@ -1424,9 +1451,8 @@ init(xlator_t *this) lim.rlim_max = 65536; if (setrlimit(RLIMIT_NOFILE, &lim) == -1) { - gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_SETXATTR_FAIL, - "Failed to set 'ulimit -n " - " 65536'"); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SET_XATTR_FAIL, + "Failed to set 'ulimit -n 65536'", NULL); } else { gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_FILE_DESC_LIMIT_SET, "Maximum allowed open file descriptors " @@ -1872,6 +1898,9 @@ init(xlator_t *this) (void)strncpy(conf->logdir, logdir, sizeof(conf->logdir)); synclock_init(&conf->big_lock, SYNC_LOCK_RECURSIVE); + synccond_init(&conf->cond_restart_bricks); + synccond_init(&conf->cond_restart_shd); + synccond_init(&conf->cond_blockers); pthread_mutex_init(&conf->xprt_lock, NULL); INIT_LIST_HEAD(&conf->xprt_list); pthread_mutex_init(&conf->import_volumes, NULL); @@ -1904,18 +1933,24 @@ init(xlator_t *this) } /* Set option to run bricks on valgrind if enabled in glusterd.vol */ - this->ctx->cmd_args.valgrind = valgrind; + this->ctx->cmd_args.vgtool = vgtool; ret = dict_get_str(this->options, "run-with-valgrind", &valgrind_str); if (ret < 0) { gf_msg_debug(this->name, 0, "cannot get run-with-valgrind value"); } if (valgrind_str) { - if (gf_string2boolean(valgrind_str, &valgrind)) { + gf_boolean_t vg = _gf_false; + + if (!strcmp(valgrind_str, "memcheck")) + this->ctx->cmd_args.vgtool = _gf_memcheck; + 
else if (!strcmp(valgrind_str, "drd")) + this->ctx->cmd_args.vgtool = _gf_drd; + else if (!gf_string2boolean(valgrind_str, &vg)) + this->ctx->cmd_args.vgtool = (vg ? _gf_memcheck : _gf_none); + else gf_msg(this->name, GF_LOG_WARNING, EINVAL, GD_MSG_INVALID_ENTRY, - "run-with-valgrind value not a boolean string"); - } else { - this->ctx->cmd_args.valgrind = valgrind; - } + "run-with-valgrind is neither boolean" + " nor one of 'memcheck' or 'drd'"); } /* Store ping-timeout in conf */ diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h index d7e4da8425a..cc4f98ecf47 100644 --- a/xlators/mgmt/glusterd/src/glusterd.h +++ b/xlators/mgmt/glusterd/src/glusterd.h @@ -209,6 +209,9 @@ typedef struct { gf_boolean_t restart_done; dict_t *opts; synclock_t big_lock; + synccond_t cond_restart_bricks; + synccond_t cond_restart_shd; + synccond_t cond_blockers; rpcsvc_t *uds_rpc; /* RPCSVC for the unix domain socket */ uint32_t base_port; uint32_t max_port; @@ -510,6 +513,10 @@ struct glusterd_volinfo_ { * volfile generation code, we are * temporarily appending either "-hot" * or "-cold" */ + gf_atomic_t volpeerupdate; + /* Flag to check about volume has received updates + from peer + */ }; typedef enum gd_snap_status_ { @@ -1190,6 +1197,8 @@ glusterd_op_set_ganesha(dict_t *dict, char **errstr); int ganesha_manage_export(dict_t *dict, char *value, gf_boolean_t update_cache_invalidation, char **op_errstr); +int +gd_ganesha_send_dbus(char *volname, char *value); gf_boolean_t glusterd_is_ganesha_cluster(); gf_boolean_t @@ -1211,6 +1220,9 @@ int glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr); int +glusterd_set_rebalance_id_for_remove_brick(dict_t *req_dict, dict_t *rsp_dict); + +int glusterd_set_rebalance_id_in_rsp_dict(dict_t *req_dict, dict_t *rsp_dict); int @@ -1354,4 +1366,10 @@ glusterd_options_init(xlator_t *this); int32_t glusterd_recreate_volfiles(glusterd_conf_t *conf); +void +glusterd_add_peers_to_auth_list(char *volname); + 
+int +glusterd_replace_old_auth_allow_list(char *volname); + #endif |