Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-volume-ops.c')
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-volume-ops.c | 900 |
1 file changed, 297 insertions, 603 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c index 7cfba3d22f7..814ab14fb27 100644 --- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c +++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c @@ -7,10 +7,6 @@ later), or the GNU General Public License, version 2 (GPLv2), in all cases as published by the Free Software Foundation. */ -#ifdef HAVE_BD_XLATOR -#include <lvm2app.h> -#endif - #include <glusterfs/common-utils.h> #include <glusterfs/syscall.h> #include "cli1-xdr.h" @@ -41,234 +37,6 @@ #define glusterd_op_start_volume_args_get(dict, volname, flags) \ glusterd_op_stop_volume_args_get(dict, volname, flags) -gf_ai_compare_t -glusterd_compare_addrinfo(struct addrinfo *first, struct addrinfo *next) -{ - int ret = -1; - struct addrinfo *tmp1 = NULL; - struct addrinfo *tmp2 = NULL; - char firstip[NI_MAXHOST] = {0.}; - char nextip[NI_MAXHOST] = { - 0, - }; - - for (tmp1 = first; tmp1 != NULL; tmp1 = tmp1->ai_next) { - ret = getnameinfo(tmp1->ai_addr, tmp1->ai_addrlen, firstip, NI_MAXHOST, - NULL, 0, NI_NUMERICHOST); - if (ret) - return GF_AI_COMPARE_ERROR; - for (tmp2 = next; tmp2 != NULL; tmp2 = tmp2->ai_next) { - ret = getnameinfo(tmp2->ai_addr, tmp2->ai_addrlen, nextip, - NI_MAXHOST, NULL, 0, NI_NUMERICHOST); - if (ret) - return GF_AI_COMPARE_ERROR; - if (!strcmp(firstip, nextip)) { - return GF_AI_COMPARE_MATCH; - } - } - } - return GF_AI_COMPARE_NO_MATCH; -} - -/* Check for non optimal brick order for replicate : - * Checks if bricks belonging to a replicate volume - * are present on the same server - */ -int32_t -glusterd_check_brick_order(dict_t *dict, char *err_str) -{ - int ret = -1; - int i = 0; - int j = 0; - int k = 0; - xlator_t *this = NULL; - addrinfo_list_t *ai_list = NULL; - addrinfo_list_t *ai_list_tmp1 = NULL; - addrinfo_list_t *ai_list_tmp2 = NULL; - char *brick = NULL; - char *brick_list = NULL; - char *brick_list_dup = NULL; - char *brick_list_ptr = NULL; - char *tmpptr = NULL; - char *volname = NULL; - int32_t brick_count = 0; - int32_t type = GF_CLUSTER_TYPE_NONE; - int32_t sub_count = 0; - struct addrinfo *ai_info = NULL; - - const char failed_string[2048] = - "Failed to perform brick order " - "check. Use 'force' at the end of the command" - " if you want to override this behavior. "; - const char found_string[2048] = - "Multiple bricks of a %s " - "volume are present on the same server. This " - "setup is not optimal. Bricks should be on " - "different nodes to have best fault tolerant " - "configuration. Use 'force' at the end of the " - "command if you want to override this " - "behavior. 
"; - - this = THIS; - - GF_ASSERT(this); - - ai_list = MALLOC(sizeof(addrinfo_list_t)); - ai_list->info = NULL; - CDS_INIT_LIST_HEAD(&ai_list->list); - - ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); - if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Unable to get volume name"); - goto out; - } - - ret = dict_get_int32n(dict, "type", SLEN("type"), &type); - if (ret) { - snprintf(err_str, 512, "Unable to get type of volume %s", volname); - gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, "%s", - err_str); - goto out; - } - - ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &brick_list); - if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Bricks check : Could not " - "retrieve bricks list"); - goto out; - } - - ret = dict_get_int32n(dict, "count", SLEN("count"), &brick_count); - if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Bricks check : Could not " - "retrieve brick count"); - goto out; - } - - if (type != GF_CLUSTER_TYPE_DISPERSE) { - ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"), - &sub_count); - if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Bricks check : Could" - " not retrieve replica count"); - goto out; - } - gf_msg_debug(this->name, 0, - "Replicate cluster type " - "found. Checking brick order."); - } else { - ret = dict_get_int32n(dict, "disperse-count", SLEN("disperse-count"), - &sub_count); - if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Bricks check : Could" - " not retrieve disperse count"); - goto out; - } - gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DISPERSE_CLUSTER_FOUND, - "Disperse cluster type" - " found. Checking brick order."); - } - - brick_list_dup = brick_list_ptr = gf_strdup(brick_list); - /* Resolve hostnames and get addrinfo */ - while (i < brick_count) { - ++i; - brick = strtok_r(brick_list_dup, " \n", &tmpptr); - brick_list_dup = tmpptr; - if (brick == NULL) - goto check_failed; - brick = strtok_r(brick, ":", &tmpptr); - if (brick == NULL) - goto check_failed; - ret = getaddrinfo(brick, NULL, NULL, &ai_info); - if (ret != 0) { - ret = 0; - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_RESOLVE_FAIL, - "unable to resolve " - "host name"); - goto out; - } - ai_list_tmp1 = MALLOC(sizeof(addrinfo_list_t)); - if (ai_list_tmp1 == NULL) { - ret = 0; - gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY, - "failed to allocate " - "memory"); - freeaddrinfo(ai_info); - goto out; - } - ai_list_tmp1->info = ai_info; - cds_list_add_tail(&ai_list_tmp1->list, &ai_list->list); - ai_list_tmp1 = NULL; - } - - i = 0; - ai_list_tmp1 = cds_list_entry(ai_list->list.next, addrinfo_list_t, list); - - /* Check for bad brick order */ - while (i < brick_count) { - ++i; - ai_info = ai_list_tmp1->info; - ai_list_tmp1 = cds_list_entry(ai_list_tmp1->list.next, addrinfo_list_t, - list); - if (0 == i % sub_count) { - j = 0; - continue; - } - ai_list_tmp2 = ai_list_tmp1; - k = j; - while (k < sub_count - 1) { - ++k; - ret = glusterd_compare_addrinfo(ai_info, ai_list_tmp2->info); - if (GF_AI_COMPARE_ERROR == ret) - goto check_failed; - if (GF_AI_COMPARE_MATCH == ret) - goto found_bad_brick_order; - ai_list_tmp2 = cds_list_entry(ai_list_tmp2->list.next, - addrinfo_list_t, list); - } - ++j; - } - gf_msg_debug(this->name, 0, "Brick order okay"); - ret = 0; - goto out; - -check_failed: - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER_CHECK_FAIL, - "Failed bad brick order check"); - 
snprintf(err_str, sizeof(failed_string), failed_string); - ret = -1; - goto out; - -found_bad_brick_order: - gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_BAD_BRKORDER, - "Bad brick order found"); - if (type == GF_CLUSTER_TYPE_DISPERSE) { - snprintf(err_str, sizeof(found_string), found_string, "disperse"); - } else { - snprintf(err_str, sizeof(found_string), found_string, "replicate"); - } - - ret = -1; -out: - ai_list_tmp2 = NULL; - GF_FREE(brick_list_ptr); - cds_list_for_each_entry(ai_list_tmp1, &ai_list->list, list) - { - if (ai_list_tmp1->info) - freeaddrinfo(ai_list_tmp1->info); - free(ai_list_tmp2); - ai_list_tmp2 = ai_list_tmp1; - } - free(ai_list_tmp2); - return ret; -} - int __glusterd_handle_create_volume(rpcsvc_request_t *req) { @@ -280,6 +48,7 @@ __glusterd_handle_create_volume(rpcsvc_request_t *req) char *bricks = NULL; char *volname = NULL; int brick_count = 0; + int thin_arbiter_count = 0; void *cli_rsp = NULL; char err_str[2048] = { 0, @@ -304,6 +73,7 @@ __glusterd_handle_create_volume(rpcsvc_request_t *req) #else char *addr_family = "inet"; #endif + glusterd_volinfo_t *volinfo = NULL; GF_ASSERT(req); @@ -357,7 +127,9 @@ __glusterd_handle_create_volume(rpcsvc_request_t *req) goto out; } - if ((ret = glusterd_check_volume_exists(volname))) { + ret = glusterd_volinfo_find(volname, &volinfo); + if (!ret) { + ret = -1; snprintf(err_str, sizeof(err_str), "Volume %s already exists", volname); gf_msg(this->name, GF_LOG_ERROR, EEXIST, GD_MSG_VOL_ALREADY_EXIST, "%s", err_str); @@ -436,6 +208,21 @@ __glusterd_handle_create_volume(rpcsvc_request_t *req) goto out; } + ret = dict_get_int32n(dict, "thin-arbiter-count", + SLEN("thin-arbiter-count"), &thin_arbiter_count); + if (thin_arbiter_count && conf->op_version < GD_OP_VERSION_7_0) { + snprintf(err_str, sizeof(err_str), + "Cannot execute command. " + "The cluster is operating at version %d. 
" + "Thin-arbiter volume creation is unavailable in " + "this version", + conf->op_version); + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_OP_FAILED, "%s", + err_str); + ret = -1; + goto out; + } + if (!dict_getn(dict, "force", SLEN("force"))) { gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "Failed to get 'force' flag"); @@ -780,51 +567,6 @@ glusterd_handle_cli_delete_volume(rpcsvc_request_t *req) return glusterd_big_locked_handler(req, __glusterd_handle_cli_delete_volume); } -int -glusterd_handle_shd_option_for_tier(glusterd_volinfo_t *volinfo, char *value, - dict_t *dict) -{ - int count = 0; - char dict_key[64] = { - 0, - }; - int keylen; - char *key = NULL; - int ret = 0; - - key = gd_get_shd_key(volinfo->tier_info.cold_type); - if (key) { - count++; - keylen = snprintf(dict_key, sizeof(dict_key), "key%d", count); - ret = dict_set_strn(dict, dict_key, keylen, key); - if (ret) - goto out; - keylen = snprintf(dict_key, sizeof(dict_key), "value%d", count); - ret = dict_set_strn(dict, dict_key, keylen, value); - if (ret) - goto out; - } - - key = gd_get_shd_key(volinfo->tier_info.hot_type); - if (key) { - count++; - keylen = snprintf(dict_key, sizeof(dict_key), "key%d", count); - ret = dict_set_strn(dict, dict_key, keylen, key); - if (ret) - goto out; - keylen = snprintf(dict_key, sizeof(dict_key), "value%d", count); - ret = dict_set_strn(dict, dict_key, keylen, value); - if (ret) - goto out; - } - - ret = dict_set_int32n(dict, "count", SLEN("count"), count); - if (ret) - goto out; - -out: - return ret; -} static int glusterd_handle_heal_options_enable_disable(rpcsvc_request_t *req, dict_t *dict, glusterd_volinfo_t *volinfo) @@ -833,10 +575,14 @@ glusterd_handle_heal_options_enable_disable(rpcsvc_request_t *req, dict_t *dict, int ret = 0; char *key = NULL; char *value = NULL; + xlator_t *this = THIS; + GF_ASSERT(this); ret = dict_get_int32n(dict, "heal-op", SLEN("heal-op"), (int32_t *)&heal_op); if (ret || (heal_op == GF_SHD_OP_INVALID)) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, + "Key=heal-op", NULL); ret = -1; goto out; } @@ -864,23 +610,6 @@ glusterd_handle_heal_options_enable_disable(rpcsvc_request_t *req, dict_t *dict, value = "disable"; } - /* Convert this command to volume-set command based on volume type */ - if (volinfo->type == GF_CLUSTER_TYPE_TIER) { - switch (heal_op) { - case GF_SHD_OP_HEAL_ENABLE: - case GF_SHD_OP_HEAL_DISABLE: - ret = glusterd_handle_shd_option_for_tier(volinfo, value, dict); - if (!ret) - goto set_volume; - goto out; - /* For any other heal_op, including granular-entry heal, - * just break out of the block but don't goto out yet. 
- */ - default: - break; - } - } - if ((heal_op == GF_SHD_OP_HEAL_ENABLE) || (heal_op == GF_SHD_OP_HEAL_DISABLE)) { key = volgen_get_shd_key(volinfo->type); @@ -891,23 +620,34 @@ glusterd_handle_heal_options_enable_disable(rpcsvc_request_t *req, dict_t *dict, } else { key = "cluster.granular-entry-heal"; ret = dict_set_int8(dict, "is-special-key", 1); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=is-special-key", NULL); goto out; + } } ret = dict_set_strn(dict, "key1", SLEN("key1"), key); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=key1", NULL); goto out; + } ret = dict_set_strn(dict, "value1", SLEN("value1"), value); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=value1", NULL); goto out; + } ret = dict_set_int32n(dict, "count", SLEN("count"), 1); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=count", NULL); goto out; + } -set_volume: ret = glusterd_op_begin_synctask(req, GD_OP_SET_VOLUME, dict); out: @@ -930,18 +670,19 @@ __glusterd_handle_cli_heal_volume(rpcsvc_request_t *req) 0, }; + this = THIS; + GF_ASSERT(this); + GF_ASSERT(req); ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); if (ret < 0) { // failed to decode msg; req->rpc_err = GARBAGE_ARGS; + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL); goto out; } - this = THIS; - GF_ASSERT(this); - if (cli_req.dict.dict_len) { /* Unserialize the dictionary */ dict = dict_new(); @@ -1002,8 +743,11 @@ __glusterd_handle_cli_heal_volume(rpcsvc_request_t *req) goto out; ret = dict_set_int32n(dict, "count", SLEN("count"), volinfo->brick_count); - if (ret) + if (ret) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED, + "Key=count", NULL); goto out; + } ret = glusterd_op_begin_synctask(req, GD_OP_HEAL_VOLUME, dict); @@ -1055,6 +799,7 @@ __glusterd_handle_cli_statedump_volume(rpcsvc_request_t *req) ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req); if (ret < 0) { req->rpc_err = GARBAGE_ARGS; + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL); goto out; } if (cli_req.dict.dict_len) { @@ -1133,108 +878,6 @@ glusterd_handle_cli_statedump_volume(rpcsvc_request_t *req) __glusterd_handle_cli_statedump_volume); } -#ifdef HAVE_BD_XLATOR -/* - * Validates if given VG in the brick exists or not. Also checks if VG has - * GF_XATTR_VOL_ID_KEY tag set to avoid using same VG for multiple bricks. - * Tag is checked only during glusterd_op_stage_create_volume. Tag is set during - * glusterd_validate_and_create_brickpath(). 
- * @brick - brick info, @check_tag - check for VG tag or not - * @msg - Error message to return to caller - */ -int -glusterd_is_valid_vg(glusterd_brickinfo_t *brick, int check_tag, char *msg) -{ - lvm_t handle = NULL; - vg_t vg = NULL; - char *vg_name = NULL; - int retval = 0; - char *p = NULL; - char *ptr = NULL; - struct dm_list *dm_lvlist = NULL; - struct dm_list *dm_seglist = NULL; - struct lvm_lv_list *lv_list = NULL; - struct lvm_property_value prop = { - 0, - }; - struct lvm_lvseg_list *seglist = NULL; - struct dm_list *taglist = NULL; - struct lvm_str_list *strl = NULL; - - handle = lvm_init(NULL); - if (!handle) { - sprintf(msg, "lvm_init failed, could not validate vg"); - return -1; - } - if (*brick->vg == '\0') { /* BD xlator has vg in brick->path */ - p = gf_strdup(brick->path); - vg_name = strtok_r(p, "/", &ptr); - } else - vg_name = brick->vg; - - vg = lvm_vg_open(handle, vg_name, "r", 0); - if (!vg) { - sprintf(msg, "no such vg: %s", vg_name); - retval = -1; - goto out; - } - if (!check_tag) - goto next; - - taglist = lvm_vg_get_tags(vg); - if (!taglist) - goto next; - - dm_list_iterate_items(strl, taglist) - { - if (!strncmp(strl->str, GF_XATTR_VOL_ID_KEY, - SLEN(GF_XATTR_VOL_ID_KEY))) { - sprintf(msg, - "VG %s is already part of" - " a brick", - vg_name); - retval = -1; - goto out; - } - } -next: - - brick->caps = CAPS_BD | CAPS_OFFLOAD_COPY | CAPS_OFFLOAD_SNAPSHOT; - - dm_lvlist = lvm_vg_list_lvs(vg); - if (!dm_lvlist) - goto out; - - dm_list_iterate_items(lv_list, dm_lvlist) - { - dm_seglist = lvm_lv_list_lvsegs(lv_list->lv); - dm_list_iterate_items(seglist, dm_seglist) - { - prop = lvm_lvseg_get_property(seglist->lvseg, "segtype"); - if (!prop.is_valid || !prop.value.string) - continue; - if (!strcmp(prop.value.string, "thin-pool")) { - brick->caps |= CAPS_THIN; - gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_THINPOOLS_FOR_THINLVS, - "Thin Pool " - "\"%s\" will be used for thin LVs", - lvm_lv_get_name(lv_list->lv)); - break; - } - } - } - - retval = 0; -out: - if (vg) - lvm_vg_close(vg); - lvm_quit(handle); - if (p) - GF_FREE(p); - return retval; -} -#endif - /* op-sm */ int glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr, @@ -1242,7 +885,6 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr, { int ret = 0; char *volname = NULL; - gf_boolean_t exists = _gf_false; char *bricks = NULL; char *brick_list = NULL; char *free_ptr = NULL; @@ -1252,6 +894,8 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr, int32_t local_brick_count = 0; int32_t i = 0; int32_t type = 0; + int32_t replica_count = 0; + int32_t disperse_count = 0; char *brick = NULL; char *tmpptr = NULL; xlator_t *this = NULL; @@ -1260,6 +904,7 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr, uuid_t volume_uuid; char *volume_uuid_str; gf_boolean_t is_force = _gf_false; + glusterd_volinfo_t *volinfo = NULL; this = THIS; GF_ASSERT(this); @@ -1274,13 +919,11 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr, goto out; } - exists = glusterd_check_volume_exists(volname); - if (exists) { + ret = glusterd_volinfo_find(volname, &volinfo); + if (!ret) { snprintf(msg, sizeof(msg), "Volume %s already exists", volname); ret = -1; goto out; - } else { - ret = 0; } ret = dict_get_int32n(dict, "count", SLEN("count"), &brick_count); @@ -1331,6 +974,64 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr, } } + /*Check brick order if the volume type is replicate or disperse. If + * force at the end of command not given then check brick order. 
+ */ + if (is_origin_glusterd(dict)) { + ret = dict_get_int32n(dict, "type", SLEN("type"), &type); + if (ret) { + snprintf(msg, sizeof(msg), + "Unable to get type of " + "volume %s", + volname); + gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, "%s", + msg); + goto out; + } + + if (!is_force) { + if (type == GF_CLUSTER_TYPE_REPLICATE) { + ret = dict_get_int32n(dict, "replica-count", + SLEN("replica-count"), &replica_count); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Bricks check : Could" + " not retrieve replica count"); + goto out; + } + gf_msg_debug(this->name, 0, + "Replicate cluster type " + "found. Checking brick order."); + ret = glusterd_check_brick_order(dict, msg, type, &volname, + &bricks, &brick_count, + replica_count); + } else if (type == GF_CLUSTER_TYPE_DISPERSE) { + ret = dict_get_int32n(dict, "disperse-count", + SLEN("disperse-count"), &disperse_count); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Bricks check : Could" + " not retrieve disperse count"); + goto out; + } + gf_msg_debug(this->name, 0, + "Disperse cluster type" + " found. Checking brick order."); + ret = glusterd_check_brick_order(dict, msg, type, &volname, + &bricks, &brick_count, + disperse_count); + } + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER, + "Not creating the volume because of " + "bad brick order. %s", + msg); + *op_errstr = gf_strdup(msg); + goto out; + } + } + } + while (i < brick_count) { i++; brick = strtok_r(brick_list, " \n", &tmpptr); @@ -1373,13 +1074,6 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr, } if (!gf_uuid_compare(brick_info->uuid, MY_UUID)) { -#ifdef HAVE_BD_XLATOR - if (brick_info->vg[0]) { - ret = glusterd_is_valid_vg(brick_info, 1, msg); - if (ret) - goto out; - } -#endif ret = glusterd_validate_and_create_brickpath( brick_info, volume_uuid, volname, op_errstr, is_force, _gf_false); @@ -1417,36 +1111,6 @@ glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr, brick_info = NULL; } - /*Check brick order if the volume type is replicate or disperse. If - * force at the end of command not given then check brick order. 
- */ - if (is_origin_glusterd(dict)) { - ret = dict_get_int32n(dict, "type", SLEN("type"), &type); - if (ret) { - snprintf(msg, sizeof(msg), - "Unable to get type of " - "volume %s", - volname); - gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, "%s", - msg); - goto out; - } - - if (!is_force) { - if ((type == GF_CLUSTER_TYPE_REPLICATE) || - (type == GF_CLUSTER_TYPE_DISPERSE)) { - ret = glusterd_check_brick_order(dict, msg); - if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER, - "Not " - "creating volume because of " - "bad brick order"); - goto out; - } - } - } - } - ret = dict_set_int32n(rsp_dict, "brick_count", SLEN("brick_count"), local_brick_count); if (ret) { @@ -1478,20 +1142,32 @@ glusterd_op_stop_volume_args_get(dict_t *dict, char **volname, int *flags) this = THIS; GF_ASSERT(this); - if (!dict || !volname || !flags) + if (!dict) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ARGUMENT, NULL); + goto out; + } + + if (!volname) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ARGUMENT, NULL); + goto out; + } + + if (!flags) { + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } ret = dict_get_strn(dict, "volname", SLEN("volname"), volname); if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Unable to get volume name"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=volname", NULL); goto out; } ret = dict_get_int32n(dict, "flags", SLEN("flags"), flags); if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Unable to get flags"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=flags", NULL); goto out; } out: @@ -1504,27 +1180,29 @@ glusterd_op_statedump_volume_args_get(dict_t *dict, char **volname, { int ret = -1; - if (!dict || !volname || !options || !option_cnt) + if (!dict || !volname || !options || !option_cnt) { + gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL); goto out; + } ret = dict_get_strn(dict, "volname", SLEN("volname"), volname); if (ret) { - gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Unable to get volname"); + gf_smsg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=volname", NULL); goto out; } ret = dict_get_strn(dict, "options", SLEN("options"), options); if (ret) { - gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Unable to get options"); + gf_smsg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=options", NULL); goto out; } ret = dict_get_int32n(dict, "option_cnt", SLEN("option_cnt"), option_cnt); if (ret) { - gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Unable to get option count"); + gf_smsg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=option_cnt", NULL); goto out; } @@ -1541,7 +1219,6 @@ glusterd_op_stage_start_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) int flags = 0; int32_t brick_count = 0; int32_t local_brick_count = 0; - gf_boolean_t exists = _gf_false; glusterd_volinfo_t *volinfo = NULL; glusterd_brickinfo_t *brickinfo = NULL; char msg[2048] = { @@ -1558,7 +1235,6 @@ glusterd_op_stage_start_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) char xattr_volid[50] = { 0, }; - int caps = 0; int32_t len = 0; this = THIS; @@ -1571,16 +1247,9 @@ glusterd_op_stage_start_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) if (ret) goto out; - exists = glusterd_check_volume_exists(volname); - - if (!exists) { - snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, 
volname); - ret = -1; - goto out; - } - ret = glusterd_volinfo_find(volname, &volinfo); if (ret) { + snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname); gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, FMTSTR_CHECK_VOL_EXISTS, volname); goto out; @@ -1720,22 +1389,6 @@ glusterd_op_stage_start_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) local_brick_count = brick_count; } } - -#ifdef HAVE_BD_XLATOR - if (brickinfo->vg[0]) - caps = CAPS_BD | CAPS_THIN | CAPS_OFFLOAD_COPY | - CAPS_OFFLOAD_SNAPSHOT; - /* Check for VG/thin pool if its BD volume */ - if (brickinfo->vg[0]) { - ret = glusterd_is_valid_vg(brickinfo, 0, msg); - if (ret) - goto out; - /* if anyone of the brick does not have thin support, - disable it for entire volume */ - caps &= brickinfo->caps; - } else - caps = 0; -#endif } ret = dict_set_int32n(rsp_dict, "brick_count", SLEN("brick_count"), @@ -1746,7 +1399,6 @@ glusterd_op_stage_start_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) goto out; } - volinfo->caps = caps; ret = 0; out: if (volinfo) @@ -1766,7 +1418,6 @@ glusterd_op_stage_stop_volume(dict_t *dict, char **op_errstr) int ret = -1; char *volname = NULL; int flags = 0; - gf_boolean_t exists = _gf_false; glusterd_volinfo_t *volinfo = NULL; char msg[2048] = {0}; xlator_t *this = NULL; @@ -1778,15 +1429,11 @@ glusterd_op_stage_stop_volume(dict_t *dict, char **op_errstr) GF_ASSERT(this); ret = glusterd_op_stop_volume_args_get(dict, &volname, &flags); - if (ret) - goto out; - - exists = glusterd_check_volume_exists(volname); - - if (!exists) { - snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname); - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg); - ret = -1; + if (ret) { + snprintf(msg, sizeof(msg), "Failed to get details of volume %s", + volname); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_STOP_ARGS_GET_FAILED, + "Volume name=%s", volname, NULL); goto out; } @@ -1823,6 +1470,18 @@ glusterd_op_stage_stop_volume(dict_t *dict, char **op_errstr) goto out; } + ret = glusterd_check_ganesha_export(volinfo); + if (ret) { + ret = ganesha_manage_export(dict, "off", _gf_false, op_errstr); + if (ret) { + gf_msg(THIS->name, GF_LOG_WARNING, 0, + GD_MSG_NFS_GNS_UNEXPRT_VOL_FAIL, + "Could not " + "unexport volume via NFS-Ganesha"); + ret = 0; + } + } + if (glusterd_is_defrag_on(volinfo)) { snprintf(msg, sizeof(msg), "rebalance session is " @@ -1846,7 +1505,6 @@ glusterd_op_stage_delete_volume(dict_t *dict, char **op_errstr) { int ret = 0; char *volname = NULL; - gf_boolean_t exists = _gf_false; glusterd_volinfo_t *volinfo = NULL; char msg[2048] = {0}; xlator_t *this = NULL; @@ -1861,15 +1519,6 @@ glusterd_op_stage_delete_volume(dict_t *dict, char **op_errstr) goto out; } - exists = glusterd_check_volume_exists(volname); - if (!exists) { - snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname); - ret = -1; - goto out; - } else { - ret = 0; - } - ret = glusterd_volinfo_find(volname, &volinfo); if (ret) { snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname); @@ -1928,7 +1577,7 @@ static int glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo, dict_t *dict, char **op_errstr) { - glusterd_conf_t *priv = NULL; + glusterd_svc_t *svc = NULL; gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID; int ret = 0; char msg[2408] = { @@ -1938,7 +1587,6 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo, "Self-heal daemon is not running. 
" "Check self-heal daemon log file."; - priv = this->private; ret = dict_get_int32n(dict, "heal-op", SLEN("heal-op"), (int32_t *)&heal_op); if (ret) { @@ -1947,6 +1595,7 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo, goto out; } + svc = &(volinfo->shd.svc); switch (heal_op) { case GF_SHD_OP_INVALID: case GF_SHD_OP_HEAL_ENABLE: /* This op should be handled in volume-set*/ @@ -1976,7 +1625,7 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo, goto out; } - if (!priv->shd_svc.online) { + if (!svc->online) { ret = -1; *op_errstr = gf_strdup(offline_msg); goto out; @@ -1990,14 +1639,16 @@ glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo, if (!glusterd_is_volume_replicate(volinfo)) { ret = -1; snprintf(msg, sizeof(msg), - "Volume %s is not of type " + "This command is supported " + "for only volume of replicated " + "type. Volume %s is not of type " "replicate", volinfo->volname); *op_errstr = gf_strdup(msg); goto out; } - if (!priv->shd_svc.online) { + if (!svc->online) { ret = -1; *op_errstr = gf_strdup(offline_msg); goto out; @@ -2066,14 +1717,15 @@ glusterd_op_stage_heal_volume(dict_t *dict, char **op_errstr) if (!glusterd_is_volume_started(volinfo)) { ret = -1; snprintf(msg, sizeof(msg), "Volume %s is not started.", volname); - gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_STARTED, "%s", - msg); + gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_STARTED, + "Volume=%s", volname, NULL); *op_errstr = gf_strdup(msg); goto out; } opt_dict = volinfo->dict; if (!opt_dict) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, NULL); ret = 0; goto out; } @@ -2129,6 +1781,8 @@ glusterd_op_stage_statedump_volume(dict_t *dict, char **op_errstr) ret = glusterd_volinfo_find(volname, &volinfo); if (ret) { snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname); + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOLINFO_GET_FAIL, + "Volume=%s", volname, NULL); goto out; } @@ -2249,25 +1903,30 @@ glusterd_op_create_volume(dict_t *dict, char **op_errstr) glusterd_volinfo_t *volinfo = NULL; gf_boolean_t vol_added = _gf_false; glusterd_brickinfo_t *brickinfo = NULL; + glusterd_brickinfo_t *ta_brickinfo = NULL; xlator_t *this = NULL; char *brick = NULL; + char *ta_brick = NULL; int32_t count = 0; int32_t i = 1; char *bricks = NULL; + char *ta_bricks = NULL; char *brick_list = NULL; + char *ta_brick_list = NULL; char *free_ptr = NULL; + char *ta_free_ptr = NULL; char *saveptr = NULL; + char *ta_saveptr = NULL; char *trans_type = NULL; char *str = NULL; char *username = NULL; char *password = NULL; - int caps = 0; int brickid = 0; char msg[1024] __attribute__((unused)) = { 0, }; char *brick_mount_dir = NULL; - char key[PATH_MAX] = ""; + char key[64] = ""; char *address_family_str = NULL; struct statvfs brickstat = { 0, @@ -2287,8 +1946,6 @@ glusterd_op_create_volume(dict_t *dict, char **op_errstr) goto out; } - pthread_mutex_init(&volinfo->store_volinfo_lock, NULL); - ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); if (ret) { @@ -2375,6 +2032,20 @@ glusterd_op_create_volume(dict_t *dict, char **op_errstr) /* coverity[unused_value] arbiter count is optional */ ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"), &volinfo->arbiter_count); + ret = dict_get_int32n(dict, "thin-arbiter-count", + SLEN("thin-arbiter-count"), + &volinfo->thin_arbiter_count); + if (volinfo->thin_arbiter_count) { + ret = dict_get_strn(dict, "ta-brick", SLEN("ta-brick"), &ta_bricks); + if (ret) { + 
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Unable to get thin arbiter brick for " + "volume %s", + volname); + goto out; + } + } + } else if (GF_CLUSTER_TYPE_DISPERSE == volinfo->type) { ret = dict_get_int32n(dict, "disperse-count", SLEN("disperse-count"), &volinfo->disperse_count); @@ -2463,6 +2134,38 @@ glusterd_op_create_volume(dict_t *dict, char **op_errstr) volinfo->transport_type = GF_TRANSPORT_BOTH_TCP_RDMA; } + if (ta_bricks) { + ta_brick_list = gf_strdup(ta_bricks); + ta_free_ptr = ta_brick_list; + } + + if (volinfo->thin_arbiter_count) { + ta_brick = strtok_r(ta_brick_list + 1, " \n", &ta_saveptr); + + count = 1; + brickid = volinfo->replica_count; + /* assign brickid to ta_bricks + * Following loop runs for number of subvols times. Although + * there is only one ta-brick for a volume but the volume fuse volfile + * requires an entry of ta-brick for each subvolume. Also, the ta-brick + * id needs to be adjusted according to the subvol count. + * For eg- For first subvolume ta-brick id is volname-ta-2, for second + * subvol ta-brick id is volname-ta-5. + */ + while (count <= volinfo->subvol_count) { + ret = glusterd_brickinfo_new_from_brick(ta_brick, &ta_brickinfo, + _gf_false, op_errstr); + if (ret) + goto out; + + GLUSTERD_ASSIGN_BRICKID_TO_TA_BRICKINFO(ta_brickinfo, volinfo, + brickid); + cds_list_add_tail(&ta_brickinfo->brick_list, &volinfo->ta_bricks); + count++; + brickid += volinfo->replica_count + 1; + } + } + if (bricks) { brick_list = gf_strdup(bricks); free_ptr = brick_list; @@ -2481,7 +2184,10 @@ glusterd_op_create_volume(dict_t *dict, char **op_errstr) op_errstr); if (ret) goto out; - + if (volinfo->thin_arbiter_count == 1 && + (brickid + 1) % (volinfo->replica_count + 1) == 0) { + brickid = brickid + 1; + } GLUSTERD_ASSIGN_BRICKID_TO_BRICKINFO(brickinfo, volinfo, brickid++); ret = glusterd_resolve_brick(brickinfo); @@ -2518,25 +2224,6 @@ glusterd_op_create_volume(dict_t *dict, char **op_errstr) goto out; } brickinfo->statfs_fsid = brickstat.f_fsid; - -#ifdef HAVE_BD_XLATOR - if (brickinfo->vg[0]) { - caps = CAPS_BD | CAPS_THIN | CAPS_OFFLOAD_COPY | - CAPS_OFFLOAD_SNAPSHOT; - ret = glusterd_is_valid_vg(brickinfo, 0, msg); - if (ret) { - gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_VG, "%s", - msg); - goto out; - } - - /* if anyone of the brick does not have thin - support, disable it for entire volume */ - caps &= brickinfo->caps; - } else { - caps = 0; - } -#endif } cds_list_add_tail(&brickinfo->brick_list, &volinfo->bricks); @@ -2569,8 +2256,6 @@ glusterd_op_create_volume(dict_t *dict, char **op_errstr) gd_update_volume_op_versions(volinfo); - volinfo->caps = caps; - ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT); if (ret) { glusterd_store_delete_volume(volinfo); @@ -2593,6 +2278,7 @@ glusterd_op_create_volume(dict_t *dict, char **op_errstr) out: GF_FREE(free_ptr); + GF_FREE(ta_free_ptr); if (!vol_added && volinfo) glusterd_volinfo_unref(volinfo); return ret; @@ -2645,6 +2331,7 @@ glusterd_start_volume(glusterd_volinfo_t *volinfo, int flags, gf_boolean_t wait) attach_brick_callback can also call store_volinfo for same volume to update volinfo on disk */ + /* coverity[ORDER_REVERSAL] */ LOCK(&volinfo->lock); ret = glusterd_store_volinfo(volinfo, verincrement); UNLOCK(&volinfo->lock); @@ -2674,6 +2361,8 @@ glusterd_op_start_volume(dict_t *dict, char **op_errstr) xlator_t *this = NULL; glusterd_conf_t *conf = NULL; glusterd_svc_t *svc = NULL; + char *str = NULL; + gf_boolean_t option = _gf_false; this = THIS; 
GF_ASSERT(this); @@ -2731,6 +2420,29 @@ glusterd_op_start_volume(dict_t *dict, char **op_errstr) } } + ret = dict_get_str(conf->opts, GLUSTERD_STORE_KEY_GANESHA_GLOBAL, &str); + if (ret != 0) { + gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED, + "Global dict not present."); + ret = 0; + + } else { + ret = gf_string2boolean(str, &option); + /* Check if the feature is enabled and set nfs-disable to true */ + if (option) { + gf_msg_debug(this->name, 0, "NFS-Ganesha is enabled"); + /* Gluster-nfs should not start when NFS-Ganesha is enabled*/ + ret = dict_set_str(volinfo->dict, NFS_DISABLE_MAP_KEY, "on"); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Failed to set nfs.disable for" + "volume %s", + volname); + goto out; + } + } + } + ret = glusterd_start_volume(volinfo, flags, _gf_true); if (ret) goto out; @@ -2741,25 +2453,6 @@ glusterd_op_start_volume(dict_t *dict, char **op_errstr) if (ret) goto out; } - if (conf->op_version <= GD_OP_VERSION_3_7_6) { - /* - * Starting tier daemon on originator node will fail if - * at least one of the peer host brick for the volume. - * Because The bricks in the peer haven't started when you - * commit on originator node. - * Please upgrade to version greater than GD_OP_VERSION_3_7_6 - */ - if (volinfo->type == GF_CLUSTER_TYPE_TIER) { - if (volinfo->rebal.op != GD_OP_REMOVE_BRICK) { - glusterd_defrag_info_set(volinfo, dict, - GF_DEFRAG_CMD_START_TIER, - GF_DEFRAG_CMD_START, GD_OP_REBALANCE); - } - glusterd_restart_rebalance_for_volume(volinfo); - } - } else { - /* Starting tier daemon is moved into post validate phase */ - } svc = &(volinfo->gfproxyd.svc); ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT); @@ -2798,16 +2491,6 @@ glusterd_stop_volume(glusterd_volinfo_t *volinfo) } } - /* call tier manager before the voluem status is set as stopped - * as tier uses that as a check in the manager - * */ - if (volinfo->type == GF_CLUSTER_TYPE_TIER) { - svc = &(volinfo->tierd.svc); - ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT); - if (ret) - goto out; - } - glusterd_set_volume_status(volinfo, GLUSTERD_STATUS_STOPPED); ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT); @@ -2899,6 +2582,15 @@ glusterd_op_delete_volume(dict_t *dict) goto out; } + if (glusterd_check_ganesha_export(volinfo) && is_origin_glusterd(dict)) { + ret = manage_export_config(volname, "off", NULL); + if (ret) + gf_msg(this->name, GF_LOG_WARNING, 0, 0, + "Could not delete ganesha export conf file " + "for %s", + volname); + } + ret = glusterd_delete_volume(volinfo); out: gf_msg_debug(this->name, 0, "returning %d", ret); @@ -2933,16 +2625,16 @@ glusterd_op_statedump_volume(dict_t *dict, char **op_errstr) if (ret) goto out; gf_msg_debug("glusterd", 0, "Performing statedump on volume %s", volname); - if (strstr(options, "nfs") != NULL) { - ret = glusterd_nfs_statedump(options, option_cnt, op_errstr); + if (strstr(options, "quotad")) { + ret = glusterd_quotad_statedump(options, option_cnt, op_errstr); if (ret) goto out; - - } else if (strstr(options, "quotad")) { - ret = glusterd_quotad_statedump(options, option_cnt, op_errstr); +#ifdef BUILD_GNFS + } else if (strstr(options, "nfs") != NULL) { + ret = glusterd_nfs_statedump(options, option_cnt, op_errstr); if (ret) goto out; - +#endif } else if (strstr(options, "client")) { ret = glusterd_client_statedump(volname, options, option_cnt, op_errstr); @@ -3092,8 +2784,7 @@ glusterd_clearlocks_mount(glusterd_volinfo_t *volinfo, char **xl_opts, runner_add_args(&runner, SBIN_DIR 
"/glusterfs", "-f", NULL); runner_argprintf(&runner, "%s", client_volfpath); runner_add_arg(&runner, "-l"); - runner_argprintf(&runner, - DEFAULT_LOG_FILE_DIRECTORY "/%s-clearlocks-mnt.log", + runner_argprintf(&runner, "%s/%s-clearlocks-mnt.log", priv->logdir, volinfo->volname); if (volinfo->memory_accounting) runner_add_arg(&runner, "--mem-accounting"); @@ -3206,33 +2897,35 @@ glusterd_op_clearlocks_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) char *mntpt = NULL; char **xl_opts = NULL; glusterd_volinfo_t *volinfo = NULL; + xlator_t *this = THIS; + GF_ASSERT(this); ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Failed to get volume name"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Key=volname", NULL); goto out; } gf_msg_debug("glusterd", 0, "Performing clearlocks on volume %s", volname); ret = dict_get_strn(dict, "path", SLEN("path"), &path); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Failed to get path"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "Key=path", + NULL); goto out; } ret = dict_get_strn(dict, "kind", SLEN("kind"), &kind); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Failed to get kind"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "Key=kind", + NULL); goto out; } ret = dict_get_strn(dict, "type", SLEN("type"), &type); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, - "Failed to get type"); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "Key=type", + NULL); goto out; } @@ -3240,10 +2933,9 @@ glusterd_op_clearlocks_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) if (ret) ret = 0; - gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_CLRCLK_VOL_REQ_RCVD, - "Received clear-locks request for " - "volume %s with kind %s type %s and options %s", - volname, kind, type, opts); + gf_smsg(this->name, GF_LOG_INFO, 0, GD_MSG_CLRCLK_VOL_REQ_RCVD, + "Volume=%s, Kind=%s, Type=%s, Options=%s", volname, kind, type, + opts, NULL); if (opts) ret = gf_asprintf(&cmd_str, GF_XATTR_CLRLK_CMD ".t%s.k%s.%s", type, @@ -3256,22 +2948,25 @@ glusterd_op_clearlocks_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) ret = glusterd_volinfo_find(volname, &volinfo); if (ret) { snprintf(msg, sizeof(msg), "Volume %s doesn't exist.", volname); - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "Volume=%s", + volname, NULL); goto out; } xl_opts = GF_CALLOC(volinfo->brick_count + 1, sizeof(char *), gf_gld_mt_charptr); - if (!xl_opts) + if (!xl_opts) { + gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL); goto out; + } ret = glusterd_clearlocks_get_local_client_ports(volinfo, xl_opts); if (ret) { snprintf(msg, sizeof(msg), "Couldn't get port numbers of " "local bricks"); - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRK_PORT_NUM_GET_FAIL, "%s", - msg); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRK_PORT_NUM_GET_FAIL, + NULL); goto out; } @@ -3280,8 +2975,8 @@ glusterd_op_clearlocks_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) snprintf(msg, sizeof(msg), "Creating mount directory " "for clear-locks failed."); - gf_msg(THIS->name, GF_LOG_ERROR, 0, - GD_MSG_CLRLOCKS_MOUNTDIR_CREATE_FAIL, "%s", msg); + gf_smsg(this->name, GF_LOG_ERROR, 0, + GD_MSG_CLRLOCKS_MOUNTDIR_CREATE_FAIL, NULL); goto out; } @@ -3290,16 +2985,15 @@ 
glusterd_op_clearlocks_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) snprintf(msg, sizeof(msg), "Failed to mount clear-locks " "maintenance client."); - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_CLRLOCKS_CLNT_MOUNT_FAIL, - "%s", msg); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLRLOCKS_CLNT_MOUNT_FAIL, + NULL); goto out; } ret = glusterd_clearlocks_send_cmd(volinfo, cmd_str, path, result, msg, sizeof(msg), mntpt); if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_CLRCLK_SND_CMD_FAIL, "%s", - msg); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLRCLK_SND_CMD_FAIL, NULL); goto umount; } @@ -3310,16 +3004,16 @@ glusterd_op_clearlocks_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict) snprintf(msg, sizeof(msg), "Failed to set clear-locks " "result"); - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "%s", msg); + gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Key=lk-summary", NULL); } umount: glusterd_clearlocks_unmount(volinfo, mntpt); if (glusterd_clearlocks_rmdir_mount(volinfo, mntpt)) - gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_CLRLOCKS_CLNT_UMOUNT_FAIL, - "Couldn't unmount " - "clear-locks mount point"); + gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_CLRLOCKS_CLNT_UMOUNT_FAIL, + NULL); out: if (ret) |
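The staging hunk above now fetches replica-count or disperse-count itself before calling the reworked glusterd_check_brick_order(). For readers who want the rule rather than the glusterd plumbing, here is a minimal standalone sketch, not glusterd code: it assumes the brick hosts have already been resolved into comparable strings in a hypothetical hosts[] array, whereas the real check walks getaddrinfo() results through glusterd_compare_addrinfo().

```c
#include <stdio.h>
#include <string.h>

/* Returns -1 if any replica/disperse group of sub_count consecutive bricks
 * reuses a host, 0 otherwise.  Assumes brick_count is a multiple of
 * sub_count, which volume staging has already validated. */
static int
check_brick_order(const char **hosts, int brick_count, int sub_count)
{
    for (int i = 0; i < brick_count; i += sub_count) {
        for (int j = i; j < i + sub_count - 1; j++) {
            for (int k = j + 1; k < i + sub_count; k++) {
                if (strcmp(hosts[j], hosts[k]) == 0)
                    return -1; /* two bricks of one subvol on one server */
            }
        }
    }
    return 0;
}

int
main(void)
{
    /* replica 2, two subvolumes: the second subvol places both bricks on node2 */
    const char *hosts[] = {"node1", "node2", "node2", "node2"};
    printf("%d\n", check_brick_order(hosts, 4, 2)); /* prints -1 */
    return 0;
}
```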
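The ta-brick numbering added to glusterd_op_create_volume() follows the comment embedded in that hunk: the first ta-brick id equals replica_count, and each further subvolume advances it by replica_count + 1 so the single ta-brick gets one id slot per subvolume in the fuse volfile. A minimal sketch of just that arithmetic, with a hypothetical volume name and counts chosen to match the example in the comment:

```c
#include <stdio.h>

int
main(void)
{
    const char *volname = "testvol"; /* hypothetical name */
    int replica_count = 2;           /* thin-arbiter volumes are replica 2 */
    int subvol_count = 2;

    /* First ta-brick id is replica_count; every further subvolume skips its
     * replica_count data bricks plus one ta slot, i.e. += replica_count + 1. */
    int brickid = replica_count;
    for (int count = 1; count <= subvol_count; count++) {
        printf("%s-ta-%d\n", volname, brickid); /* testvol-ta-2, testvol-ta-5 */
        brickid += replica_count + 1;
    }
    return 0;
}
```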