Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-brick-ops.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-brick-ops.c  1314
1 file changed, 344 insertions(+), 970 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
index e6ea9c9881d..e56cd0e6c74 100644
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
@@ -7,7 +7,7 @@
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
-#include "common-utils.h"
+#include <glusterfs/common-utils.h>
#include "cli1-xdr.h"
#include "xdr-generic.h"
#include "glusterd.h"
@@ -20,42 +20,12 @@
#include "glusterd-svc-helper.h"
#include "glusterd-messages.h"
#include "glusterd-server-quorum.h"
-#include "run.h"
-#include "glusterd-volgen.h"
-#include "syscall.h"
+#include <glusterfs/run.h>
+#include <glusterfs/syscall.h>
#include <sys/signal.h>
/* misc */
-gf_boolean_t
-glusterd_is_tiering_supported(char *op_errstr)
-{
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
- gf_boolean_t supported = _gf_false;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO("glusterd", this, out);
-
- conf = this->private;
- GF_VALIDATE_OR_GOTO(this->name, conf, out);
-
- if (conf->op_version < GD_OP_VERSION_3_7_0)
- goto out;
-
- supported = _gf_true;
-
-out:
- if (!supported && op_errstr != NULL && conf)
- sprintf(op_errstr,
- "Tier operation failed. The cluster is "
- "operating at version %d. Tiering"
- " is unavailable in this version.",
- conf->op_version);
-
- return supported;
-}
-
/* In this function, we decide, based on the 'count' of the brick,
where to add it in the current volume. 'count' tells us already
how many of the given bricks are added. other argument are self-
@@ -118,110 +88,6 @@ insert_brick:
}
static int
-gd_addbr_validate_stripe_count(glusterd_volinfo_t *volinfo, int stripe_count,
- int total_bricks, int *type, char *err_str,
- size_t err_len)
-{
- int ret = -1;
-
- switch (volinfo->type) {
- case GF_CLUSTER_TYPE_NONE:
- if ((volinfo->brick_count * stripe_count) == total_bricks) {
- /* Change the volume type */
- *type = GF_CLUSTER_TYPE_STRIPE;
- gf_msg(THIS->name, GF_LOG_INFO, 0,
- GD_MSG_VOL_TYPE_CHANGING_INFO,
- "Changing the type of volume %s from "
- "'distribute' to 'stripe'",
- volinfo->volname);
- ret = 0;
- goto out;
- } else {
- snprintf(err_str, err_len,
- "Incorrect number of "
- "bricks (%d) supplied for stripe count (%d).",
- (total_bricks - volinfo->brick_count), stripe_count);
- gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
- "%s", err_str);
- goto out;
- }
- break;
- case GF_CLUSTER_TYPE_REPLICATE:
- if (!(total_bricks % (volinfo->replica_count * stripe_count))) {
- /* Change the volume type */
- *type = GF_CLUSTER_TYPE_STRIPE_REPLICATE;
- gf_msg(THIS->name, GF_LOG_INFO, 0,
- GD_MSG_VOL_TYPE_CHANGING_INFO,
- "Changing the type of volume %s from "
- "'replicate' to 'replicate-stripe'",
- volinfo->volname);
- ret = 0;
- goto out;
- } else {
- snprintf(err_str, err_len,
- "Incorrect number of "
- "bricks (%d) supplied for changing volume's "
- "stripe count to %d, need at least %d bricks",
- (total_bricks - volinfo->brick_count), stripe_count,
- (volinfo->replica_count * stripe_count));
- gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
- "%s", err_str);
- goto out;
- }
- break;
- case GF_CLUSTER_TYPE_STRIPE:
- case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
- if (stripe_count < volinfo->stripe_count) {
- snprintf(err_str, err_len,
- "Incorrect stripe count (%d) supplied. "
- "Volume already has stripe count (%d)",
- stripe_count, volinfo->stripe_count);
- gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
- "%s", err_str);
- goto out;
- }
- if (stripe_count == volinfo->stripe_count) {
- if (!(total_bricks % volinfo->dist_leaf_count)) {
- /* its same as the one which exists */
- ret = 1;
- goto out;
- }
- }
- if (stripe_count > volinfo->stripe_count) {
- /* We have to make sure before and after 'add-brick',
- the number or subvolumes for distribute will remain
- same, when stripe count is given */
- if ((volinfo->brick_count *
- (stripe_count * volinfo->replica_count)) ==
- (total_bricks * volinfo->dist_leaf_count)) {
- /* Change the dist_leaf_count */
- gf_msg(THIS->name, GF_LOG_INFO, 0,
- GD_MSG_STRIPE_COUNT_CHANGE_INFO,
- "Changing the stripe count of "
- "volume %s from %d to %d",
- volinfo->volname, volinfo->stripe_count,
- stripe_count);
- ret = 0;
- goto out;
- }
- }
- break;
- case GF_CLUSTER_TYPE_DISPERSE:
- snprintf(err_str, err_len,
- "Volume %s cannot be converted "
- "from dispersed to striped-"
- "dispersed",
- volinfo->volname);
- gf_msg(THIS->name, GF_LOG_ERROR, EPERM, GD_MSG_OP_NOT_PERMITTED,
- "%s", err_str);
- goto out;
- }
-
-out:
- return ret;
-}
-
-static int
gd_addbr_validate_replica_count(glusterd_volinfo_t *volinfo, int replica_count,
int arbiter_count, int total_bricks, int *type,
char *err_str, int err_len)
@@ -252,32 +118,7 @@ gd_addbr_validate_replica_count(glusterd_volinfo_t *volinfo, int replica_count,
goto out;
}
break;
- case GF_CLUSTER_TYPE_STRIPE:
- if (!(total_bricks % (volinfo->dist_leaf_count * replica_count))) {
- /* Change the volume type */
- *type = GF_CLUSTER_TYPE_STRIPE_REPLICATE;
- gf_msg(THIS->name, GF_LOG_INFO, 0,
- GD_MSG_VOL_TYPE_CHANGING_INFO,
- "Changing the type of volume %s from "
- "'stripe' to 'replicate-stripe'",
- volinfo->volname);
- ret = 0;
- goto out;
- } else {
- snprintf(err_str, err_len,
- "Incorrect number of "
- "bricks (%d) supplied for changing volume's "
- "replica count to %d, need at least %d "
- "bricks",
- (total_bricks - volinfo->brick_count), replica_count,
- (volinfo->dist_leaf_count * replica_count));
- gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
- "%s", err_str);
- goto out;
- }
- break;
case GF_CLUSTER_TYPE_REPLICATE:
- case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
if (replica_count < volinfo->replica_count) {
snprintf(err_str, err_len,
"Incorrect replica count (%d) supplied. "
@@ -341,25 +182,22 @@ gd_rmbr_validate_replica_count(glusterd_volinfo_t *volinfo,
{
int ret = -1;
int replica_nodes = 0;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT(this);
switch (volinfo->type) {
- case GF_CLUSTER_TYPE_TIER:
- ret = 1;
- goto out;
-
case GF_CLUSTER_TYPE_NONE:
- case GF_CLUSTER_TYPE_STRIPE:
case GF_CLUSTER_TYPE_DISPERSE:
snprintf(err_str, err_len,
"replica count (%d) option given for non replicate "
"volume %s",
replica_count, volinfo->volname);
- gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_REPLICA, "%s",
- err_str);
+ gf_smsg(this->name, GF_LOG_WARNING, EINVAL, GD_MSG_INVALID_ARGUMENT,
+ err_str, NULL);
goto out;
case GF_CLUSTER_TYPE_REPLICATE:
- case GF_CLUSTER_TYPE_STRIPE_REPLICATE:
/* in remove brick, you can only reduce the replica count */
if (replica_count > volinfo->replica_count) {
snprintf(err_str, err_len,
@@ -367,8 +205,8 @@ gd_rmbr_validate_replica_count(glusterd_volinfo_t *volinfo,
"than volume %s's replica count (%d)",
replica_count, volinfo->volname,
volinfo->replica_count);
- gf_msg(THIS->name, GF_LOG_WARNING, EINVAL, GD_MSG_INVALID_ENTRY,
- "%s", err_str);
+ gf_smsg(this->name, GF_LOG_WARNING, EINVAL,
+ GD_MSG_INVALID_ARGUMENT, err_str, NULL);
goto out;
}
if (replica_count == volinfo->replica_count) {
@@ -382,8 +220,8 @@ gd_rmbr_validate_replica_count(glusterd_volinfo_t *volinfo,
"(or %dxN)",
brick_count, volinfo->dist_leaf_count,
volinfo->dist_leaf_count);
- gf_msg(THIS->name, GF_LOG_WARNING, EINVAL,
- GD_MSG_INVALID_ENTRY, "%s", err_str);
+ gf_smsg(this->name, GF_LOG_WARNING, EINVAL,
+ GD_MSG_INVALID_ARGUMENT, err_str, NULL);
goto out;
}
ret = 1;
@@ -398,6 +236,8 @@ gd_rmbr_validate_replica_count(glusterd_volinfo_t *volinfo,
"need %d(xN) bricks for reducing replica "
"count of the volume from %d to %d",
replica_nodes, volinfo->replica_count, replica_count);
+ gf_smsg(this->name, GF_LOG_WARNING, EINVAL,
+ GD_MSG_INVALID_ARGUMENT, err_str, NULL);
goto out;
}
break;
@@ -447,6 +287,7 @@ __glusterd_handle_add_brick(rpcsvc_request_t *req)
// failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
snprintf(err_str, sizeof(err_str), "Garbage args received");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);
goto out;
}
@@ -482,10 +323,13 @@ __glusterd_handle_add_brick(rpcsvc_request_t *req)
goto out;
}
- if (!(ret = glusterd_check_volume_exists(volname))) {
- ret = -1;
- snprintf(err_str, sizeof(err_str), "Volume %s does not exist", volname);
- gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND, "%s",
+ ret = glusterd_volinfo_find(volname, &volinfo);
+ if (ret) {
+ snprintf(err_str, sizeof(err_str),
+ "Unable to get volinfo "
+ "for volume name %s",
+ volname);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, "%s",
err_str);
goto out;
}
@@ -527,57 +371,8 @@ __glusterd_handle_add_brick(rpcsvc_request_t *req)
goto out;
}
- ret = glusterd_volinfo_find(volname, &volinfo);
- if (ret) {
- snprintf(err_str, sizeof(err_str),
- "Unable to get volinfo "
- "for volume name %s",
- volname);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, "%s",
- err_str);
- goto out;
- }
-
total_bricks = volinfo->brick_count + brick_count;
- if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) {
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- snprintf(err_str, sizeof(err_str), "Volume %s is already a tier.",
- volname);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_ALREADY_TIER, "%s",
- err_str);
- ret = -1;
- goto out;
- }
-
- if (glusterd_is_tiering_supported(err_str) == _gf_false) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED,
- "Tiering not supported at this version");
- ret = -1;
- goto out;
- }
-
- ret = dict_get_int32n(dict, "hot-type", SLEN("hot-type"), &type);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "failed to get type from dictionary");
- goto out;
- }
-
- goto brick_val;
- }
-
- ret = glusterd_disallow_op_for_tier(volinfo, GD_OP_ADD_BRICK, -1);
- if (ret) {
- snprintf(err_str, sizeof(err_str),
- "Add-brick operation is "
- "not supported on a tiered volume %s",
- volname);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_UNSUPPORTED, "%s",
- err_str);
- goto out;
- }
-
if (!stripe_count && !replica_count) {
if (volinfo->type == GF_CLUSTER_TYPE_NONE)
goto brick_val;
@@ -601,31 +396,6 @@ __glusterd_handle_add_brick(rpcsvc_request_t *req)
count is given */
}
- /* These bricks needs to be added one per a replica or stripe volume */
- if (stripe_count) {
- ret = gd_addbr_validate_stripe_count(volinfo, stripe_count,
- total_bricks, &type, err_str,
- sizeof(err_str));
- if (ret == -1) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COUNT_VALIDATE_FAILED,
- "%s", err_str);
- goto out;
- }
-
- /* if stripe count is same as earlier, set it back to 0 */
- if (ret == 1)
- stripe_count = 0;
-
- ret = dict_set_int32n(dict, "stripe-count", SLEN("stripe-count"),
- stripe_count);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "failed to set the stripe-count in dict");
- goto out;
- }
- goto brick_val;
- }
-
ret = gd_addbr_validate_replica_count(volinfo, replica_count, arbiter_count,
total_bricks, &type, err_str,
sizeof(err_str));
@@ -745,6 +515,8 @@ subvol_matcher_verify(int *subvols, glusterd_volinfo_t *volinfo, char *err_str,
int i = 0;
int ret = 0;
int count = volinfo->replica_count - replica_count;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
if (replica_count && subvols) {
for (i = 0; i < volinfo->subvol_count; i++) {
@@ -754,6 +526,8 @@ subvol_matcher_verify(int *subvols, glusterd_volinfo_t *volinfo, char *err_str,
"Remove exactly %d"
" brick(s) from each subvolume.",
count);
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_BRICK_SUBVOL_VERIFY_FAIL, err_str, NULL);
break;
}
}
@@ -767,6 +541,8 @@ subvol_matcher_verify(int *subvols, glusterd_volinfo_t *volinfo, char *err_str,
ret = -1;
snprintf(err_str, err_len, "Bricks not from same subvol for %s",
vol_type);
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_BRICK_SUBVOL_VERIFY_FAIL, err_str, NULL);
break;
}
} while (++i < volinfo->subvol_count);
@@ -780,43 +556,6 @@ subvol_matcher_destroy(int *subvols)
GF_FREE(subvols);
}
-int
-glusterd_set_detach_bricks(dict_t *dict, glusterd_volinfo_t *volinfo)
-{
- char key[64] = "";
- char value[2048] = ""; /* hostname + path */
- int brick_num = 0;
- int hot_brick_num = 0;
- glusterd_brickinfo_t *brickinfo;
- int ret = 0;
- int32_t len = 0;
-
- /* cold tier bricks at tail of list so use reverse iteration */
- cds_list_for_each_entry_reverse(brickinfo, &volinfo->bricks, brick_list)
- {
- brick_num++;
- if (brick_num > volinfo->tier_info.cold_brick_count) {
- hot_brick_num++;
- sprintf(key, "brick%d", hot_brick_num);
- len = snprintf(value, sizeof(value), "%s:%s", brickinfo->hostname,
- brickinfo->path);
- if ((len < 0) || (len >= sizeof(value))) {
- return -1;
- }
-
- ret = dict_set_str(dict, key, strdup(value));
- if (ret)
- break;
- }
- }
-
- ret = dict_set_int32n(dict, "count", SLEN("count"), hot_brick_num);
- if (ret)
- return -1;
-
- return hot_brick_num;
-}
-
static int
glusterd_remove_brick_validate_arbiters(glusterd_volinfo_t *volinfo,
int32_t count, int32_t replica_count,
@@ -828,9 +567,11 @@ glusterd_remove_brick_validate_arbiters(glusterd_volinfo_t *volinfo,
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_brickinfo_t *last = NULL;
char *arbiter_array = NULL;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT(this);
- if ((volinfo->type != GF_CLUSTER_TYPE_REPLICATE) &&
- (volinfo->type != GF_CLUSTER_TYPE_STRIPE_REPLICATE))
+ if (volinfo->type != GF_CLUSTER_TYPE_REPLICATE)
goto out;
if (!replica_count || !volinfo->arbiter_count)
@@ -847,6 +588,8 @@ glusterd_remove_brick_validate_arbiters(glusterd_volinfo_t *volinfo,
"Remove arbiter "
"brick(s) only when converting from "
"arbiter to replica 2 subvolume.");
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_REMOVE_ARBITER_BRICK, err_str, NULL);
ret = -1;
goto out;
}
@@ -870,7 +613,9 @@ glusterd_remove_brick_validate_arbiters(glusterd_volinfo_t *volinfo,
snprintf(err_str, err_len,
"Removed bricks "
"must contain arbiter when converting"
- " to plain distrubute.");
+ " to plain distribute.");
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_REMOVE_ARBITER_BRICK, err_str, NULL);
ret = -1;
break;
}
@@ -894,6 +639,7 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req)
char key[64] = "";
int keylen;
int i = 1;
+ glusterd_conf_t *conf = NULL;
glusterd_volinfo_t *volinfo = NULL;
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_brickinfo_t **brickinfo_list = NULL;
@@ -912,12 +658,15 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req)
GF_ASSERT(req);
this = THIS;
GF_ASSERT(this);
+ conf = this->private;
+ GF_ASSERT(conf);
ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
if (ret < 0) {
// failed to decode msg;
req->rpc_err = GARBAGE_ARGS;
snprintf(err_str, sizeof(err_str), "Received garbage args");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);
goto out;
}
@@ -970,14 +719,6 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req)
goto out;
}
- if ((volinfo->type == GF_CLUSTER_TYPE_TIER) &&
- (glusterd_is_tiering_supported(err_str) == _gf_false)) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED,
- "Tiering not supported at this version");
- ret = -1;
- goto out;
- }
-
ret = dict_get_int32n(dict, "command", SLEN("command"), &cmd);
if (ret) {
snprintf(err_str, sizeof(err_str),
@@ -988,15 +729,6 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req)
goto out;
}
- ret = glusterd_disallow_op_for_tier(volinfo, GD_OP_REMOVE_BRICK, cmd);
- if (ret) {
- snprintf(err_str, sizeof(err_str),
- "Removing brick from a Tier volume is not allowed");
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_UNSUPPORTED, "%s",
- err_str);
- goto out;
- }
-
ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
&replica_count);
if (!ret) {
@@ -1028,39 +760,12 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req)
/* 'vol_type' is used for giving the meaning full error msg for user */
if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) {
strcpy(vol_type, "replica");
- } else if (volinfo->type == GF_CLUSTER_TYPE_STRIPE) {
- strcpy(vol_type, "stripe");
- } else if (volinfo->type == GF_CLUSTER_TYPE_STRIPE_REPLICATE) {
- strcpy(vol_type, "stripe-replicate");
} else if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
strcpy(vol_type, "disperse");
} else {
strcpy(vol_type, "distribute");
}
- /* Do not allow remove-brick if the volume is a stripe volume*/
- if ((volinfo->type == GF_CLUSTER_TYPE_STRIPE) &&
- (volinfo->brick_count == volinfo->stripe_count)) {
- snprintf(err_str, sizeof(err_str),
- "Removing brick from a stripe volume is not allowed");
- gf_msg(this->name, GF_LOG_ERROR, EPERM, GD_MSG_OP_NOT_PERMITTED, "%s",
- err_str);
- ret = -1;
- goto out;
- }
-
- if (!replica_count && (volinfo->type == GF_CLUSTER_TYPE_STRIPE_REPLICATE) &&
- (volinfo->brick_count == volinfo->dist_leaf_count)) {
- snprintf(err_str, sizeof(err_str),
- "Removing bricks from stripe-replicate"
- " configuration is not allowed without reducing "
- "replica or stripe count explicitly.");
- gf_msg(this->name, GF_LOG_ERROR, EPERM, GD_MSG_OP_NOT_PERMITTED_AC_REQD,
- "%s", err_str);
- ret = -1;
- goto out;
- }
-
if (!replica_count && (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) &&
(volinfo->brick_count == volinfo->dist_leaf_count)) {
snprintf(err_str, sizeof(err_str),
@@ -1075,8 +780,7 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req)
/* Do not allow remove-brick if the bricks given is less than
the replica count or stripe count */
- if (!replica_count && (volinfo->type != GF_CLUSTER_TYPE_NONE) &&
- (volinfo->type != GF_CLUSTER_TYPE_TIER)) {
+ if (!replica_count && (volinfo->type != GF_CLUSTER_TYPE_NONE)) {
if (volinfo->dist_leaf_count && (count % volinfo->dist_leaf_count)) {
snprintf(err_str, sizeof(err_str),
"Remove brick "
@@ -1089,18 +793,13 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req)
}
}
- /* subvol match is not required for tiered volume*/
if ((volinfo->type != GF_CLUSTER_TYPE_NONE) &&
- (volinfo->type != GF_CLUSTER_TYPE_TIER) &&
(volinfo->subvol_count > 1)) {
ret = subvol_matcher_init(&subvols, volinfo->subvol_count);
if (ret)
goto out;
}
- if (volinfo->type == GF_CLUSTER_TYPE_TIER)
- count = glusterd_set_detach_bricks(dict, volinfo);
-
brickinfo_list = GF_CALLOC(count, sizeof(*brickinfo_list),
gf_common_mt_pointer);
if (!brickinfo_list) {
@@ -1141,18 +840,10 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req)
(volinfo->brick_count <= volinfo->dist_leaf_count))
continue;
- /* Find which subvolume the brick belongs to.
- * subvol match is not required for tiered volume
- *
- */
- if (volinfo->type != GF_CLUSTER_TYPE_TIER)
- subvol_matcher_update(subvols, volinfo, brickinfo);
+ subvol_matcher_update(subvols, volinfo, brickinfo);
}
- /* Check if the bricks belong to the same subvolumes.*/
- /* subvol match is not required for tiered volume*/
if ((volinfo->type != GF_CLUSTER_TYPE_NONE) &&
- (volinfo->type != GF_CLUSTER_TYPE_TIER) &&
(volinfo->subvol_count > 1)) {
ret = subvol_matcher_verify(subvols, volinfo, err_str, sizeof(err_str),
vol_type, replica_count);
@@ -1166,7 +857,17 @@ __glusterd_handle_remove_brick(rpcsvc_request_t *req)
if (ret)
goto out;
- ret = glusterd_op_begin_synctask(req, GD_OP_REMOVE_BRICK, dict);
+ if (conf->op_version < GD_OP_VERSION_8_0) {
+ gf_msg_debug(this->name, 0,
+ "The cluster is operating at "
+ "version less than %d. remove-brick operation"
+ "falling back to syncop framework.",
+ GD_OP_VERSION_8_0);
+ ret = glusterd_op_begin_synctask(req, GD_OP_REMOVE_BRICK, dict);
+ } else {
+ ret = glusterd_mgmt_v3_initiate_all_phases(req, GD_OP_REMOVE_BRICK,
+ dict);
+ }
out:
if (ret) {
@@ -1316,13 +1017,13 @@ glusterd_op_perform_add_bricks(glusterd_volinfo_t *volinfo, int32_t count,
0,
};
gf_boolean_t restart_needed = 0;
- int caps = 0;
int brickid = 0;
char key[64] = "";
char *brick_mount_dir = NULL;
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
gf_boolean_t is_valid_add_brick = _gf_false;
+ gf_boolean_t restart_shd = _gf_false;
struct statvfs brickstat = {
0,
};
@@ -1410,10 +1111,7 @@ glusterd_op_perform_add_bricks(glusterd_volinfo_t *volinfo, int32_t count,
}
brickinfo->statfs_fsid = brickstat.f_fsid;
}
- /* hot tier bricks are added to head of brick list */
- if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) {
- cds_list_add(&brickinfo->brick_list, &volinfo->bricks);
- } else if (stripe_count || replica_count) {
+ if (stripe_count || replica_count) {
add_brick_at_right_order(brickinfo, volinfo, (i - 1), stripe_count,
replica_count);
} else {
@@ -1478,22 +1176,19 @@ glusterd_op_perform_add_bricks(glusterd_volinfo_t *volinfo, int32_t count,
if (count)
brick = strtok_r(brick_list + 1, " \n", &saveptr);
-#ifdef HAVE_BD_XLATOR
- if (brickinfo->vg[0])
- caps = CAPS_BD | CAPS_THIN | CAPS_OFFLOAD_COPY | CAPS_OFFLOAD_SNAPSHOT;
-#endif
- /* This check needs to be added to distinguish between
- * attach-tier commands and add-brick commands.
- * When a tier is attached, adding is done via add-brick
- * and setting of pending xattrs shouldn't be done for
- * attach-tiers as they are virtually new volumes.
- */
if (glusterd_is_volume_replicate(volinfo)) {
- if (replica_count &&
- !dict_getn(dict, "attach-tier", SLEN("attach-tier")) &&
- conf->op_version >= GD_OP_VERSION_3_7_10) {
+ if (replica_count && conf->op_version >= GD_OP_VERSION_3_7_10) {
is_valid_add_brick = _gf_true;
+ if (volinfo->status == GLUSTERD_STATUS_STARTED) {
+ ret = volinfo->shd.svc.stop(&(volinfo->shd.svc), SIGTERM);
+ if (ret) {
+ gf_msg("glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_GLUSTER_SERVICES_STOP_FAIL,
+ "Failed to stop shd for %s.", volinfo->volname);
+ }
+ restart_shd = _gf_true;
+ }
ret = generate_dummy_client_volfiles(volinfo);
if (ret) {
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
@@ -1508,22 +1203,6 @@ glusterd_op_perform_add_bricks(glusterd_volinfo_t *volinfo, int32_t count,
_gf_true);
if (ret)
goto out;
-#ifdef HAVE_BD_XLATOR
- char msg[1024] = "";
- /* Check for VG/thin pool if its BD volume */
- if (brickinfo->vg[0]) {
- ret = glusterd_is_valid_vg(brickinfo, 0, msg);
- if (ret) {
- gf_msg(THIS->name, GF_LOG_CRITICAL, 0, GD_MSG_INVALID_VG, "%s",
- msg);
- goto out;
- }
- /* if anyone of the brick does not have thin support,
- disable it for entire volume */
- caps &= brickinfo->caps;
- } else
- caps = 0;
-#endif
if (gf_uuid_is_null(brickinfo->uuid)) {
ret = glusterd_resolve_brick(brickinfo);
@@ -1568,7 +1247,6 @@ glusterd_op_perform_add_bricks(glusterd_volinfo_t *volinfo, int32_t count,
dict_foreach(volinfo->gsync_slaves, _glusterd_restart_gsync_session,
&param);
}
- volinfo->caps = caps;
generate_volfiles:
if (conf->op_version <= GD_OP_VERSION_3_7_5) {
@@ -1585,6 +1263,14 @@ generate_volfiles:
out:
GF_FREE(free_ptr1);
GF_FREE(free_ptr2);
+ if (restart_shd) {
+ if (volinfo->shd.svc.manager(&(volinfo->shd.svc), volinfo,
+ PROC_START_NO_WAIT)) {
+ gf_msg("glusterd", GF_LOG_CRITICAL, 0,
+ GD_MSG_GLUSTER_SERVICE_START_FAIL,
+ "Failed to start shd for %s.", volinfo->volname);
+ }
+ }
gf_msg_debug("glusterd", 0, "Returning %d", ret);
return ret;
@@ -1673,14 +1359,14 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
"Unable to get volume name");
goto out;
}
ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
"Unable to find volume: %s", volname);
goto out;
}
@@ -1692,13 +1378,7 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
&replica_count);
if (ret) {
- gf_msg_debug(THIS->name, 0, "Unable to get replica count");
- }
-
- ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"),
- &arbiter_count);
- if (ret) {
- gf_msg_debug(THIS->name, 0, "No arbiter count present in the dict");
+ gf_msg_debug(this->name, 0, "Unable to get replica count");
}
if (replica_count > 0) {
@@ -1712,19 +1392,20 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
}
}
- if (glusterd_is_volume_replicate(volinfo)) {
+ glusterd_add_peers_to_auth_list(volname);
+
+ if (replica_count && glusterd_is_volume_replicate(volinfo)) {
/* Do not allow add-brick for stopped volumes when replica-count
* is being increased.
*/
- if (conf->op_version >= GD_OP_VERSION_3_7_10 &&
- !dict_getn(dict, "attach-tier", SLEN("attach-tier")) &&
- replica_count && GLUSTERD_STATUS_STOPPED == volinfo->status) {
+ if (GLUSTERD_STATUS_STOPPED == volinfo->status &&
+ conf->op_version >= GD_OP_VERSION_3_7_10) {
ret = -1;
snprintf(msg, sizeof(msg),
" Volume must not be in"
" stopped state when replica-count needs to "
" be increased.");
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
msg);
*op_errstr = gf_strdup(msg);
goto out;
@@ -1732,25 +1413,31 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
/* op-version check for replica 2 to arbiter conversion. If we
* don't have this check, an older peer added as arbiter brick
* will not have the arbiter xlator in its volfile. */
- if ((conf->op_version < GD_OP_VERSION_3_8_0) && (arbiter_count == 1) &&
- (replica_count == 3)) {
- ret = -1;
- snprintf(msg, sizeof(msg),
- "Cluster op-version must "
- "be >= 30800 to add arbiter brick to a "
- "replica 2 volume.");
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
- msg);
- *op_errstr = gf_strdup(msg);
- goto out;
+ if ((replica_count == 3) && (conf->op_version < GD_OP_VERSION_3_8_0)) {
+ ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"),
+ &arbiter_count);
+ if (ret) {
+ gf_msg_debug(this->name, 0,
+ "No arbiter count present in the dict");
+ } else if (arbiter_count == 1) {
+ ret = -1;
+ snprintf(msg, sizeof(msg),
+ "Cluster op-version must "
+ "be >= 30800 to add arbiter brick to a "
+ "replica 2 volume.");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
+ msg);
+ *op_errstr = gf_strdup(msg);
+ goto out;
+ }
}
/* Do not allow increasing replica count for arbiter volumes. */
- if (replica_count && volinfo->arbiter_count) {
+ if (volinfo->arbiter_count) {
ret = -1;
snprintf(msg, sizeof(msg),
"Increasing replica count "
"for arbiter volumes is not supported.");
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
msg);
*op_errstr = gf_strdup(msg);
goto out;
@@ -1759,6 +1446,43 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
is_force = dict_get_str_boolean(dict, "force", _gf_false);
+ /* Check brick order if the volume type is replicate or disperse. If
+ * force at the end of command not given then check brick order.
+ * doing this check at the originator node is sufficient.
+ */
+
+ if (!is_force && is_origin_glusterd(dict)) {
+ ret = 0;
+ if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) {
+ gf_msg_debug(this->name, 0,
+ "Replicate cluster type "
+ "found. Checking brick order.");
+ if (replica_count)
+ ret = glusterd_check_brick_order(dict, msg, volinfo->type,
+ &volname, &bricks, &count,
+ replica_count);
+ else
+ ret = glusterd_check_brick_order(dict, msg, volinfo->type,
+ &volname, &bricks, &count,
+ volinfo->replica_count);
+ } else if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
+ gf_msg_debug(this->name, 0,
+ "Disperse cluster type"
+ " found. Checking brick order.");
+ ret = glusterd_check_brick_order(dict, msg, volinfo->type, &volname,
+ &bricks, &count,
+ volinfo->disperse_count);
+ }
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER,
+ "Not adding brick because of "
+ "bad brick order. %s",
+ msg);
+ *op_errstr = gf_strdup(msg);
+ goto out;
+ }
+ }
+
if (volinfo->replica_count < replica_count && !is_force) {
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
{
@@ -1775,7 +1499,7 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if (len < 0) {
strcpy(msg, "<error>");
}
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL, "%s",
msg);
*op_errstr = gf_strdup(msg);
goto out;
@@ -1807,46 +1531,40 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
"Volume name %s rebalance is in "
"progress. Please retry after completion",
volname);
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_OIP_RETRY_LATER, "%s", msg);
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OIP_RETRY_LATER, "%s", msg);
*op_errstr = gf_strdup(msg);
ret = -1;
goto out;
}
- if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) {
- /*
- * This check is needed because of add/remove brick
- * is not supported on a tiered volume. So once a tier
- * is attached we cannot commit or stop the remove-brick
- * task. Please change this comment once we start supporting
- * add/remove brick on a tiered volume.
- */
- if (!gd_is_remove_brick_committed(volinfo)) {
- snprintf(msg, sizeof(msg),
- "An earlier remove-brick "
- "task exists for volume %s. Either commit it"
- " or stop it before attaching a tier.",
- volinfo->volname);
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_OLD_REMOVE_BRICK_EXISTS,
- "%s", msg);
- *op_errstr = gf_strdup(msg);
- ret = -1;
+ if (volinfo->snap_count > 0 || !cds_list_empty(&volinfo->snap_volumes)) {
+ snprintf(msg, sizeof(msg),
+ "Volume %s has %" PRIu64
+ " snapshots. "
+ "Changing the volume configuration will not effect snapshots."
+ "But the snapshot brick mount should be intact to "
+ "make them function.",
+ volname, volinfo->snap_count);
+ gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_SNAP_WARN, "%s", msg);
+ msg[0] = '\0';
+ }
+
+ if (!count) {
+ ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get count");
goto out;
}
}
- ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
- if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Unable to get count");
- goto out;
- }
-
- ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks);
- if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Unable to get bricks");
- goto out;
+ if (!bricks) {
+ ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get bricks");
+ goto out;
+ }
}
if (bricks) {
@@ -1865,7 +1583,7 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
"brick path %s is "
"too long",
brick);
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRKPATH_TOO_LONG, "%s",
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRKPATH_TOO_LONG, "%s",
msg);
*op_errstr = gf_strdup(msg);
@@ -1876,7 +1594,7 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
ret = glusterd_brickinfo_new_from_brick(brick, &brickinfo, _gf_true,
NULL);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_NOT_FOUND,
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_NOT_FOUND,
"Add-brick: Unable"
" to get brickinfo");
goto out;
@@ -1892,20 +1610,9 @@ glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
}
if (!gf_uuid_compare(brickinfo->uuid, MY_UUID)) {
-#ifdef HAVE_BD_XLATOR
- if (brickinfo->vg[0]) {
- ret = glusterd_is_valid_vg(brickinfo, 1, msg);
- if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_VG,
- "%s", msg);
- *op_errstr = gf_strdup(msg);
- goto out;
- }
- }
-#endif
-
ret = glusterd_validate_and_create_brickpath(
- brickinfo, volinfo->volume_id, op_errstr, is_force, _gf_false);
+ brickinfo, volinfo->volume_id, volinfo->volname, op_errstr,
+ is_force, _gf_false);
if (ret)
goto out;
@@ -1957,7 +1664,7 @@ out:
GF_FREE(str_ret);
GF_FREE(all_bricks);
- gf_msg_debug(THIS->name, 0, "Returning %d", ret);
+ gf_msg_debug(this->name, 0, "Returning %d", ret);
return ret;
}
@@ -1981,6 +1688,8 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count,
};
glusterd_conf_t *priv = THIS->private;
int pid = -1;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
/* Check whether all the nodes of the bricks to be removed are
* up, if not fail the operation */
@@ -1989,6 +1698,8 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count,
ret = dict_get_strn(dict, key, keylen, &brick);
if (ret) {
snprintf(msg, sizeof(msg), "Unable to get %s", key);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "key=%s", key, NULL);
*errstr = gf_strdup(msg);
goto out;
}
@@ -2000,54 +1711,30 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count,
"Incorrect brick "
"%s for volume %s",
brick, volinfo->volname);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INCORRECT_BRICK,
+ "Brick=%s, Volume=%s", brick, volinfo->volname, NULL);
*errstr = gf_strdup(msg);
goto out;
}
/* Do not allow commit if the bricks are not decommissioned
- * if its a remove brick commit or detach-tier commit
+ * if its a remove brick commit
*/
- if (!brickinfo->decommissioned) {
- if (cmd == GF_OP_CMD_COMMIT) {
- snprintf(msg, sizeof(msg),
- "Brick %s "
- "is not decommissioned. "
- "Use start or force option",
- brick);
- *errstr = gf_strdup(msg);
- ret = -1;
- goto out;
- }
-
- if (cmd == GF_OP_CMD_DETACH_COMMIT ||
- cmd_defrag == GF_DEFRAG_CMD_DETACH_COMMIT) {
- snprintf(msg, sizeof(msg),
- "Bricks in Hot "
- "tier are not decommissioned yet. Use "
- "gluster volume tier <VOLNAME> "
- "detach start to start the decommission process");
- *errstr = gf_strdup(msg);
- ret = -1;
- goto out;
- }
- } else {
- if ((cmd == GF_OP_CMD_DETACH_COMMIT ||
- (cmd_defrag == GF_DEFRAG_CMD_DETACH_COMMIT)) &&
- (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_STARTED)) {
- snprintf(msg, sizeof(msg),
- "Bricks in Hot "
- "tier are not decommissioned yet. Wait for "
- "the detach to complete using gluster volume "
- "tier <VOLNAME> status.");
- *errstr = gf_strdup(msg);
- ret = -1;
- goto out;
- }
+ if (!brickinfo->decommissioned && cmd == GF_OP_CMD_COMMIT) {
+ snprintf(msg, sizeof(msg),
+ "Brick %s "
+ "is not decommissioned. "
+ "Use start or force option",
+ brick);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_BRICK_NOT_DECOM,
+ "Use 'start' or 'force' option, Brick=%s", brick, NULL);
+ *errstr = gf_strdup(msg);
+ ret = -1;
+ goto out;
}
if (glusterd_is_local_brick(THIS, volinfo, brickinfo)) {
switch (cmd) {
case GF_OP_CMD_START:
- case GF_OP_CMD_DETACH_START:
goto check;
case GF_OP_CMD_NONE:
default:
@@ -2055,8 +1742,6 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count,
}
switch (cmd_defrag) {
- case GF_DEFRAG_CMD_DETACH_START:
- break;
case GF_DEFRAG_CMD_NONE:
default:
continue;
@@ -2068,6 +1753,10 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count,
"brick %s. Use force option to "
"remove the offline brick",
brick);
+ gf_smsg(
+ this->name, GF_LOG_ERROR, errno, GD_MSG_BRICK_STOPPED,
+ "Use 'force' option to remove the offline brick, Brick=%s",
+ brick, NULL);
*errstr = gf_strdup(msg);
ret = -1;
goto out;
@@ -2078,6 +1767,8 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count,
"Found dead "
"brick %s",
brick);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_BRICK_DEAD,
+ "Brick=%s", brick, NULL);
*errstr = gf_strdup(msg);
ret = -1;
goto out;
@@ -2087,29 +1778,33 @@ glusterd_remove_brick_validate_bricks(gf1_op_commands cmd, int32_t brick_count,
continue;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_uuid(brickinfo->uuid);
if (!peerinfo) {
+ RCU_READ_UNLOCK;
snprintf(msg, sizeof(msg),
"Host node of the "
"brick %s is not in cluster",
brick);
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_BRICK_HOST_NOT_FOUND, "Brick=%s", brick, NULL);
*errstr = gf_strdup(msg);
ret = -1;
- rcu_read_unlock();
goto out;
}
if (!peerinfo->connected) {
+ RCU_READ_UNLOCK;
snprintf(msg, sizeof(msg),
"Host node of the "
"brick %s is down",
brick);
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_HOST_DOWN,
+ "Brick=%s", brick, NULL);
*errstr = gf_strdup(msg);
ret = -1;
- rcu_read_unlock();
goto out;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
}
out:
@@ -2184,6 +1879,7 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr)
errstr = gf_strdup(
"Deleting all the bricks of the "
"volume is not allowed");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_DELETE, NULL);
ret = -1;
goto out;
}
@@ -2192,24 +1888,13 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr)
switch (cmd) {
case GF_OP_CMD_NONE:
errstr = gf_strdup("no remove-brick command issued");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_NO_REMOVE_CMD,
+ NULL);
goto out;
case GF_OP_CMD_STATUS:
ret = 0;
goto out;
-
- case GF_OP_CMD_DETACH_START:
- if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
- snprintf(msg, sizeof(msg),
- "volume %s is not a tier "
- "volume",
- volinfo->volname);
- errstr = gf_strdup(msg);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_TIER, "%s",
- errstr);
- goto out;
- }
-
case GF_OP_CMD_START: {
if ((volinfo->type == GF_CLUSTER_TYPE_REPLICATE) &&
dict_getn(dict, "replica-count", SLEN("replica-count"))) {
@@ -2224,21 +1909,12 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr)
}
if (GLUSTERD_STATUS_STARTED != volinfo->status) {
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- snprintf(msg, sizeof(msg),
- "Volume %s needs "
- "to be started before detach-tier "
- "(you can use 'force' or 'commit' "
- "to override this behavior)",
- volinfo->volname);
- } else {
- snprintf(msg, sizeof(msg),
- "Volume %s needs "
- "to be started before remove-brick "
- "(you can use 'force' or 'commit' "
- "to override this behavior)",
- volinfo->volname);
- }
+ snprintf(msg, sizeof(msg),
+ "Volume %s needs "
+ "to be started before remove-brick "
+ "(you can use 'force' or 'commit' "
+ "to override this behavior)",
+ volinfo->volname);
errstr = gf_strdup(msg);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_STARTED,
"%s", errstr);
@@ -2287,6 +1963,21 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr)
goto out;
}
+ if (volinfo->snap_count > 0 ||
+ !cds_list_empty(&volinfo->snap_volumes)) {
+ snprintf(msg, sizeof(msg),
+ "Volume %s has %" PRIu64
+ " snapshots. "
+ "Changing the volume configuration will not effect "
+ "snapshots."
+ "But the snapshot brick mount should be intact to "
+ "make them function.",
+ volname, volinfo->snap_count);
+ gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_SNAP_WARN, "%s",
+ msg);
+ msg[0] = '\0';
+ }
+
ret = glusterd_remove_brick_validate_bricks(
cmd, brick_count, dict, volinfo, &errstr, GF_DEFRAG_CMD_NONE);
if (ret)
@@ -2315,55 +2006,16 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr)
}
case GF_OP_CMD_STOP:
- case GF_OP_CMD_STOP_DETACH_TIER:
ret = 0;
break;
- case GF_OP_CMD_DETACH_COMMIT:
- if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
- snprintf(msg, sizeof(msg),
- "volume %s is not a tier "
- "volume",
- volinfo->volname);
- errstr = gf_strdup(msg);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_TIER, "%s",
- errstr);
- goto out;
- }
- if (volinfo->decommission_in_progress) {
- errstr = gf_strdup(
- "use 'force' option as migration "
- "is in progress");
- goto out;
- }
- if (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_FAILED) {
- errstr = gf_strdup(
- "use 'force' option as migration "
- "has failed");
- goto out;
- }
-
- ret = glusterd_remove_brick_validate_bricks(
- cmd, brick_count, dict, volinfo, &errstr, GF_DEFRAG_CMD_NONE);
- if (ret)
- goto out;
-
- /* If geo-rep is configured, for this volume, it should be
- * stopped.
- */
- param.volinfo = volinfo;
- ret = glusterd_check_geo_rep_running(&param, op_errstr);
- if (ret || param.is_active) {
- ret = -1;
- goto out;
- }
- break;
-
case GF_OP_CMD_COMMIT:
if (volinfo->decommission_in_progress) {
errstr = gf_strdup(
"use 'force' option as migration "
"is in progress");
+ gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_MIGRATION_PROG,
+ "Use 'force' option", NULL);
goto out;
}
@@ -2371,9 +2023,27 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr)
errstr = gf_strdup(
"use 'force' option as migration "
"has failed");
+ gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_MIGRATION_FAIL,
+ "Use 'force' option", NULL);
goto out;
}
+ if (volinfo->rebal.defrag_status == GF_DEFRAG_STATUS_COMPLETE) {
+ if (volinfo->rebal.rebalance_failures > 0 ||
+ volinfo->rebal.skipped_files > 0) {
+ errstr = gf_strdup(
+ "use 'force' option as migration "
+ "of some files might have been skipped or "
+ "has failed");
+ gf_smsg(this->name, GF_LOG_WARNING, 0,
+ GD_MSG_MIGRATION_FAIL,
+ "Use 'force' option, some files might have been "
+ "skipped",
+ NULL);
+ goto out;
+ }
+ }
+
ret = glusterd_remove_brick_validate_bricks(
cmd, brick_count, dict, volinfo, &errstr, GF_DEFRAG_CMD_NONE);
if (ret)
@@ -2391,18 +2061,11 @@ glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr)
break;
- case GF_OP_CMD_DETACH_COMMIT_FORCE:
- if (volinfo->type != GF_CLUSTER_TYPE_TIER) {
- snprintf(msg, sizeof(msg),
- "volume %s is not a tier "
- "volume",
- volinfo->volname);
- errstr = gf_strdup(msg);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_TIER, "%s",
- errstr);
- goto out;
- }
case GF_OP_CMD_COMMIT_FORCE:
+ case GF_OP_CMD_DETACH_START:
+ case GF_OP_CMD_DETACH_COMMIT:
+ case GF_OP_CMD_DETACH_COMMIT_FORCE:
+ case GF_OP_CMD_STOP_DETACH_TIER:
break;
}
ret = 0;
@@ -2413,7 +2076,8 @@ out:
if (op_errstr)
*op_errstr = errstr;
}
-
+ if (!op_errstr && errstr)
+ GF_FREE(errstr);
return ret;
}
@@ -2495,48 +2159,6 @@ glusterd_remove_brick_migrate_cbk(glusterd_volinfo_t *volinfo,
return ret;
}
-static int
-glusterd_op_perform_attach_tier(dict_t *dict, glusterd_volinfo_t *volinfo,
- int count, char *bricks)
-{
- int ret = 0;
- int replica_count = 0;
- int type = 0;
-
- /*
- * Store the new (cold) tier's structure until the graph is generated.
- * If there is a failure before the graph is generated the
- * structure will revert to its original state.
- */
- volinfo->tier_info.cold_dist_leaf_count = volinfo->dist_leaf_count;
- volinfo->tier_info.cold_type = volinfo->type;
- volinfo->tier_info.cold_brick_count = volinfo->brick_count;
- volinfo->tier_info.cold_replica_count = volinfo->replica_count;
- volinfo->tier_info.cold_disperse_count = volinfo->disperse_count;
- volinfo->tier_info.cold_redundancy_count = volinfo->redundancy_count;
-
- ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
- &replica_count);
- if (!ret)
- volinfo->tier_info.hot_replica_count = replica_count;
- else
- volinfo->tier_info.hot_replica_count = 1;
- volinfo->tier_info.hot_brick_count = count;
- ret = dict_get_int32n(dict, "hot-type", SLEN("hot-type"), &type);
- volinfo->tier_info.hot_type = type;
- ret = dict_set_int32n(dict, "type", SLEN("type"), GF_CLUSTER_TYPE_TIER);
-
- if (!ret)
- ret = dict_set_nstrn(volinfo->dict, "features.ctr-enabled",
- SLEN("features.ctr-enabled"), "on", SLEN("on"));
-
- if (!ret)
- ret = dict_set_nstrn(volinfo->dict, "cluster.tier-mode",
- SLEN("cluster.tier-mode"), "cache", SLEN("cache"));
-
- return ret;
-}
-
int
glusterd_op_add_brick(dict_t *dict, char **op_errstr)
{
@@ -2584,11 +2206,6 @@ glusterd_op_add_brick(dict_t *dict, char **op_errstr)
goto out;
}
- if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) {
- gf_msg_debug(THIS->name, 0, "Adding tier");
- glusterd_op_perform_attach_tier(dict, volinfo, count, bricks);
- }
-
ret = glusterd_op_perform_add_bricks(volinfo, count, bricks, dict);
if (ret) {
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL,
@@ -2617,94 +2234,118 @@ out:
}
int
-glusterd_op_add_tier_brick(dict_t *dict, char **op_errstr)
+glusterd_post_commit_add_brick(dict_t *dict, char **op_errstr)
{
int ret = 0;
char *volname = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
- char *bricks = NULL;
- int32_t count = 0;
- this = THIS;
- GF_VALIDATE_OR_GOTO("glusterd", this, out);
+ ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
- priv = this->private;
- GF_VALIDATE_OR_GOTO(this->name, priv, out);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+ ret = glusterd_replace_old_auth_allow_list(volname);
+out:
+ return ret;
+}
+
+int
+glusterd_post_commit_replace_brick(dict_t *dict, char **op_errstr)
+{
+ int ret = 0;
+ char *volname = NULL;
ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ gf_msg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
"Unable to get volume name");
goto out;
}
+ ret = glusterd_replace_old_auth_allow_list(volname);
+out:
+ return ret;
+}
- ret = glusterd_volinfo_find(volname, &volinfo);
+int
+glusterd_set_rebalance_id_for_remove_brick(dict_t *req_dict, dict_t *rsp_dict)
+{
+ int ret = -1;
+ char *volname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ char msg[2048] = {0};
+ char *task_id_str = NULL;
+ xlator_t *this = NULL;
+ int32_t cmd = 0;
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(rsp_dict);
+ GF_ASSERT(req_dict);
+
+ ret = dict_get_strn(rsp_dict, "volname", SLEN("volname"), &volname);
if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND,
- "Volume not found");
+ gf_msg_debug(this->name, 0, "volname not found");
goto out;
}
- ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
+ ret = glusterd_volinfo_find(volname, &volinfo);
if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Unable to get count");
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND,
+ "Unable to allocate memory");
goto out;
}
- ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks);
+ ret = dict_get_int32n(rsp_dict, "command", SLEN("command"), &cmd);
if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Unable to get bricks");
+ gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Unable to get command");
goto out;
}
- if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) {
- gf_msg_debug(THIS->name, 0, "Adding tier");
- glusterd_op_perform_attach_tier(dict, volinfo, count, bricks);
- }
+ /* remove brick task id is generated in glusterd_op_stage_remove_brick(),
+ * but rsp_dict is unavailable there. So copying it to rsp_dict from
+ * req_dict here. */
- ret = glusterd_op_perform_add_bricks(volinfo, count, bricks, dict);
- if (ret) {
- gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_BRICK_ADD_FAIL,
- "Unable to add bricks");
- goto out;
+ if (is_origin_glusterd(rsp_dict)) {
+ ret = dict_get_strn(req_dict, GF_REMOVE_BRICK_TID_KEY,
+ SLEN(GF_REMOVE_BRICK_TID_KEY), &task_id_str);
+ if (ret) {
+ snprintf(msg, sizeof(msg), "Missing rebalance id for remove-brick");
+ gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_REBALANCE_ID_MISSING,
+ "%s", msg);
+ ret = 0;
+ } else {
+ gf_uuid_parse(task_id_str, volinfo->rebal.rebalance_id);
+
+ ret = glusterd_copy_uuid_to_dict(volinfo->rebal.rebalance_id,
+ rsp_dict, GF_REMOVE_BRICK_TID_KEY,
+ SLEN(GF_REMOVE_BRICK_TID_KEY));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ GD_MSG_REMOVE_BRICK_ID_SET_FAIL,
+ "Failed to set remove-brick-id");
+ goto out;
+ }
+ }
}
- if (priv->op_version <= GD_OP_VERSION_3_10_0) {
- ret = glusterd_store_volinfo(volinfo,
- GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret)
+ if (!gf_uuid_is_null(volinfo->rebal.rebalance_id) &&
+ GD_OP_REMOVE_BRICK == volinfo->rebal.op) {
+ ret = glusterd_copy_uuid_to_dict(volinfo->rebal.rebalance_id, rsp_dict,
+ GF_REMOVE_BRICK_TID_KEY,
+ SLEN(GF_REMOVE_BRICK_TID_KEY));
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set task-id for volume %s", volname);
goto out;
- } else {
- /*
- * The cluster is operating at version greater than
- * gluster-3.10.0. So no need to store volfiles
- * in commit phase, the same will be done
- * in post validate phase with v3 framework.
- */
+ }
}
-
- if (GLUSTERD_STATUS_STARTED == volinfo->status)
- ret = glusterd_svcs_manager(volinfo);
-
out:
return ret;
}
-
-void
-glusterd_op_perform_detach_tier(glusterd_volinfo_t *volinfo)
-{
- volinfo->type = volinfo->tier_info.cold_type;
- volinfo->replica_count = volinfo->tier_info.cold_replica_count;
- volinfo->disperse_count = volinfo->tier_info.cold_disperse_count;
- volinfo->redundancy_count = volinfo->tier_info.cold_redundancy_count;
- volinfo->dist_leaf_count = volinfo->tier_info.cold_dist_leaf_count;
-}
-
int
glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
{
@@ -2721,8 +2362,6 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
int force = 0;
gf1_op_commands cmd = 0;
int32_t replica_count = 0;
- glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_brickinfo_t *tmp = NULL;
char *task_id_str = NULL;
xlator_t *this = NULL;
dict_t *bricks_dict = NULL;
@@ -2730,11 +2369,6 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
int start_remove = 0;
uint32_t commit_hash = 0;
int defrag_cmd = 0;
- int detach_commit = 0;
- void *tier_info = NULL;
- char *cold_shd_key = NULL;
- char *hot_shd_key = NULL;
- int delete_key = 1;
glusterd_conf_t *conf = NULL;
this = THIS;
@@ -2765,7 +2399,7 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
}
cmd = flag;
- if ((GF_OP_CMD_START == cmd) || (GF_OP_CMD_DETACH_START == cmd))
+ if (GF_OP_CMD_START == cmd)
start_remove = 1;
/* Set task-id, if available, in ctx dict for operations other than
@@ -2805,35 +2439,6 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
goto out;
case GF_OP_CMD_STOP:
- case GF_OP_CMD_STOP_DETACH_TIER: {
- /* Fall back to the old volume file */
- cds_list_for_each_entry_safe(brickinfo, tmp, &volinfo->bricks,
- brick_list)
- {
- if (!brickinfo->decommissioned)
- continue;
- brickinfo->decommissioned = 0;
- }
- ret = glusterd_create_volfiles_and_notify_services(volinfo);
- if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, 0,
- GD_MSG_VOLFILE_CREATE_FAIL, "failed to create volfiles");
- goto out;
- }
-
- ret = glusterd_store_volinfo(volinfo,
- GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLINFO_SET_FAIL,
- "failed to store volinfo");
- goto out;
- }
-
- ret = 0;
- goto out;
- }
-
- case GF_OP_CMD_DETACH_START:
case GF_OP_CMD_START:
/* Reset defrag status to 'NOT STARTED' whenever a
* remove-brick/rebalance command is issued to remove
@@ -2841,6 +2446,7 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
* Update defrag_cmd as well or it will only be done
* for nodes on which the brick to be removed exists.
*/
+ /* coverity[MIXED_ENUMS] */
volinfo->rebal.defrag_cmd = cmd;
volinfo->rebal.defrag_status = GF_DEFRAG_STATUS_NOT_STARTED;
ret = dict_get_strn(dict, GF_REMOVE_BRICK_TID_KEY,
@@ -2859,43 +2465,6 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
force = 1;
break;
- case GF_OP_CMD_DETACH_COMMIT:
- case GF_OP_CMD_DETACH_COMMIT_FORCE:
- glusterd_op_perform_detach_tier(volinfo);
- detach_commit = 1;
-
- /* Disabling ctr when detaching a tier, since
- * currently tier is the only consumer of ctr.
- * Revisit this code when this constraint no
- * longer exist.
- */
- dict_deln(volinfo->dict, "features.ctr-enabled",
- SLEN("features.ctr-enabled"));
- dict_deln(volinfo->dict, "cluster.tier-mode",
- SLEN("cluster.tier-mode"));
-
- hot_shd_key = gd_get_shd_key(volinfo->tier_info.hot_type);
- cold_shd_key = gd_get_shd_key(volinfo->tier_info.cold_type);
- if (hot_shd_key) {
- /*
- * Since post detach, shd graph will not contain hot
- * tier. So we need to clear option set for hot tier.
- * For a tiered volume there can be different key
- * for both hot and cold. If hot tier is shd compatible
- * then we need to remove the configured value when
- * detaching a tier, only if the key's are different or
- * cold key is NULL. So we will set delete_key first,
- * and if cold key is not null and they are equal then
- * we will clear the flag. Otherwise we will delete the
- * key.
- */
- if (cold_shd_key)
- delete_key = strcmp(hot_shd_key, cold_shd_key);
- if (delete_key)
- dict_del(volinfo->dict, hot_shd_key);
- }
- /* fall through */
-
case GF_OP_CMD_COMMIT_FORCE:
if (volinfo->decommission_in_progress) {
@@ -2915,6 +2484,11 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
ret = 0;
force = 1;
break;
+ case GF_OP_CMD_DETACH_START:
+ case GF_OP_CMD_DETACH_COMMIT_FORCE:
+ case GF_OP_CMD_DETACH_COMMIT:
+ case GF_OP_CMD_STOP_DETACH_TIER:
+ break;
}
ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
@@ -2923,10 +2497,6 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
"Unable to get count");
goto out;
}
-
- if (volinfo->type == GF_CLUSTER_TYPE_TIER)
- count = glusterd_set_detach_bricks(dict, volinfo);
-
/* Save the list of bricks for later usage only on starting a
* remove-brick. Right now this is required for displaying the task
* parameters with task status in volume status.
@@ -2979,12 +2549,6 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
i++;
}
- if (detach_commit) {
- /* Clear related information from volinfo */
- tier_info = ((void *)(&volinfo->tier_info));
- memset(tier_info, 0, sizeof(volinfo->tier_info));
- }
-
if (start_remove)
volinfo->rebal.dict = dict_ref(bricks_dict);
@@ -3007,16 +2571,11 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
* volumes undergoing a detach operation, they should not
* be modified here.
*/
- if ((replica_count == 1) && (cmd != GF_OP_CMD_DETACH_COMMIT) &&
- (cmd != GF_OP_CMD_DETACH_COMMIT_FORCE)) {
+ if (replica_count == 1) {
if (volinfo->type == GF_CLUSTER_TYPE_REPLICATE) {
volinfo->type = GF_CLUSTER_TYPE_NONE;
/* backward compatibility */
volinfo->sub_count = 0;
- } else {
- volinfo->type = GF_CLUSTER_TYPE_STRIPE;
- /* backward compatibility */
- volinfo->sub_count = volinfo->dist_leaf_count;
}
}
}
@@ -3050,7 +2609,7 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
}
if (start_remove && volinfo->status == GLUSTERD_STATUS_STARTED) {
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(volinfo);
if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_RECONF_FAIL,
"Unable to reconfigure NFS-Server");
@@ -3073,8 +2632,6 @@ glusterd_op_remove_brick(dict_t *dict, char **op_errstr)
}
/* perform the rebalance operations */
defrag_cmd = GF_DEFRAG_CMD_START_FORCE;
- if (cmd == GF_OP_CMD_DETACH_START)
- defrag_cmd = GF_DEFRAG_CMD_START_DETACH_TIER;
/*
* We need to set this *before* we issue commands to the
* bricks, or else we might end up setting it after the bricks
@@ -3103,7 +2660,7 @@ out:
GF_FREE(brick_tmpstr);
if (bricks_dict)
dict_unref(bricks_dict);
-
+ gf_msg_debug(this->name, 0, "returning %d ", ret);
return ret;
}
@@ -3221,202 +2778,19 @@ out:
}
int
-__glusterd_handle_add_tier_brick(rpcsvc_request_t *req)
-{
- int32_t ret = -1;
- gf_cli_req cli_req = {{
- 0,
- }};
- dict_t *dict = NULL;
- char *bricks = NULL;
- char *volname = NULL;
- int brick_count = 0;
- void *cli_rsp = NULL;
- char err_str[2048] = "";
- gf_cli_rsp rsp = {
- 0,
- };
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
- int32_t replica_count = 0;
- int32_t arbiter_count = 0;
- int type = 0;
-
- this = THIS;
- GF_VALIDATE_OR_GOTO("glusterd", this, out);
-
- GF_VALIDATE_OR_GOTO(this->name, req, out);
-
- ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
- if (ret < 0) {
- /*failed to decode msg*/
- req->rpc_err = GARBAGE_ARGS;
- snprintf(err_str, sizeof(err_str), "Garbage args received");
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, "%s",
- err_str);
- goto out;
- }
-
- gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_ADD_BRICK_REQ_RECVD,
- "Received add brick req");
-
- if (cli_req.dict.dict_len) {
- /* Unserialize the dictionary */
- dict = dict_new();
-
- ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
- &dict);
- if (ret < 0) {
- gf_msg(this->name, GF_LOG_ERROR, errno,
- GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to "
- "unserialize req-buffer to dictionary");
- snprintf(err_str, sizeof(err_str),
- "Unable to decode "
- "the command");
- goto out;
- }
- }
-
- ret = dict_get_strn(dict, "volname", SLEN("volname"), &volname);
-
- if (ret) {
- snprintf(err_str, sizeof(err_str),
- "Unable to get volume "
- "name");
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
- err_str);
- goto out;
- }
-
- if (!glusterd_check_volume_exists(volname)) {
- snprintf(err_str, sizeof(err_str), "Volume %s does not exist", volname);
- gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND, "%s",
- err_str);
- ret = -1;
- goto out;
- }
-
- ret = dict_get_int32n(dict, "count", SLEN("count"), &brick_count);
- if (ret) {
- snprintf(err_str, sizeof(err_str),
- "Unable to get volume "
- "brick count");
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
- err_str);
- goto out;
- }
-
- ret = dict_get_int32n(dict, "replica-count", SLEN("replica-count"),
- &replica_count);
- if (!ret) {
- gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS,
- "replica-count is %d", replica_count);
- }
-
- ret = dict_get_int32n(dict, "arbiter-count", SLEN("arbiter-count"),
- &arbiter_count);
- if (!ret) {
- gf_msg(this->name, GF_LOG_INFO, errno, GD_MSG_DICT_GET_SUCCESS,
- "arbiter-count is %d", arbiter_count);
- }
-
- if (!dict_getn(dict, "force", SLEN("force"))) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "Failed to get flag");
- ret = -1;
- goto out;
- }
-
- ret = glusterd_volinfo_find(volname, &volinfo);
- if (ret) {
- snprintf(err_str, sizeof(err_str),
- "Unable to get volinfo "
- "for volume name %s",
- volname);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, "%s",
- err_str);
- goto out;
- }
-
- if (glusterd_is_tiering_supported(err_str) == _gf_false) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED,
- "Tiering not supported at this version");
- ret = -1;
- goto out;
- }
-
- if (dict_getn(dict, "attach-tier", SLEN("attach-tier"))) {
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- snprintf(err_str, sizeof(err_str), "Volume %s is already a tier.",
- volname);
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_ALREADY_TIER, "%s",
- err_str);
- ret = -1;
- goto out;
- }
-
- ret = dict_get_int32n(dict, "hot-type", SLEN("hot-type"), &type);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
- "failed to get type from dictionary");
- goto out;
- }
- }
-
- ret = dict_get_strn(dict, "bricks", SLEN("bricks"), &bricks);
- if (ret) {
- snprintf(err_str, sizeof(err_str),
- "Unable to get volume "
- "bricks");
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED, "%s",
- err_str);
- goto out;
- }
-
- if (type != volinfo->type) {
- ret = dict_set_int32n(dict, "type", SLEN("type"), type);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
- "failed to set the new type in dict");
- goto out;
- }
- }
-
- ret = glusterd_mgmt_v3_initiate_all_phases(req, GD_OP_ADD_TIER_BRICK, dict);
-
-out:
- if (ret) {
- rsp.op_ret = -1;
- rsp.op_errno = 0;
- if (err_str[0] == '\0')
- snprintf(err_str, sizeof(err_str), "Operation failed");
- rsp.op_errstr = err_str;
- cli_rsp = &rsp;
- glusterd_to_cli(req, cli_rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp,
- dict);
- ret = 0; /*sent error to cli, prevent second reply*/
- }
-
- free(cli_req.dict.dict_val); /*its malloced by xdr*/
-
- return ret;
-}
-
-int
glusterd_handle_add_tier_brick(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler(req, __glusterd_handle_add_tier_brick);
+ return 0;
}
int
glusterd_handle_attach_tier(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler(req, __glusterd_handle_add_brick);
+ return 0;
}
int
glusterd_handle_detach_tier(rpcsvc_request_t *req)
{
- return glusterd_big_locked_handler(req, __glusterd_handle_remove_brick);
+ return 0;
}