author     Anuradha Talur <atalur@redhat.com>   2016-03-16 10:55:09 +0530
committer  Atin Mukherjee <amukherj@redhat.com>   2016-03-21 10:51:00 -0700
commit     020bc022c342c4c015e29c63399757e36d653a49 (patch)
tree       ca9ec7704a711c7706e40d1b988111978168d436   /xlators/mgmt/glusterd/src/glusterd-brick-ops.c
parent     696fbf9b18078a7ac28080d841f0de2306786b87 (diff)
glusterd / afr : Enable auto heal when replica count increases
In replicate volumes, when a brick is added to a replicate group, a heal to the new brick should be triggered. Also, the new brick should not be considered a source for healing until it is up to date. Previously, extended attributes had to be set manually on the bricks for this to happen. This patch is part 1 of automating this process.

Change-Id: I29958448618372bfde23bf1dac5dd23dba1ad98f
BUG: 1276203
Signed-off-by: Anuradha Talur <atalur@redhat.com>
Reviewed-on: http://review.gluster.org/12451
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Ravishankar N <ravishankar@redhat.com>
Smoke: Gluster Build System <jenkins@build.gluster.com>
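The code path added by this patch is exercised when the replica count is raised through the add-brick CLI. A minimal usage sketch, assuming an existing two-brick replica volume named "testvol" and a hypothetical new brick on server3 (names are placeholders, not taken from the commit):

    # Raise the replica count from 2 to 3. With this patch, glusterd is meant
    # to set the AFR pending xattrs on the existing bricks and trigger heal to
    # the new brick, instead of requiring the xattrs to be set manually.
    gluster volume add-brick testvol replica 3 server3:/bricks/brick1

Per the new staging check in the diff below, the volume must not be in the stopped state when the replica count is being increased, and the automatic handling applies only when the cluster op-version is at least GD_OP_VERSION_3_7_10 and the operation is not an attach-tier.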
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-brick-ops.c')
-rw-r--r--   xlators/mgmt/glusterd/src/glusterd-brick-ops.c   86
1 file changed, 70 insertions, 16 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
index d77626d3457..920f7f05623 100644
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
@@ -21,6 +21,7 @@
#include "glusterd-messages.h"
#include "glusterd-server-quorum.h"
#include "run.h"
+#include "glusterd-volgen.h"
#include <sys/signal.h>
/* misc */
@@ -1233,6 +1234,7 @@ glusterd_op_perform_add_bricks (glusterd_volinfo_t *volinfo, int32_t count,
char *brick_mount_dir = NULL;
xlator_t *this = NULL;
glusterd_conf_t *conf = NULL;
+ gf_boolean_t is_valid_add_brick = _gf_false;
this = THIS;
GF_ASSERT (this);
@@ -1320,6 +1322,7 @@ glusterd_op_perform_add_bricks (glusterd_volinfo_t *volinfo, int32_t count,
/* Gets changed only if the options are given in add-brick cli */
if (type)
volinfo->type = type;
+
if (replica_count) {
volinfo->replica_count = replica_count;
}
@@ -1355,6 +1358,27 @@ glusterd_op_perform_add_bricks (glusterd_volinfo_t *volinfo, int32_t count,
CAPS_OFFLOAD_COPY | CAPS_OFFLOAD_SNAPSHOT;
#endif
+ /* This check needs to be added to distinguish between
+ * attach-tier commands and add-brick commands.
+ * When a tier is attached, adding is done via add-brick
+ * and setting of pending xattrs shouldn't be done for
+ * attach-tiers as they are virtually new volumes.
+ */
+ if (glusterd_is_volume_replicate (volinfo)) {
+ if (replica_count &&
+ !dict_get (dict, "attach-tier") &&
+ conf->op_version >= GD_OP_VERSION_3_7_10) {
+ is_valid_add_brick = _gf_true;
+ ret = generate_dummy_client_volfiles (volinfo);
+ if (ret) {
+ gf_msg (THIS->name, GF_LOG_ERROR, 0,
+ GD_MSG_VOLFILE_CREATE_FAIL,
+ "Failed to create volfile.");
+ goto out;
+ }
+ }
+ }
+
while (i <= count) {
ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo,
&brickinfo);
@@ -1386,6 +1410,16 @@ glusterd_op_perform_add_bricks (glusterd_volinfo_t *volinfo, int32_t count,
}
}
+ /* if the volume is a replicate volume, do: */
+ if (is_valid_add_brick) {
+ if (!gf_uuid_compare (brickinfo->uuid, MY_UUID)) {
+ ret = glusterd_handle_replicate_brick_ops (
+ volinfo, brickinfo,
+ GD_OP_ADD_BRICK);
+ if (ret < 0)
+ goto out;
+ }
+ }
ret = glusterd_brick_start (volinfo, brickinfo,
_gf_true);
if (ret)
@@ -1514,22 +1548,6 @@ glusterd_op_stage_add_brick (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
conf = this->private;
GF_ASSERT (conf);
- ret = dict_get_int32 (dict, "replica-count", &replica_count);
- if (ret) {
- gf_msg_debug (THIS->name, 0,
- "Unable to get replica count");
- }
-
- if (replica_count > 0) {
- ret = op_version_check (this, GD_OP_VER_PERSISTENT_AFR_XATTRS,
- msg, sizeof(msg));
- if (ret) {
- gf_msg (this->name, GF_LOG_ERROR, 0,
- GD_MSG_OP_VERSION_MISMATCH, "%s", msg);
- *op_errstr = gf_strdup (msg);
- goto out;
- }
- }
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
gf_msg (THIS->name, GF_LOG_ERROR, errno,
@@ -1550,6 +1568,42 @@ glusterd_op_stage_add_brick (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
if (ret)
goto out;
+ ret = dict_get_int32 (dict, "replica-count", &replica_count);
+ if (ret) {
+ gf_msg_debug (THIS->name, 0,
+ "Unable to get replica count");
+ }
+
+ if (replica_count > 0) {
+ ret = op_version_check (this, GD_OP_VER_PERSISTENT_AFR_XATTRS,
+ msg, sizeof(msg));
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_OP_VERSION_MISMATCH, "%s", msg);
+ *op_errstr = gf_strdup (msg);
+ goto out;
+ }
+ }
+
+ /* Do not allow add-brick for stopped volumes when replica-count
+ * is being increased.
+ */
+ if (glusterd_is_volume_replicate (volinfo)) {
+ if (conf->op_version >= GD_OP_VERSION_3_7_10 &&
+ !dict_get (dict, "attach-tier") &&
+ replica_count &&
+ GLUSTERD_STATUS_STOPPED == volinfo->status) {
+ ret = -1;
+ snprintf (msg, sizeof (msg), " Volume must not be in"
+ " stopped state when replica-count needs to "
+ " be increased.");
+ gf_msg (THIS->name, GF_LOG_ERROR, 0,
+ GD_MSG_BRICK_ADD_FAIL, "%s", msg);
+ *op_errstr = gf_strdup (msg);
+ goto out;
+ }
+ }
+
if (conf->op_version > GD_OP_VERSION_3_7_5 &&
is_origin_glusterd (dict)) {
ret = glusterd_validate_quorum (this, GD_OP_ADD_BRICK, dict,