author     Kaushal M <kaushal@redhat.com>    2013-11-14 12:15:53 +0530
committer  Anand Avati <avati@redhat.com>    2013-11-20 11:32:09 -0800
commit     3c38ba1e7b4959602f945112a26b8aee904fefaa (patch)
tree       9a282d069db2a48c06d13777cfdd56a630ae2cfe /xlators/mgmt/glusterd/src/glusterd-utils.c
parent     bc9f0bb5ce108cba7e88be123681e2c269da31b7 (diff)
glusterd: Start rebalance only where required
Gluster was starting rebalance processes on peers where they weren't required, in two cases:
- For a normal rebalance command on a volume, rebalance processes were started on all peers instead of just the peers containing bricks of the volume.
- For rebalance processes restarted by a volume sync (caused by a new peer being probed, or by a peer restarting), rebalance processes were started on all peers, both for a normal rebalance and for a remove-brick needing rebalance.

This patch adds a new check before starting a rebalance process in the above two cases:
- For a rebalance process required by a rebalance command, each peer checks whether it contains at least one brick of the volume.
- For a rebalance process required by a remove-brick command, each peer checks whether it contains at least one of the bricks being removed.

Change-Id: I512da16994f0d5482889c3a009c46dc20a8a15bb
BUG: 1031887
Signed-off-by: Kaushal M <kaushal@redhat.com>
Reviewed-on: http://review.gluster.org/6301
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Krutika Dhananjay <kdhananj@redhat.com>
Reviewed-by: Anand Avati <avati@redhat.com>
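In short, each peer now decides locally, by comparing its own UUID against the UUID recorded on each relevant brick. The following standalone toy model illustrates that decision in plain C; the types, names, and data layout are illustrative stand-ins, not glusterd's actual structures (those appear in the diff below):

    /* Toy model of the per-peer check added by this patch: a peer
     * should start rebalance only if at least one brick in the list
     * is tagged with its own UUID. All types and names here are
     * illustrative stand-ins, not glusterd APIs. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    typedef unsigned char peer_id_t[16];    /* stand-in for uuid_t */

    struct brick {
            peer_id_t   host;    /* UUID of the peer hosting this brick */
            const char *path;
    };

    static bool
    should_start_rebalance (const peer_id_t me,
                            const struct brick *bricks, size_t n)
    {
            size_t i;

            for (i = 0; i < n; i++)
                    if (memcmp (me, bricks[i].host,
                                sizeof (peer_id_t)) == 0)
                            return true;
            return false;
    }

    int
    main (void)
    {
            peer_id_t    me       = { 1 };
            struct brick bricks[] = {
                    { { 2 }, "/bricks/b1" },  /* hosted on another peer */
                    { { 1 }, "/bricks/b2" },  /* hosted on this peer */
            };

            printf ("start rebalance here: %s\n",
                    should_start_rebalance (me, bricks, 2) ? "yes" : "no");
            return 0;
    }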
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-utils.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c | 65
1 file changed, 65 insertions, 0 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 9e8b7c5b6..e93b88ecf 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -6233,6 +6233,8 @@ glusterd_restart_rebalance (glusterd_conf_t *conf)
         list_for_each_entry (volinfo, &conf->volumes, vol_list) {
                 if (!volinfo->rebal.defrag_cmd)
                         continue;
+                if (!gd_should_i_start_rebalance (volinfo))
+                        continue;
                 glusterd_volume_defrag_restart (volinfo, op_errstr, 256,
                                                 volinfo->rebal.defrag_cmd, NULL);
         }
@@ -7999,3 +8001,66 @@ glusterd_is_status_tasks_op (glusterd_op_t op, dict_t *dict)
 out:
         return is_status_tasks;
 }
+
+/* Tells whether rebalance needs to be started for the given volume on the
+ * peer.
+ *
+ * Rebalance should be started on a peer only if an involved brick is present
+ * on the peer.
+ *
+ * For a normal rebalance, if any one brick of the given volume is present on
+ * the peer, the rebalance process should be started.
+ *
+ * For a rebalance that is part of a remove-brick operation, the rebalance
+ * process should be started only if one of the bricks being removed is
+ * present on the peer. */
+gf_boolean_t
+gd_should_i_start_rebalance (glusterd_volinfo_t *volinfo) {
+        gf_boolean_t retval = _gf_false;
+        int ret = -1;
+        glusterd_brickinfo_t *brick = NULL;
+        int count = 0;
+        int i = 0;
+        char key[1023] = {0,};
+        char *brickname = NULL;
+
+
+        switch (volinfo->rebal.op) {
+        case GD_OP_REBALANCE:
+                list_for_each_entry (brick, &volinfo->bricks, brick_list) {
+                        if (uuid_compare (MY_UUID, brick->uuid) == 0) {
+                                retval = _gf_true;
+                                break;
+                        }
+                }
+                break;
+        case GD_OP_REMOVE_BRICK:
+                ret = dict_get_int32 (volinfo->rebal.dict, "count", &count);
+                if (ret) {
+                        goto out;
+                }
+                for (i = 1; i <= count; i++) {
+                        memset (key, 0, sizeof (key));
+                        snprintf (key, sizeof (key), "brick%d", i);
+                        ret = dict_get_str (volinfo->rebal.dict, key,
+                                            &brickname);
+                        if (ret)
+                                goto out;
+                        ret = glusterd_volume_brickinfo_get_by_brick (brickname,
+                                                                      volinfo,
+                                                                      &brick);
+                        if (ret)
+                                goto out;
+                        if (uuid_compare (MY_UUID, brick->uuid) == 0) {
+                                retval = _gf_true;
+                                break;
+                        }
+                }
+                break;
+        default:
+                break;
+        }
+
+out:
+        return retval;
+}
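For context on the GD_OP_REMOVE_BRICK branch above: it expects volinfo->rebal.dict to carry the number of removed bricks under "count" and their names under the 1-based keys "brick1" .. "brickN". The fragment below is a hedged sketch of building a dict of that shape; dict_new (), dict_set_str () and dict_set_int32 () are real libglusterfs calls, but the actual staging happens elsewhere in glusterd, and brick_count/brick_paths here are hypothetical inputs (error handling omitted):

    /* Sketch only, not the actual staging code: build a dict with the
     * shape that gd_should_i_start_rebalance () consumes for
     * GD_OP_REMOVE_BRICK. brick_count and brick_paths are
     * hypothetical inputs. */
    dict_t *dict = dict_new ();
    char    key[64] = {0,};
    int     i = 0;

    for (i = 0; i < brick_count; i++) {
            snprintf (key, sizeof (key), "brick%d", i + 1); /* 1-based keys */
            dict_set_str (dict, key, brick_paths[i]);
    }
    dict_set_int32 (dict, "count", brick_count);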