diff options
author | yinkui <13965432176@163.com> | 2019-06-28 13:57:30 +0800 |
---|---|---|
committer | Amar Tumballi <amar@kadalu.io> | 2020-06-09 01:40:01 +0000 |
commit | 06fa9869070146e8c832cb930536c66c91494fd8 (patch) | |
tree | 059f99b90c8ecde4cff2e34dad8e6e6bdab2a00c | |
parent | d21d656bc4a91bf0e7b0d05df26a2bb8b3204891 (diff) |
glusterd: do a full heal on a different online node when an ec/afr full heal is requested
For example:
We have 3 nodes and create an ec 3x(2+1) volume with subvolumes
test-disperse-0/test-disperse-1/test-disperse-2. When we run
'gluster v heal test full' on node-1, the glustershd process on each of
node-1/node-2/node-3 receives op=GF_EVENT_TRANSLATOR_OP
and then performs the full heal on a different disperse group.
Let us say we have a 2x(2+1) disperse volume with each brick
on a different machine m0, m1, m2, m3, m4, m5, and candidate_max is m5.
When we do a full heal, '*index' is 3 and !gf_uuid_compare(MY_UUID, brickinfo->uuid)
will be true on m3, so m3's glustershd will be the heal-xlator.
Id: I5c6762e6cfb375aed32d3fc11fe5eae3ee41aab4
Signed-off-by: yinkui <13965432176@163.com>
Change-Id: Ic7ef3ddfd30b5f4714ba99b4e7b708c927d68764
fixes: bz#1724948
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-op-sm.c | 39 |
1 file changed, 36 insertions, 3 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c index 26a0eb7d20f..0b7ef6d5d40 100644 --- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c +++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c @@ -6474,6 +6474,10 @@ _select_hxlators_for_full_self_heal(xlator_t *this, glusterd_volinfo_t *volinfo, glusterd_brickinfo_t *brickinfo = NULL; int hxl_children = 0; uuid_t candidate = {0}; + int brick_index = 0; + glusterd_peerinfo_t *peerinfo = NULL; + int delta = 0; + uuid_t candidate_max = {0}; if ((*index) == 0) (*index)++; @@ -6485,13 +6489,40 @@ _select_hxlators_for_full_self_heal(xlator_t *this, glusterd_volinfo_t *volinfo, cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list) { + if (gf_uuid_compare(brickinfo->uuid, candidate_max) > 0) { + if (!gf_uuid_compare(MY_UUID, brickinfo->uuid)) { + gf_uuid_copy(candidate_max, brickinfo->uuid); + } else { + peerinfo = glusterd_peerinfo_find(brickinfo->uuid, NULL); + if (peerinfo && peerinfo->connected) { + gf_uuid_copy(candidate_max, brickinfo->uuid); + } + } + } + } + + cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list) + { if (gf_uuid_is_null(brickinfo->uuid)) (void)glusterd_resolve_brick(brickinfo); - if (gf_uuid_compare(brickinfo->uuid, candidate) > 0) - gf_uuid_copy(candidate, brickinfo->uuid); + delta %= hxl_children; + if ((*index + delta) == (brick_index + hxl_children)) { + if (!gf_uuid_compare(MY_UUID, brickinfo->uuid)) { + gf_uuid_copy(candidate, brickinfo->uuid); + } else { + peerinfo = glusterd_peerinfo_find(brickinfo->uuid, NULL); + if (peerinfo && peerinfo->connected) { + gf_uuid_copy(candidate, brickinfo->uuid); + } else if (peerinfo && + (!gf_uuid_compare(candidate_max, MY_UUID))) { + _add_hxlator_to_dict(dict, volinfo, + ((*index) - 1) / hxl_children, + (*hxlator_count)); + (*hxlator_count)++; + } + } - if ((*index) % hxl_children == 0) { if (!gf_uuid_compare(MY_UUID, candidate)) { _add_hxlator_to_dict(dict, volinfo, ((*index) - 1) / 
hxl_children, @@ -6499,6 +6530,8 @@ _select_hxlators_for_full_self_heal(xlator_t *this, glusterd_volinfo_t *volinfo, (*hxlator_count)++; } gf_uuid_clear(candidate); + brick_index += hxl_children; + delta++; } (*index)++; |