diff options
author | Atin Mukherjee <amukherj@redhat.com> | 2018-01-03 14:29:51 +0530 |
---|---|---|
committer | Shyamsundar Ranganathan <srangana@redhat.com> | 2018-01-09 13:58:47 +0000 |
commit | 745e0cc1222fb47b4e406e872aa3ae5a92bde542 (patch) | |
tree | 7144cb7582d1382e4e1e7e288696b3dfe0e585c6 /xlators/mgmt/glusterd/src/glusterd-server-quorum.c | |
parent | a486a6cc40fed23bc59dbecb415ea5be0ee0d872 (diff) |
glusterd: connect to an existing brick process when quorum status is NOT_APPLICABLE_QUORUM
First of all, this patch reverts commit 635c1c3 as the same is causing a
regression with bricks not coming up on time when a node is rebooted.
This patch tries to fix the problem in a different way by just trying to
connect to an existing running brick when quorum status is not
applicable.
mainline patch: https://review.gluster.org/#/c/19134/
Change-Id: I0efb5901832824b1c15dcac529bffac85173e097
BUG: 1511293
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-server-quorum.c')
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-server-quorum.c | 27 |
1 file changed, 22 insertions, 5 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c index 25fbbd5bc56..f3fd5c8c9aa 100644 --- a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c +++ b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c @@ -316,6 +316,7 @@ glusterd_do_volume_quorum_action (xlator_t *this, glusterd_volinfo_t *volinfo, glusterd_brickinfo_t *brickinfo = NULL; gd_quorum_status_t quorum_status = NOT_APPLICABLE_QUORUM; gf_boolean_t follows_quorum = _gf_false; + gf_boolean_t quorum_status_unchanged = _gf_false; if (volinfo->status != GLUSTERD_STATUS_STARTED) { volinfo->quorum_status = NOT_APPLICABLE_QUORUM; @@ -343,9 +344,10 @@ glusterd_do_volume_quorum_action (xlator_t *this, glusterd_volinfo_t *volinfo, * the bricks that are down are brought up again. In this process it * also brings up the brick that is purposefully taken down. */ - if (quorum_status != NOT_APPLICABLE_QUORUM && - volinfo->quorum_status == quorum_status) + if (volinfo->quorum_status == quorum_status) { + quorum_status_unchanged = _gf_true; goto out; + } if (quorum_status == MEETS_QUORUM) { gf_msg (this->name, GF_LOG_CRITICAL, 0, @@ -370,9 +372,10 @@ glusterd_do_volume_quorum_action (xlator_t *this, glusterd_volinfo_t *volinfo, if (!brickinfo->start_triggered) { pthread_mutex_lock (&brickinfo->restart_mutex); { - glusterd_brick_start (volinfo, - brickinfo, - _gf_false); + ret = glusterd_brick_start (volinfo, + brickinfo, + _gf_false, + _gf_false); } pthread_mutex_unlock (&brickinfo->restart_mutex); } @@ -394,6 +397,20 @@ glusterd_do_volume_quorum_action (xlator_t *this, glusterd_volinfo_t *volinfo, } } out: + if (quorum_status_unchanged) { + list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) { + if (!glusterd_is_local_brick (this, volinfo, brickinfo)) + continue; + ret = glusterd_brick_start (volinfo, brickinfo, + _gf_false, _gf_true); + if (ret) { + gf_msg (this->name, GF_LOG_ERROR, 0, + GD_MSG_BRICK_DISCONNECTED, "Failed to " + "connect to 
%s:%s", brickinfo->hostname, + brickinfo->path); + } + } + } return; } |