path: root/xlators/cluster/afr/src/afr-self-heal-data.c
author:    Ravishankar N <ravishankar@redhat.com>    2018-01-28 13:50:47 +0530
committer: jiffin tony Thottan <jthottan@redhat.com>    2018-07-04 04:03:45 +0000
commit:    b715dfb39460d053db726c4150c5f209a1503bff (patch)
tree:      159389165a2bead8e7fb242556fffb77620fabcb /xlators/cluster/afr/src/afr-self-heal-data.c
parent:    b11866a596bcd16a9fc37317929471e9a4b1d299 (diff)
afr: don't treat all cases of all bricks being blamed as split-brain
Problem: We currently don't have a roll-back/undoing of post-ops if quorum is not met. Though the FOP is still unwound with failure, the xattrs remain on the disk. Due to these partial post-ops and partial heals (healing only when 2 bricks are up), we can end up in split-brain purely from the afr xattrs point of view, i.e. each brick is blamed by at least one of the others. These scenarios are hit when there is frequent connect/disconnect of the client/shd to the bricks while I/O or heal is in progress.

Fix: Instead of undoing the post-op, pick a source based on the xattr values. If 2 bricks blame one, the blamed one must be treated as a sink. If there is no majority, all are sources. Once we pick a source, self-heal will then do the heal instead of erroring out due to split-brain.

Change-Id: I3d0224b883eb0945785ade0e9697a1c828aec0ae
BUG: 1597123
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
(cherry picked from commit 0e6e8216823c2d9dafb81aae0f6ee3497c23d140)
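For illustration only, here is a minimal standalone sketch of the source-picking rule described in the Fix paragraph; the blame matrix, BRICK_COUNT, and the count_accusers/pick_sources helpers are invented for this example and are not part of the afr code. A brick blamed by a majority of its peers is treated as a sink; if no brick reaches a majority, every brick remains a source.

#include <stdio.h>

#define BRICK_COUNT 3   /* replica count assumed for this example */

enum brick_role { ROLE_SOURCE, ROLE_SINK };

/* blames[i][j] != 0 means brick i blames brick j (i.e. i's pending
 * xattrs accuse j of having stale data). */
static int
count_accusers (int blames[BRICK_COUNT][BRICK_COUNT], int j)
{
        int i, accusers = 0;

        for (i = 0; i < BRICK_COUNT; i++)
                if (i != j && blames[i][j])
                        accusers++;
        return accusers;
}

/* A brick blamed by a majority of its peers becomes a sink; if no
 * brick reaches a majority, every brick stays a source. */
static void
pick_sources (int blames[BRICK_COUNT][BRICK_COUNT],
              enum brick_role roles[BRICK_COUNT])
{
        int j;

        for (j = 0; j < BRICK_COUNT; j++)
                roles[j] = (count_accusers (blames, j) > BRICK_COUNT / 2)
                                   ? ROLE_SINK : ROLE_SOURCE;
}

int
main (void)
{
        /* Brick 0 blames 1 and 2, brick 1 blames 2, brick 2 blames 0:
         * every brick is blamed by someone, but only brick 2 has a
         * majority of accusers. */
        int blames[BRICK_COUNT][BRICK_COUNT] = {
                { 0, 1, 1 },
                { 0, 0, 1 },
                { 1, 0, 0 },
        };
        enum brick_role roles[BRICK_COUNT];
        int j;

        pick_sources (blames, roles);
        for (j = 0; j < BRICK_COUNT; j++)
                printf ("brick %d: %s\n", j,
                        roles[j] == ROLE_SINK ? "sink" : "source");
        return 0;
}

With three bricks, two accusers form a majority, so the "two bricks blame one" case from the commit message makes the blamed brick the only sink, even though every brick is blamed by at least one other.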
Diffstat (limited to 'xlators/cluster/afr/src/afr-self-heal-data.c')
-rw-r--r--  xlators/cluster/afr/src/afr-self-heal-data.c  |  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/xlators/cluster/afr/src/afr-self-heal-data.c b/xlators/cluster/afr/src/afr-self-heal-data.c
index 8cf43f2807b..bcd0deccbd4 100644
--- a/xlators/cluster/afr/src/afr-self-heal-data.c
+++ b/xlators/cluster/afr/src/afr-self-heal-data.c
@@ -684,7 +684,7 @@ __afr_selfheal_data (call_frame_t *frame, xlator_t *this, fd_t *fd,
         ret = afr_selfheal_inodelk (frame, this, fd->inode, this->name, 0, 0,
                                     data_lock);
         {
-                if (ret < AFR_SH_MIN_PARTICIPANTS) {
+                if (ret < priv->child_count) {
                         gf_msg_debug (this->name, 0, "%s: Skipping "
                                       "self-heal as only %d number "
                                       "of subvolumes "
@@ -749,7 +749,7 @@ restore_time:
         if (!is_arbiter_the_only_sink) {
                 ret = afr_selfheal_inodelk (frame, this, fd->inode, this->name,
                                             0, 0, data_lock);
-                if (ret < AFR_SH_MIN_PARTICIPANTS) {
+                if (ret < priv->child_count) {
                         ret = -ENOTCONN;
                         did_sh = _gf_false;
                         goto skip_undo_pending;
@@ -878,7 +878,7 @@ afr_selfheal_data (call_frame_t *frame, xlator_t *this, inode_t *inode)
                                             priv->sh_domain, 0, 0,
                                             locked_on);
         {
-                if (ret < AFR_SH_MIN_PARTICIPANTS) {
+                if (ret < priv->child_count) {
                         gf_msg_debug (this->name, 0, "%s: Skipping "
                                       "self-heal as only %d number of "
                                       "subvolumes could be locked",