author     Pranith Kumar K <pkarampu@redhat.com>          2016-07-22 07:48:27 +0530
committer  Pranith Kumar Karampuri <pkarampu@redhat.com>  2016-07-28 07:03:08 -0700
commit     264c7496c875914fcaf8bded44d61e284633c719 (patch)
tree       19cf6e10aaead7a4cc930a066264f614edcd3fe3
parent     56ca0b14aaf4e6daddc2b787765db659b1c2ff1b (diff)
tests: Fix spurious failures with split-brain-favorite-child-policy.t
Problem: It is not guaranteed that the self-heal daemon applies a new option
as soon as volume set is executed, because all the command guarantees is that
the process is notified of the change in volfile. Shd still needs to fetch the
volfile and reconfigure. If the next volume heal command arrives before that
reconfigure happens, the heal won't happen.

Fix: Restart shd to make sure it has the option loaded with the new value.

>BUG: 1358976
>Change-Id: I3ed30ebbec17bd06caa632e79e9412564f431b19
>Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
>Reviewed-on: http://review.gluster.org/14978
>Smoke: Gluster Build System <jenkins@build.gluster.org>
>Reviewed-by: Krutika Dhananjay <kdhananj@redhat.com>
>Tested-by: Jeff Darcy <jdarcy@redhat.com>
>NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
>CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
>Reviewed-by: Jeff Darcy <jdarcy@redhat.com>

BUG: 1360573
Change-Id: I09e097dbdc2cae659ad1617d336945eb804b09a5
Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
Reviewed-on: http://review.gluster.org/15022
Smoke: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Ravishankar N <ravishankar@redhat.com>
-rw-r--r--  tests/basic/afr/split-brain-favorite-child-policy.t  17
1 file changed, 17 insertions(+), 0 deletions(-)
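
For context, here is a minimal sketch of the restart-and-wait pattern the
patch inserts after each favorite-child-policy change before issuing the heal
(the glustershd_up_status and afr_child_up_status_in_shd helpers come from the
project's test framework include files, which the .t script is assumed to
source; the ctime policy is used here only as an example):

    TEST $CLI volume set $V0 cluster.favorite-child-policy ctime
    TEST $CLI volume start $V0 force      # restart shd so it reloads the volfile
    EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
    EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
    EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
    TEST $CLI volume heal $V0             # heal only once shd has the new option
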
diff --git a/tests/basic/afr/split-brain-favorite-child-policy.t b/tests/basic/afr/split-brain-favorite-child-policy.t
index 66fcd67a031..7a14852685c 100644
--- a/tests/basic/afr/split-brain-favorite-child-policy.t
+++ b/tests/basic/afr/split-brain-favorite-child-policy.t
@@ -45,6 +45,10 @@ EXPECT "1" echo $?
#We know that the first brick has latest ctime.
LATEST_CTIME_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
TEST $CLI volume set $V0 cluster.favorite-child-policy ctime
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
cat $M0/file > /dev/null
@@ -79,6 +83,10 @@ EXPECT "1" echo $?
#We know that the second brick has latest mtime.
LATEST_CTIME_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
TEST $CLI volume set $V0 cluster.favorite-child-policy mtime
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
cat $M0/file > /dev/null
@@ -113,6 +121,10 @@ EXPECT "1" echo $?
#We know that the second brick has the bigger size file.
BIGGER_FILE_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
TEST $CLI volume set $V0 cluster.favorite-child-policy size
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
cat $M0/file > /dev/null
@@ -164,6 +176,11 @@ EXPECT "1" echo $?
#We know that the second and third bricks agree with each other. Pick any one of them.
MAJORITY_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
TEST $CLI volume set $V0 cluster.favorite-child-policy majority
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
TEST $CLI volume heal $V0
EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
cat $M0/file > /dev/null