author     Ravishankar N <ravishankar@redhat.com>  2015-05-19 23:09:42 +0530
committer  Pranith Kumar Karampuri <pkarampu@redhat.com>  2015-05-24 02:20:19 -0700
commit     b51ee5f8d1f80d66effffc06c1e49099c04014a4 (patch)
tree       90660d80ca67fb1f22e438be4568f6720b2fac0e /tests/basic/afr/arbiter.t
parent     dbad74f1e739c6d1f48b8af392c1eee1a7254015 (diff)
tests: arbiter.t fix
Wait for AFR's children to be up in the glustershd process before attempting heal. Also, grep (version 2.21) detects statedump files as binary, causing tests to succeed incorrectly, so the -a switch is added to force grep to treat them as text files. Thanks to Vijay Bellur for identifying the issue (http://lists.gnu.org/archive/html/bug-grep/2015-05/msg00000.html) and the workaround.

Change-Id: Ie3d9591ffaf44baa0cd8c2baa327aed24378e3df
BUG: 1163543
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Reviewed-on: http://review.gluster.org/10833
Tested-by: NetBSD Build System
Tested-by: Niels de Vos <ndevos@redhat.com>
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Pranith Kumar Karampuri <pkarampu@redhat.com>
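For reference, a minimal sketch of the grep workaround the message describes. The helper name and pattern below are illustrative only; the actual change lives in the shared test library rather than in arbiter.t, and the -a switch is the point:

    # grep 2.21 can classify statedump files as binary and print
    # "Binary file ... matches" instead of the matching lines,
    # so force text-mode matching with -a before counting hits.
    count_matches_in_statedump () {   # hypothetical helper
        grep -a "$2" "$1" | wc -l
    }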
Diffstat (limited to 'tests/basic/afr/arbiter.t')
-rw-r--r--  tests/basic/afr/arbiter.t | 8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/tests/basic/afr/arbiter.t b/tests/basic/afr/arbiter.t
index a9d485cd7b4..8a983fb0577 100644
--- a/tests/basic/afr/arbiter.t
+++ b/tests/basic/afr/arbiter.t
@@ -37,7 +37,8 @@ TEST setfattr -n user.name -v value2 $M0/file
TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
TEST kill_brick $V0 $H0 $B0/${V0}1
-TEST `echo "B2 is down, B3 is the only source, writes will fail" >> $M0/file`
+echo "B2 is down, B3 is the only source, writes will fail" >> $M0/file
+EXPECT_NOT "0" echo $?
TEST ! cat $M0/file
# Metadata I/O should still succeed.
TEST getfattr -n user.name $M0/file
@@ -46,12 +47,17 @@ TEST setfattr -n user.name -v value3 $M0/file
#shd should not data self-heal from arbiter to the sinks.
TEST $CLI volume set $V0 cluster.self-heal-daemon on
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
TEST $CLI volume heal $V0
EXPECT_WITHIN $HEAL_TIMEOUT '1' echo $(count_sh_entries $B0/$V0"1")
EXPECT_WITHIN $HEAL_TIMEOUT '1' echo $(count_sh_entries $B0/$V0"2")
TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
TEST $CLI volume heal $V0
EXPECT 0 afr_get_pending_heal_count $V0