From b51ee5f8d1f80d66effffc06c1e49099c04014a4 Mon Sep 17 00:00:00 2001
From: Ravishankar N
Date: Tue, 19 May 2015 23:09:42 +0530
Subject: tests: arbiter.t fix

Wait for AFR's children to be up in the glustershd process before
attempting heal.

Also, grep (version 2.21) detects statedump files as binary, causing
tests to succeed incorrectly. Hence the -a switch is added to force
grep to treat them as text files.

Thanks to Vijay Bellur for identifying the issue
(http://lists.gnu.org/archive/html/bug-grep/2015-05/msg00000.html)
and the workaround.

Change-Id: Ie3d9591ffaf44baa0cd8c2baa327aed24378e3df
BUG: 1163543
Signed-off-by: Ravishankar N
Reviewed-on: http://review.gluster.org/10833
Tested-by: NetBSD Build System
Tested-by: Niels de Vos
Tested-by: Gluster Build System
Reviewed-by: Pranith Kumar Karampuri
---
 tests/basic/afr/arbiter.t | 8 +++++++-
 tests/volume.rc           | 4 ++--
 2 files changed, 9 insertions(+), 3 deletions(-)

(limited to 'tests')

diff --git a/tests/basic/afr/arbiter.t b/tests/basic/afr/arbiter.t
index a9d485cd7b4..8a983fb0577 100644
--- a/tests/basic/afr/arbiter.t
+++ b/tests/basic/afr/arbiter.t
@@ -37,7 +37,8 @@ TEST setfattr -n user.name -v value2 $M0/file
 TEST $CLI volume start $V0 force
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
 TEST kill_brick $V0 $H0 $B0/${V0}1
-TEST `echo "B2 is down, B3 is the only source, writes will fail" >> $M0/file`
+echo "B2 is down, B3 is the only source, writes will fail" >> $M0/file
+EXPECT_NOT "0" echo $?
 TEST ! cat $M0/file
 # Metadata I/O should still succeed.
 TEST getfattr -n user.name $M0/file
@@ -46,12 +47,17 @@ TEST setfattr -n user.name -v value3 $M0/file
 #shd should not data self-heal from arbiter to the sinks.
 TEST $CLI volume set $V0 cluster.self-heal-daemon on
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
 TEST $CLI volume heal $V0
 EXPECT_WITHIN $HEAL_TIMEOUT '1' echo $(count_sh_entries $B0/$V0"1")
 EXPECT_WITHIN $HEAL_TIMEOUT '1' echo $(count_sh_entries $B0/$V0"2")
 TEST $CLI volume start $V0 force
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
 TEST $CLI volume heal $V0
 EXPECT 0 afr_get_pending_heal_count $V0
diff --git a/tests/volume.rc b/tests/volume.rc
index 5c5e28459c0..9e01fff687d 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -87,7 +87,7 @@ function cleanup_mount_statedump {
 function snap_client_connected_status {
         local vol=$1
         local fpath=$(generate_mount_statedump $vol)
-        up=$(grep -A2 xlator.protocol.client.$vol-snapd-client.priv $fpath | tail -1 | cut -f 2 -d'=')
+        up=$(grep -a -A2 xlator.protocol.client.$vol-snapd-client.priv $fpath | tail -1 | cut -f 2 -d'=')
         rm -f $fpath
         echo "$up"
 }
@@ -98,7 +98,7 @@ function _afr_child_up_status {
         local vol=$1
         local brick_id=$2
         local gen_state_dump=$3
         local fpath=$($gen_state_dump $vol)
-        up=$(grep -B1 trusted.afr.$vol-client-$brick_id $fpath | head -1 | cut -f2 -d'=')
+        up=$(grep -a -B1 trusted.afr.$vol-client-$brick_id $fpath | head -1 | cut -f2 -d'=')
         rm -f $fpath
         echo "$up"
 }
--
cgit
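
For context on the -a workaround: grep reports a match in a file it
classifies as binary with "Binary file ... matches" instead of printing
the matching line (NUL bytes are one well-known trigger, and grep 2.21
tightened this detection, which is what bit these tests). The cut
pipelines in volume.rc then extract that message instead of the expected
value. Below is a minimal reproduction sketch under assumptions:
/tmp/fake.dump and its keys are made up to mimic a statedump, not taken
from a real glusterfs run.

    # Write a statedump-like file; the trailing NUL byte (\0 in bash
    # printf's format string) makes grep classify it as binary.
    printf 'child_up[0]=1\npending_key[0]=trusted.afr.patchy-client-0\n\0' > /tmp/fake.dump

    # Without -a, grep emits "Binary file /tmp/fake.dump matches", so the
    # pipeline prints that message instead of the value "1":
    grep -B1 trusted.afr.patchy-client-0 /tmp/fake.dump | head -1 | cut -f2 -d'='

    # With -a, grep processes the file as text and the pipeline prints "1":
    grep -a -B1 trusted.afr.patchy-client-0 /tmp/fake.dump | head -1 | cut -f2 -d'='

GNU grep has supported -a (equivalent to --binary-files=text) since long
before 2.21, so adding it unconditionally is safe on older installations.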