Diffstat (limited to 'tests/basic/afr/arbiter.t')
| -rw-r--r-- | tests/basic/afr/arbiter.t | 34 |
1 file changed, 28 insertions, 6 deletions
diff --git a/tests/basic/afr/arbiter.t b/tests/basic/afr/arbiter.t
index 84d2ccece51..7c92a9fe6c9 100644
--- a/tests/basic/afr/arbiter.t
+++ b/tests/basic/afr/arbiter.t
@@ -9,20 +9,40 @@ TEST glusterd;
 TEST pidof glusterd
 
 # Non arbiter replica 3 volumes should not have arbiter-count option enabled.
+TEST mkdir -p $B0/${V0}{0,1,2}
 TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
 TEST $CLI volume start $V0
-TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+EXPECT 'Started' volinfo_field $V0 'Status'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
 TEST ! stat $M0/.meta/graphs/active/$V0-replicate-0/options/arbiter-count
 EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
 TEST $CLI volume stop $V0
 TEST $CLI volume delete $V0
+
+# Make sure we clean up *all the way* so we don't get "brick X is already part
+# of a volume" errors.
+cleanup;
+TEST glusterd;
+TEST pidof glusterd
+
 # Create and mount a replica 3 arbiter volume.
+TEST mkdir -p $B0/${V0}{0,1,2}
 TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2}
 TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
 TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
 TEST $CLI volume start $V0
-TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+EXPECT 'Started' volinfo_field $V0 'Status'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
 TEST stat $M0/.meta/graphs/active/$V0-replicate-0/options/arbiter-count
 EXPECT "1" cat $M0/.meta/graphs/active/$V0-replicate-0/options/arbiter-count
@@ -40,16 +60,18 @@ TEST kill_brick $V0 $H0 $B0/${V0}1
 echo "B2 is down, B3 is the only source, writes will fail" >> $M0/file
 EXPECT_NOT "0" echo $?
 TEST ! cat $M0/file
-# Metadata I/O should still succeed.
-TEST getfattr -n user.name $M0/file
-TEST setfattr -n user.name -v value3 $M0/file
+# Though metadata IO could have been served from arbiter, we do not allow it
+# anymore as FOPS like getfattr could be overloaded to return iatt buffers for
+# use by other translators.
+TEST ! getfattr -n user.name $M0/file
+TEST ! setfattr -n user.name -v value3 $M0/file
 #shd should not data self-heal from arbiter to the sinks.
 TEST $CLI volume set $V0 cluster.self-heal-daemon on
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
-TEST $CLI volume heal $V0
+$CLI volume heal $V0
 EXPECT_WITHIN $HEAL_TIMEOUT '1' echo $(count_sh_entries $B0/$V0"1")
 EXPECT_WITHIN $HEAL_TIMEOUT '1' echo $(count_sh_entries $B0/$V0"2")
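For context, the arbiter-count assertions above go through the client-side .meta interface, which exposes the active graph's translator options as virtual files under the mount point. A minimal standalone sketch of the same check, outside the test harness; the volume name, server, brick paths, and mount point (testvol, server1, /bricks, /mnt/testvol) are illustrative assumptions, not values from the test:

    # Create and start a replica 3 arbiter volume (the third brick is the arbiter).
    gluster volume create testvol replica 3 arbiter 1 \
        server1:/bricks/testvol0 server1:/bricks/testvol1 server1:/bricks/testvol2
    gluster volume start testvol

    # FUSE-mount it and read the option back through .meta.
    glusterfs --volfile-id=testvol --volfile-server=server1 /mnt/testvol
    cat /mnt/testvol/.meta/graphs/active/testvol-replicate-0/options/arbiter-count
    # Prints "1" on an arbiter volume; on a plain replica 3 volume the
    # arbiter-count file does not exist, so stat/cat fail.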

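The EXPECT_WITHIN checks added before each mount are what make the test deterministic: they poll until every brick reports up rather than mounting immediately after volume start. A rough standalone equivalent of that retry pattern, assuming bash; the function name and one-second poll interval are illustrative, not the harness's own implementation (which lives in the test framework's include.rc):

    # Poll a command until its output matches the expected value or the
    # timeout (in seconds) expires -- a sketch, not the harness code.
    expect_within() {
        local timeout=$1 expected=$2
        shift 2
        local deadline=$((SECONDS + timeout))
        while [ "$SECONDS" -lt "$deadline" ]; do
            [ "$("$@")" = "$expected" ] && return 0
            sleep 1
        done
        return 1
    }

    # e.g. wait up to 20 seconds for the first brick to come up:
    # expect_within 20 "1" brick_up_status $V0 $H0 $B0/${V0}0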