diff options
-rw-r--r-- | tests/basic/afr/arbiter-mount.t    | 44 ++++++++++++++++++++++++++++
-rw-r--r-- | tests/basic/afr/arbiter.t          | 14 ++++++---
-rw-r--r-- | xlators/cluster/afr/src/afr-common.c | 13 ++++++++
3 files changed, 67 insertions, 4 deletions
diff --git a/tests/basic/afr/arbiter-mount.t b/tests/basic/afr/arbiter-mount.t new file mode 100644 index 00000000000..47c327633f3 --- /dev/null +++ b/tests/basic/afr/arbiter-mount.t @@ -0,0 +1,44 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../volume.rc +. $(dirname $0)/../../afr.rc +. $(dirname $0)/../../nfs.rc +cleanup; + +#Check that mounting fails when only arbiter brick is up. + +TEST glusterd; +TEST pidof glusterd + +TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2} +TEST $CLI volume start $V0 +EXPECT 'Started' volinfo_field $V0 'Status' +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; + +TEST kill_brick $V0 $H0 $B0/${V0}0 +TEST kill_brick $V0 $H0 $B0/${V0}1 + +# Doing `mount -t glusterfs $H0:$V0 $M0` fails right away but doesn't work on NetBSD +# So check that stat <mount> fails instead. +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 +TEST ! stat $M0 +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +mount_nfs $H0:/$V0 $N0 +TEST [ $? -ne 0 ] + +TEST $CLI volume start $V0 force +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" brick_up_status $V0 $H0 $B0/${V0}0 +EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" brick_up_status $V0 $H0 $B0/${V0}1 +EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available; + +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 +TEST stat $M0 +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 + +mount_nfs $H0:/$V0 $N0 +TEST [ $? -eq 0 ] +EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0 + +cleanup diff --git a/tests/basic/afr/arbiter.t b/tests/basic/afr/arbiter.t index be8f676d1ec..c91e2e90098 100644 --- a/tests/basic/afr/arbiter.t +++ b/tests/basic/afr/arbiter.t @@ -20,9 +20,13 @@ TEST $CLI volume delete $V0 # Create and mount a replica 3 arbiter volume. 
TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2} TEST $CLI volume set $V0 performance.write-behind off +TEST $CLI volume set $V0 performance.stat-prefetch off TEST $CLI volume set $V0 cluster.self-heal-daemon off +TEST $CLI volume set $V0 cluster.metadata-self-heal off +TEST $CLI volume set $V0 cluster.data-self-heal off +TEST $CLI volume set $V0 cluster.entry-self-heal off TEST $CLI volume start $V0 -TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0; +TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0; TEST stat $M0/.meta/graphs/active/$V0-replicate-0/options/arbiter-count EXPECT "1" cat $M0/.meta/graphs/active/$V0-replicate-0/options/arbiter-count @@ -40,9 +44,11 @@ TEST kill_brick $V0 $H0 $B0/${V0}1 echo "B2 is down, B3 is the only source, writes will fail" >> $M0/file EXPECT_NOT "0" echo $? TEST ! cat $M0/file -# Metadata I/O should still succeed. -TEST getfattr -n user.name $M0/file -TEST setfattr -n user.name -v value3 $M0/file +# Though metadata IO could have been served from arbiter, we do not allow it +# anymore as FOPS like getfattr could be overloaded to return iatt buffers for +# use by other translators. +TEST ! getfattr -n user.name $M0/file +TEST ! setfattr -n user.name -v value3 $M0/file #shd should not data self-heal from arbiter to the sinks. 
TEST $CLI volume set $V0 cluster.self-heal-daemon on diff --git a/xlators/cluster/afr/src/afr-common.c b/xlators/cluster/afr/src/afr-common.c index 36c22ad7dc0..bfff6048799 100644 --- a/xlators/cluster/afr/src/afr-common.c +++ b/xlators/cluster/afr/src/afr-common.c @@ -695,6 +695,10 @@ afr_replies_interpret (call_frame_t *frame, xlator_t *this, inode_t *inode) data_readable[i] = 1; metadata_readable[i] = 1; } + if (AFR_IS_ARBITER_BRICK (priv, ARBITER_BRICK_INDEX)) { + data_readable[ARBITER_BRICK_INDEX] = 0; + metadata_readable[ARBITER_BRICK_INDEX] = 0; + } for (i = 0; i < priv->child_count; i++) { if (!replies[i].valid) { @@ -1792,9 +1796,14 @@ unwind: read_subvol = spb_choice; else read_subvol = afr_first_up_child (frame, this); + } par_read_subvol = afr_get_parent_read_subvol (this, parent, replies, readable); + if (AFR_IS_ARBITER_BRICK (priv, read_subvol) && local->op_ret == 0) { + local->op_ret = -1; + local->op_errno = ENOTCONN; + } AFR_STACK_UNWIND (lookup, frame, local->op_ret, local->op_errno, local->inode, &local->replies[read_subvol].poststat, @@ -2241,6 +2250,10 @@ unwind: else read_subvol = afr_first_up_child (frame, this); } + if (AFR_IS_ARBITER_BRICK (priv, read_subvol) && local->op_ret == 0) { + local->op_ret = -1; + local->op_errno = ENOTCONN; + } AFR_STACK_UNWIND (lookup, frame, local->op_ret, local->op_errno, local->inode, &local->replies[read_subvol].poststat, |