summaryrefslogtreecommitdiffstats
path: root/tests/basic/afr/afr-read-hash-mode.t
blob: eeff10d8ebd10c6515e1f937b388fdfd938c9512 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
#!/bin/bash
# Test for the AFR cluster.read-hash-mode volume option on a 1x3 arbiter
# volume: with the default mode (1) all reads of a file should be served by
# a single brick; with mode 3 the reads should be spread across both data
# bricks, and the arbiter must never serve reads.

. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc

cleanup

# Print the number of bricks that have served READ fops since the previous
# incremental profile query. Relies on $CLI/$V0 exported by the test
# framework; each brick with READ activity contributes one matching line.
function reads_brick_count {
        # grep -cw counts whole-word READ lines directly (was: grep | wc -l).
        # Output is identical; grep's non-zero status on a zero count is
        # ignored by callers, which only capture stdout.
        $CLI volume profile $V0 info incremental | grep -cw READ
}

TEST glusterd
TEST pidof glusterd
# 1x(2+1) volume: bricks 0 and 1 hold data, brick 2 is the arbiter.
TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0..2}

# Disable choose-local and all client-side performance/caching xlators so
# that every read actually reaches a brick and shows up in the profile.
TEST $CLI volume set $V0 cluster.choose-local off
TEST $CLI volume set $V0 performance.quick-read off
TEST $CLI volume set $V0 performance.io-cache off
TEST $CLI volume set $V0 performance.write-behind off
TEST $CLI volume set $V0 performance.stat-prefetch off
TEST $CLI volume set $V0 performance.read-ahead off
TEST $CLI volume start $V0

# Mount with kernel entry/attribute caching disabled, create an 8MB test
# file, then unmount so later reads cannot be served from any cache.
TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
TEST dd if=/dev/urandom of=$M0/FILE bs=1M count=8
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0

# TEST if the option gives the intended behavior. The way we perform this test
# is by performing reads from the mount and write to /dev/null. If the
# read-hash-mode is 3, then for a given file, more than 1 brick should serve the
# read-fops where as with the default read-hash-mode (i.e. 1), only 1 brick will.

# read-hash-mode=1 (default): all reads of FILE must hit exactly one brick.
TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
EXPECT "1" mount_get_option_value $M0 $V0-replicate-0 read-hash-mode
TEST $CLI volume profile $V0 start
TEST dd if=$M0/FILE of=/dev/null bs=1M
# $(...) instead of legacy backticks; only stdout of the helper is used.
count=$(reads_brick_count)
TEST [ $count -eq 1 ]
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0

# read-hash-mode=3: reads of FILE must be spread over both data bricks.
TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
TEST $CLI volume set $V0 cluster.read-hash-mode 3
EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "3" mount_get_option_value $M0 $V0-replicate-0 read-hash-mode
# Reset the incremental profile counters so only the next dd is measured.
TEST $CLI volume profile $V0 info clear
TEST dd if=$M0/FILE of=/dev/null bs=1M
# $(...) instead of legacy backticks; only stdout of the helper is used.
count=$(reads_brick_count)
TEST [ $count -eq 2 ]

# Check that the arbiter (3rd brick, ${V0}2) did not serve any reads: the
# 'volume top' output must have no READ entry for FILE on that brick.
# NOTE(review): $arbiter_reads is unquoted in the [ -z ] test; a multi-word
# value would break it. Left as-is because the TEST macro eval-expands its
# arguments — confirm quoting behavior against include.rc before changing.
arbiter_reads=$($CLI volume top $V0 read brick $H0:$B0/${V0}2|grep FILE|awk '{print $1}')
TEST [ -z $arbiter_reads ]

cleanup;