path: root/tests/bugs/replicate/bug-1305031-block-reads-on-metadata-sbrain.t
#!/bin/bash

. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc

cleanup;

# Test that for files in metadata split-brain, we do not wind even a single read.
TEST glusterd
TEST pidof glusterd

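# Two-brick replica 2 volume; both bricks are hosted on this node for the test.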
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}

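# Keep the self-heal daemon off and disable the client-side performance
# translators so reads hit AFR directly instead of being served from caches.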
TEST $CLI volume set $V0 self-heal-daemon off
TEST $CLI volume set $V0 performance.read-ahead off
TEST $CLI volume set $V0 performance.write-behind off
TEST $CLI volume set $V0 performance.io-cache off
TEST $CLI volume set $V0 performance.stat-prefetch off
TEST $CLI volume set $V0 performance.quick-read off
TEST $CLI volume set $V0 performance.open-behind off
TEST $CLI volume start $V0

TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024

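# Create a metadata split-brain: change the file's mode while each brick is
# down in turn, so each replica records pending metadata changes against the
# other and AFR has no authoritative copy to read from.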
TEST kill_brick $V0 $H0 $B0/${V0}0
TEST chmod 700 $M0/file
TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
TEST kill_brick $V0 $H0 $B0/${V0}1
TEST chmod 777 $M0/file
TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
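# Remount so the file is looked up afresh and the new client sees the
# split-brain state.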
TEST umount $M0
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0

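# Reads on a file in metadata split-brain must fail with EIO, so cat should
# produce no output and the line count must be 0.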
lines=$(cat $M0/file | wc -l)
EXPECT "0" echo $lines
TEST umount $M0
cleanup;