#!/bin/bash

. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
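# The included .rc files provide the multi-node test helpers used below:
# launch_cluster, kill_glusterd, peer_count, cluster_brick_up_status,
# volinfo_field_n, and the per-node $CLI_n/$H_n/$B_n/$glusterd_n variables.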

cleanup;

TEST launch_cluster 2

TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
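# peer_count is a framework helper; the expected value of 1 here means the
# probed peer is seen as connected after the probe completes.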

TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
TEST $CLI_1 volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}1
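# With cluster.server-quorum-type set to "server", glusterd is expected to stop
# this volume's bricks whenever fewer than cluster.server-quorum-ratio percent
# of the peers in the pool are up, and to bring them back once quorum is regained.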

#bug-1177132 - sync server quorum options when a node is brought up
TEST $CLI_1 volume set all cluster.server-quorum-ratio 52
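# In a 2-node cluster any ratio above 50% means both glusterds must be running
# for server quorum to hold, so killing either node below breaks quorum.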

#Bring down 2nd glusterd
TEST kill_glusterd 2
EXPECT_WITHIN $PROBE_TIMEOUT 0 peer_count

#bug-1104642 - sync server quorum options when a node is brought up
#set the cluster-wide (volume all) option from the 1st glusterd while the 2nd is down
TEST $CLI_1 volume set all cluster.server-quorum-ratio 80
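# Note: this cluster-wide "volume set all" is expected to succeed even though
# quorum is currently lost; once node 2 rejoins, the checks further below
# verify that the new ratio (80) has been synced to it.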

# Now quorum is not met. Add-brick, remove-brick, and volume-set commands
# (commands based on the syncop framework) should fail
TEST ! $CLI_1 volume add-brick $V0 $H1:$B1/${V0}2
TEST ! $CLI_1 volume remove-brick $V0 $H1:$B1/${V0}0 start
TEST ! $CLI_1 volume set $V0 barrier enable

# Quorum is not met, so rebalance/profile start should fail
TEST ! $CLI_1 volume rebalance $V0 start
TEST ! $CLI_1 volume profile $V0 start

#bug-1690753 - volume stop should fail when quorum is not met
TEST ! $CLI_1 volume stop $V0

#Bring back the 2nd glusterd
TEST $glusterd_2

#verify whether the quorum-ratio value has been synced to the restarted node
EXPECT_WITHIN $PROBE_TIMEOUT "80" volinfo_field_1 all cluster.server-quorum-ratio
EXPECT_WITHIN $PROBE_TIMEOUT '1' peer_count
EXPECT_WITHIN $PROBE_TIMEOUT "80" volinfo_field_2 all cluster.server-quorum-ratio

# Now quorum is met.
# Add-brick, remove-brick, and volume-set commands should succeed
TEST  $CLI_1 volume add-brick $V0 $H2:$B2/${V0}2
TEST  $CLI_1 volume remove-brick $V0 $H2:$B2/${V0}2 start
TEST  $CLI_1 volume set $V0 barrier enable
TEST  $CLI_1 volume remove-brick $V0 $H2:$B2/${V0}2 stop

EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}1

## Stop the volume
TEST $CLI_1 volume stop $V0

## Bring down 2nd glusterd
TEST kill_glusterd 2

## Now quorum is not met. Starting the volume from the 1st node should fail
TEST ! $CLI_1 volume start $V0

## Bring back 2nd glusterd
TEST $glusterd_2

# After the 2nd glusterd comes back, there will be 2 nodes in the cluster
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;

## Now quorum is met. Starting the volume from the 1st node should succeed.
TEST $CLI_1 volume start $V0

# Now re-execute the same profile command and this time it should succeed
TEST $CLI_1 volume profile $V0 start

#bug-1352277

EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}1

TEST $CLI_1 volume set $V0 cluster.server-quorum-type none

# Bring down all the gluster processes
TEST killall_gluster

#bring back the 1st glusterd and check whether the brick process comes back
TEST $glusterd_1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0

#enabling quorum should bring down the brick
TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0

TEST $glusterd_2
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}1

#bug-1367478 - brick processes should not be up when quorum is not met
TEST $CLI_1 volume create $V1 $H1:$B1/${V1}1 $H2:$B2/${V1}2
TEST $CLI_1 volume start $V1

EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V1 $H1 $B1/${V1}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V1 $H2 $B2/${V1}2

# Restart 2nd glusterd
TEST kill_glusterd 2
TEST $glusterd_2

# Check if all bricks are up
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V1 $H1 $B1/${V1}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V1 $H2 $B2/${V1}2

cleanup