#!/bin/bash

. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../nfs.rc

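# Extract the <opRet> value from the XML output of 'volume info'.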
function get_opret_value () {
  local VOL=$1
  $CLI volume info $VOL --xml | sed -ne 's/.*<opRet>\([-0-9]*\)<\/opRet>/\1/p'
}

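# Print the host:path of brick number $2 of volume $1.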
function check_brick()
{
        local vol=$1;
        local num=$2;
        $CLI volume info $vol | grep "Brick$num" | awk '{print $2}';
}

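# Count the bricks listed in 'volume info' for the given volume.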
function brick_count()
{
        local vol=$1;

        $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
}

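# Return the host UUID of the first brick of the given volume, scraped from
# the XML output of 'volume info'.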
function get_brick_host_uuid()
{
    local vol=$1;
    local uuid_regex='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
    local host_uuid_list=$($CLI volume info $vol --xml | grep "brick.uuid" | grep -o -E "$uuid_regex");

    echo $host_uuid_list | awk '{print $1}'
}

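# Send SIGUSR1 to glusterd to trigger a statedump, then count how many times
# the xlator.glusterd.priv section appears in the resulting dump file.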
function generate_statedump_and_check_for_glusterd_info {
        pid=`pidof glusterd`
        # Remove old stale statedumps
        cleanup_statedump $pid
        kill -USR1 $pid
        # Wait till the statedump is generated
        sleep 1
        fname=$(ls $statedumpdir | grep -E "\.$pid\.dump\.")
        grep -c "xlator.glusterd.priv" $statedumpdir/$fname
}

cleanup;

TEST glusterd;
TEST pidof glusterd;

#bug-1238135-lazy-daemon-initialization-on-demand

GDWD=$($CLI system getwd)

# The glusterd.info file is created on either the first peer probe or volume
# creation, hence we expect the file to be absent at this point.
TEST ! test -e $GDWD/glusterd.info

#bug-913487 - setting volume options before a volume is created should fail

TEST ! $CLI volume set $V0 performance.open-behind off;
TEST pidof glusterd;

#bug-1433578 - glusterd should not crash after probing an invalid peer

TEST ! $CLI peer probe invalid-peer
TEST pidof glusterd;

TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
EXPECT 'Created' volinfo_field $V0 'Status';

#bug-955588 - uuid validation

uuid=`grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=`
EXPECT $uuid get_brick_host_uuid $V0
TEST $CLI volume delete $V0;
TEST ! $CLI volume info $V0;

#bug-958790 - set options from file

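# A group file under $GLUSTERD_WORKDIR/groups/<name> holds one key=value pair
# per line; 'volume set <vol> group <name>' applies all of them in one shot.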
touch $GLUSTERD_WORKDIR/groups/test
echo "read-ahead=off" > $GLUSTERD_WORKDIR/groups/test
echo "open-behind=off" >> $GLUSTERD_WORKDIR/groups/test

TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
TEST $CLI volume set $V0 group test
EXPECT "off" volume_option $V0 performance.read-ahead
EXPECT "off" volume_option $V0 performance.open-behind

TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';

#bug-1321836 - validate opRet value for a non-existent volume

EXPECT 0 get_opret_value $V0
EXPECT -1 get_opret_value "novol"

EXPECT '2' brick_count $V0

#bug-862834 - validate brick status

EXPECT "$H0:$B0/${V0}1" check_brick $V0 '1';
EXPECT "$H0:$B0/${V0}2" check_brick $V0 '2';

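# Creating a volume that reuses a brick of an existing volume must fail.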
TEST ! $CLI volume create $V1 $H0:$B0/${V1}0 $H0:$B0/${V0}1;

#bug-1482344 - setting a volume option at cluster level should not result in a glusterd crash

TEST ! $CLI volume set all transport.listen-backlog 128

# Check the volume info output; if glusterd had crashed, this command would
# fail.
TEST $CLI volume info $V0;

#bug-1002556 and bug-1199451 - the command should retrieve the current op-version of the node
TEST $CLI volume get all cluster.op-version

#bug-1315186 - reject-lowering-down-op-version

OP_VERS_ORIG=$(grep 'operating-version' ${GDWD}/glusterd.info | cut -d '=' -f 2)
# Arithmetic expansion here; `expr $OP_VERS_ORIG-1` without spaces would yield
# the literal string "<N>-1" instead of N-1.
OP_VERS_NEW=$((OP_VERS_ORIG - 1))

TEST ! $CLI volume set all cluster.op-version $OP_VERS_NEW

#bug-1022055 - validate log rotate command

TEST $CLI volume log rotate $V0;

#bug-1092841 - validating barrier enable/disable

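# Enabling or disabling the barrier twice in a row must fail the second time.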
TEST $CLI volume barrier $V0 enable;
TEST ! $CLI volume barrier $V0 enable;

TEST $CLI volume barrier $V0 disable;
TEST ! $CLI volume barrier $V0 disable;

#bug-1095097 - validate volume profile command

TEST $CLI volume profile $V0 start
TEST $CLI volume profile $V0 info

#bug-839595 - validate server-quorum options

TEST $CLI volume set $V0 cluster.server-quorum-type server
EXPECT "server" volume_option $V0 cluster.server-quorum-type
TEST $CLI volume set $V0 cluster.server-quorum-type none
EXPECT "none" volume_option $V0 cluster.server-quorum-type
TEST $CLI volume reset $V0 cluster.server-quorum-type
TEST ! $CLI volume set $V0 cluster.server-quorum-type abc
TEST ! $CLI volume set all cluster.server-quorum-type none
TEST ! $CLI volume set $V0 cluster.server-quorum-ratio 100

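# cluster.server-quorum-ratio is a cluster-level option taking a percentage
# between 0 and 100 (a '%' suffix and fractional values are accepted).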
TEST ! $CLI volume set all cluster.server-quorum-ratio abc
TEST ! $CLI volume set all cluster.server-quorum-ratio -1
TEST ! $CLI volume set all cluster.server-quorum-ratio 100.0000005
TEST $CLI volume set all cluster.server-quorum-ratio 0
EXPECT "0" volume_option $V0 cluster.server-quorum-ratio
TEST $CLI volume set all cluster.server-quorum-ratio 100
EXPECT "100" volume_option $V0 cluster.server-quorum-ratio
TEST $CLI volume set all cluster.server-quorum-ratio 0.0000005
EXPECT "0.0000005" volume_option $V0 cluster.server-quorum-ratio
TEST $CLI volume set all cluster.server-quorum-ratio 100%
EXPECT "100%" volume_option $V0 cluster.server-quorum-ratio

#bug-1265479 - validate-distributed-volume-options

#Setting the data-self-heal option on a plain distribute volume should fail
TEST ! $CLI volume set $V0 data-self-heal on
EXPECT '' volinfo_field $V0 'cluster.data-self-heal';
TEST ! $CLI volume set $V0 cluster.data-self-heal on
EXPECT '' volinfo_field $V0 'cluster.data-self-heal';

#Setting the metadata-self-heal option on a plain distribute volume should fail
TEST ! $CLI volume set $V0 metadata-self-heal on
EXPECT '' volinfo_field $V0 'cluster.metadata-self-heal';
TEST ! $CLI volume set $V0 cluster.metadata-self-heal on
EXPECT '' volinfo_field $V0 'cluster.metadata-self-heal';

#Setting the entry-self-heal option on a plain distribute volume should fail
TEST ! $CLI volume set $V0 entry-self-heal on
EXPECT '' volinfo_field $V0 'cluster.entry-self-heal';
TEST ! $CLI volume set $V0 cluster.entry-self-heal on
EXPECT '' volinfo_field $V0 'cluster.entry-self-heal';

#bug-1163108 - validate min-free-disk-option

## Setting an invalid value for the cluster.min-free-disk option should fail
TEST ! $CLI volume set $V0 min-free-disk ""
TEST ! $CLI volume set $V0 min-free-disk 143.!/12
TEST ! $CLI volume set $V0 min-free-disk 123%
TEST ! $CLI volume set $V0 min-free-disk 194.34%

## Setting a fractional value as a size (the unit is bytes) for
## cluster.min-free-disk should fail
TEST ! $CLI volume set $V0 min-free-disk 199.051
TEST ! $CLI volume set $V0 min-free-disk 111.999

## Setting a valid value for cluster.min-free-disk should pass
TEST  $CLI volume set $V0 min-free-disk 12%
TEST  $CLI volume set $V0 min-free-disk 56.7%
TEST  $CLI volume set $V0 min-free-disk 120
TEST  $CLI volume set $V0 min-free-disk 369.0000

#bug-1179175-uss-option-validation

## Setting features.uss to a non-boolean value should fail.
TEST ! $CLI volume set $V0 features.uss abcd
# Quoted: an unquoted '#' would start a shell comment and drop the value.
TEST ! $CLI volume set $V0 features.uss '#$#$'
TEST ! $CLI volume set $V0 features.uss 2324

## Setting other options with valid values should succeed.
TEST $CLI volume set $V0 barrier enable
TEST $CLI volume set $V0 ping-timeout 60

## Setting features.uss to a valid boolean value should succeed.
TEST  $CLI volume set $V0 features.uss enable
TEST  $CLI volume set $V0 features.uss disable

#bug-1209329 - daemon-svcs-on-reset-volume

##Enable bitrot and verify that bitd is running
TEST $CLI volume bitrot $V0 enable
EXPECT 'on' volinfo_field $V0 'features.bitrot'
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count

##Do a reset force, which restores the bitrot options to their defaults
TEST $CLI volume reset $V0 force;
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_bitd_count

##Enable the uss option and verify that snapd is running
TEST $CLI volume set $V0 features.uss on
EXPECT 'on' volinfo_field $V0 'features.uss'
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_snapd_count

##Do a reset force, which restores the uss option to its default
TEST $CLI volume reset $V0 force;
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_snapd_count

##Verify that nfs is disabled by default
EXPECT "0" get_nfs_count

##Enable nfs and verify
TEST $CLI volume set $V0 nfs.disable off
EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
EXPECT "1" get_nfs_count

##Do a reset force, which restores nfs.disable to its default
TEST $CLI volume reset $V0 force;
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_nfs_count

##Enable the uss option again and verify that snapd is running
TEST $CLI volume set $V0 features.uss on
EXPECT 'on' volinfo_field $V0 'features.uss'
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_snapd_count

##Disable the uss option using the set command and verify that snapd stops
TEST $CLI volume set $V0 features.uss off
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_snapd_count

##Set nfs.disable on (i.e. disable nfs) and verify that the nfs server stops
TEST $CLI volume set $V0 nfs.disable on
EXPECT 'on' volinfo_field $V0 'nfs.disable'
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_nfs_count

##Set nfs.disable off (re-enable nfs) using the set command
TEST $CLI volume set $V0 nfs.disable off
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_nfs_count

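# Restart glusterd while a volume is started and verify that the volume can
# still be queried afterwards.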
TEST $CLI volume info;
TEST $CLI volume create $V1 $H0:$B0/${V1}1
TEST $CLI volume start $V1
pkill glusterd;
pkill glusterfsd;
TEST glusterd
TEST $CLI volume status $V1

#bug-853601 - Avoid using /var/lib/glusterd as a brick
TEST ! $CLI volume create "test" $H0:/var/lib/glusterd
TEST ! $CLI volume create "test" $H0:/var/lib/glusterd force
TEST ! $CLI volume create "test" $H0:/var/lib/glusterd/abc
TEST ! $CLI volume create "test" $H0:/var/lib/glusterd/abc force
mkdir -p /xyz/var/lib/glusterd/abc
TEST  $CLI volume create "test" $H0:/xyz/var/lib/glusterd/abc
EXPECT 'Created' volinfo_field "test" 'Status';

#While taking a statedump, there is a TRY_LOCK on call_frame, which may cause
#failure. So adding an EXPECT_WITHIN.
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" generate_statedump_and_check_for_glusterd_info

cleanup_statedump `pidof glusterd`
cleanup