blob: baf629dbf9ba32f0a2b23466b8335c40d54e855c (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
|
#!/bin/bash
# This test checks brick multiplexing: compatible volumes must share a
# single brick process, and making a volume incompatible (by changing a
# protocol/server option) must force it into a separate process.
. $(dirname $0)/../include.rc
. $(dirname $0)/../traps.rc
. $(dirname $0)/../volume.rc
count_processes () {
    # Number of running glusterfsd processes. "pgrep -x" would give an
    # exact name match, but NetBSD's pgrep doesn't support -x; since no
    # other process name starts with "glusterfsd", a plain pattern
    # match is effectively exact anyway. pgrep prints one PID per line,
    # so counting lines counts processes.
    pgrep glusterfsd | wc -l
}
count_brick_pids () {
    # Number of distinct brick PIDs across all volumes: pull the
    # <pid> values out of the XML status, drop the "N/A" placeholders
    # for bricks that aren't running, and count the unique values.
    $CLI --xml volume status all \
        | sed -n 's#.*<pid>\([^<]*\).*#\1#p' \
        | grep -v "N/A" \
        | sort -u \
        | wc -l
}
cleanup

TEST glusterd
# Turn on brick multiplexing cluster-wide before creating any volumes.
TEST $CLI volume set all cluster.brick-multiplex yes

# Create two vanilla volumes.
TEST $CLI volume create $V0 $H0:$B0/brick-${V0}-{0,1}
TEST $CLI volume create $V1 $H0:$B0/brick-${V1}-{0,1}

# Enable brick log-level DEBUG. Use TEST/$CLI like every other step in
# this script (the original bare "gluster v set" bypassed the harness,
# so a failure here would have been silently ignored).
TEST $CLI volume set $V0 diagnostics.brick-log-level DEBUG

# Start both.
TEST $CLI volume start $V0
TEST $CLI volume start $V1

# There should be only one process for compatible volumes. We can't use
# EXPECT_WITHIN here because it could transiently see one process as two are
# coming up, and yield a false positive.
sleep $PROCESS_UP_TIMEOUT
EXPECT "1" count_processes
EXPECT "1" count_brick_pids

# Make the second volume incompatible with the first.
TEST $CLI volume stop $V1
TEST $CLI volume set $V1 server.manage-gids no
TEST $CLI volume start $V1

# There should be two processes this time (can't share protocol/server).
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" count_processes

cleanup;
|