#!/bin/bash

. $(dirname $0)/../include.rc
. $(dirname $0)/../volume.rc

cleanup;

TESTS_EXPECTED_IN_LOOP=6

TEST glusterd
TEST pidof glusterd
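#Create a replica 3 volume with 6 bricks, i.e. 2 replica subvolumes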
TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
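#Disable client-side background heals and write optimizations so pending heals are left to the shd and heal counts stay deterministic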
TEST $CLI volume set $V0 cluster.background-self-heal-count 0
TEST $CLI volume set $V0 cluster.eager-lock off
TEST $CLI volume set $V0 performance.flush-behind off
TEST $CLI volume start $V0

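#Create two more replica 3 volumes and two disperse 4+2 volumes; with shd mux all of them should be served by a single self-heal daemon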
for i in $(seq 1 2); do
   TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
   TEST $CLI volume start ${V0}_afr$i
   TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
   TEST $CLI volume start ${V0}_ec$i
done

EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
#Check that the thread count becomes number of volumes * bricks per volume, one ec_shd_index_healer per brick (2*6=12)
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "ec_shd_index_healer"
#Check that the thread count becomes number of volumes * bricks per volume, one afr_shd_index_healer per brick (3*6=18)
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "afr_shd_index_healer"

TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}{6,7,8};
#Check that the thread count becomes the previous total plus 3 additional threads for the newly added bricks (3*6+3=21)

EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^21$" number_healer_threads_shd $V0 "afr_shd_index_healer"

#Remove the bricks and check that the detach is successful (thread count drops back to 18)
$CLI volume remove-brick $V0 $H0:$B0/${V0}{6,7,8} force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "afr_shd_index_healer"

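#Ensure any graph cleanup threads left over from the remove-brick have finished before the next graph change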
EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "glusterfs_graph_cleanup"
TEST $CLI volume add-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5};
#Check that the thread count becomes the previous total plus 6 additional threads for the newly added bricks (2*6+6=18)

EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "ec_shd_index_healer"

#Remove the bricks and check that the detach is successful (thread count drops back to 12)
$CLI volume remove-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5} force

EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "ec_shd_index_healer"


for i in $(seq 1 2); do
   TEST $CLI volume stop ${V0}_afr$i
   TEST $CLI volume stop ${V0}_ec$i
done

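#Only $V0 is running now, so the count should drop to one healer per remaining brick (6)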
EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"

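#Mount the volume, kill one brick in each replica subvolume, and create files to accumulate pending heals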
TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0

TEST kill_brick $V0 $H0 $B0/${V0}0
TEST kill_brick $V0 $H0 $B0/${V0}4

TEST touch $M0/foo{1..100}

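#Each file is indexed on the 2 surviving bricks of its replica subvolume (100*2=200), plus a root-directory entry on each of the 4 surviving bricks (200+4=204)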
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0

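#Restart the killed bricks with a force start; the muxed shd should heal everything back down to zero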
TEST $CLI volume start ${V0} force

EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0

TEST rm -rf $M0/*
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
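#Record the shd pid so we can verify below that the same muxed process is reused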
shd_pid=$(get_shd_mux_pid $V0)
TEST $CLI volume create ${V0}_distribute1 $H0:$B0/${V0}_distribute10
TEST $CLI volume start ${V0}_distribute1

#Creating a non-replicate/non-ec volume should have no effect on the shd
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
EXPECT "^${shd_pid}$" get_shd_mux_pid $V0

TEST mkdir $B0/add/
#Now convert the distributed volume to replicate
TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3}
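#The converted volume now contributes 3 bricks worth of healer threads (6+3=9)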
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^9$" number_healer_threads_shd $V0 "afr_shd_index_healer"

#Scale the volume back down and make sure the extra healer threads go away
TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"

#Before stopping the process, make sure there are no pending cleanup threads hanging
EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "glusterfs_graph_cleanup"

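#With $V0 gone, ${V0}_distribute1 is the only running volume and it is plain distribute, so the shd should terminate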
TEST $CLI volume stop ${V0}
TEST $CLI volume delete ${V0}
EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count

TEST rm -rf $B0/add/2 $B0/add/3

#Now convert the distributed volume back to replicate and make sure that a new shd is spawned
TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3};
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
EXPECT_WITHIN $HEAL_TIMEOUT "^3$" number_healer_threads_shd ${V0}_distribute1 "afr_shd_index_healer"

#Now convert the replica volume to distribute again and make sure the shd is now stopped
TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
TEST rm -rf $B0/add/

EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count

cleanup