From a60fc2ddc03134fb23c5ed5c0bcb195e1649416b Mon Sep 17 00:00:00 2001
From: Sanju Rakonde
Date: Wed, 21 Feb 2018 12:46:25 +0530
Subject: glusterd: handling brick termination in brick-mux

Problem: There is a race between the last glusterfs_handle_terminate()
response being sent to glusterd and the self-kill that glusterfsd
performs immediately afterwards when the terminated brick is the last
brick in the process.

Solution: In the brick-multiplexing case, when the brick being
terminated is the last brick in the brick process, glusterd kills the
process instead of glusterfsd killing itself. The gf_attach utility is
changed accordingly.

Change-Id: I386c19ca592536daa71294a13d9fc89a26d7e8c0
fixes: bz#1545048
BUG: 1545048
Signed-off-by: Sanju Rakonde
---
 tests/bugs/glusterd/stale-brick-proc-brick-mux.t | 32 ++++++++++++++++++++++++
 1 file changed, 32 insertions(+)
 create mode 100644 tests/bugs/glusterd/stale-brick-proc-brick-mux.t

diff --git a/tests/bugs/glusterd/stale-brick-proc-brick-mux.t b/tests/bugs/glusterd/stale-brick-proc-brick-mux.t
new file mode 100644
index 00000000000..a3efe273898
--- /dev/null
+++ b/tests/bugs/glusterd/stale-brick-proc-brick-mux.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function count_brick_processes {
+        pgrep glusterfsd | wc -l
+}
+
+cleanup;
+
+TEST launch_cluster 2
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+#bug-1549996 - stale brick processes on the nodes after volume deletion
+
+TEST $CLI_1 volume set all cluster.brick-multiplex on
+TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/${V0}{1..3} $H2:$B2/${V0}{1..3}
+TEST $CLI_1 volume start $V0
+
+TEST $CLI_1 volume create $V1 replica 3 $H1:$B1/${V1}{1..3} $H2:$B2/${V1}{1..3}
+TEST $CLI_1 volume start $V1
+
+EXPECT 2 count_brick_processes
+
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume stop $V1
+
+EXPECT 0 count_brick_processes
+
+cleanup
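
For reference, a minimal C sketch of the ordering the fix enforces. This
is not the actual GlusterFS code: send_detach_response(),
brick_handle_terminate(), and glusterd_on_last_brick_detached() are
hypothetical stand-ins for the real handlers.

/* sketch.c: the ordering the fix enforces; all names below are
 * hypothetical stand-ins, not actual GlusterFS symbols. */
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* stand-in for sending the detach RPC response back to glusterd */
static void
send_detach_response(void)
{
        printf("detach response sent to glusterd\n");
}

/* glusterfsd side: the response goes out first, and for the last
 * brick the process no longer kills itself, so the response cannot
 * be lost in a race with the process's own death. */
static void
brick_handle_terminate(bool last_brick)
{
        send_detach_response();
        if (last_brick)
                printf("last brick: waiting for glusterd to kill us\n");
}

/* glusterd side: having received the last-brick response, glusterd
 * terminates the now-empty multiplexed brick process itself. */
static void
glusterd_on_last_brick_detached(pid_t brick_pid)
{
        kill(brick_pid, SIGTERM);
}

int
main(void)
{
        brick_handle_terminate(true);
        /* in the real daemons this runs in glusterd's RPC callback */
        glusterd_on_last_brick_detached(getpid());
        return 0;
}

The design point the sketch illustrates: the detach response is always
on the wire before any kill is issued, and the kill is issued by
glusterd, the only party that knows the response has actually arrived.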