From 4da244caccd38a77de5428b6954f565219ef0719 Mon Sep 17 00:00:00 2001
From: Sanju Rakonde
Date: Fri, 6 Apr 2018 01:53:45 +0530
Subject: glusterd: handling brick termination in brick-mux

Problem: There is a race between the glusterfs_handle_terminate()
response sent to glusterd from the last brick of the process and the
socket disconnect event that occurs after the brick process is killed.

Solution: When it is the last brick of the brick process, instead of
sending GLUSTERD_BRICK_TERMINATE to the brick process, glusterd kills
the process directly (the same as in the non-brick-multiplexing case).

A test case is added for
https://bugzilla.redhat.com/show_bug.cgi?id=1549996

Change-Id: If94958cd7649ea48d09d6af7803a0f9437a85503
fixes: bz#1545048
Signed-off-by: Sanju Rakonde
---
 tests/bugs/glusterd/stale-brick-proc-brick-mux.t | 33 ++++++++++++++++++++++++
 1 file changed, 33 insertions(+)
 create mode 100644 tests/bugs/glusterd/stale-brick-proc-brick-mux.t

(limited to 'tests/bugs')

diff --git a/tests/bugs/glusterd/stale-brick-proc-brick-mux.t b/tests/bugs/glusterd/stale-brick-proc-brick-mux.t
new file mode 100644
index 00000000000..f0a89760bb6
--- /dev/null
+++ b/tests/bugs/glusterd/stale-brick-proc-brick-mux.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function count_brick_processes {
+        pgrep glusterfsd | wc -l
+}
+
+cleanup;
+
+TEST launch_cluster 2
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+#bug-1549996 - stale brick processes on the nodes after volume deletion
+
+TEST $CLI_1 volume set all cluster.brick-multiplex on
+TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/${V0}{1..3} $H2:$B2/${V0}{1..3}
+TEST $CLI_1 volume start $V0
+
+TEST $CLI_1 volume create $V1 replica 3 $H1:$B1/${V1}{1..3} $H2:$B2/${V1}{1..3}
+TEST $CLI_1 volume start $V1
+
+EXPECT 2 count_brick_processes
+
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume stop $V1
+
+EXPECT 0 count_brick_processes
+
+cleanup
+
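Note: the view above is limited to 'tests/bugs', so the diff contains only
the regression test; the glusterd-side change the commit message describes
is not shown here. As a rough illustration of that decision (kill the brick
process directly when the brick being stopped is the last one multiplexed
into it, instead of sending GLUSTERD_BRICK_TERMINATE), here is a minimal,
self-contained C sketch. It is not the code from this patch: brick_proc_t,
brick_count and send_brick_terminate() are hypothetical stand-ins for
glusterd internals.

#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Hypothetical descriptor for one (possibly multiplexed) brick process. */
typedef struct {
        pid_t pid;          /* pid of the glusterfsd process */
        int   brick_count;  /* bricks currently attached to this process */
} brick_proc_t;

/* Hypothetical stand-in for the GLUSTERD_BRICK_TERMINATE RPC: ask the
 * brick process to detach a single brick and keep running. */
static int
send_brick_terminate(brick_proc_t *proc, const char *brick_path)
{
        printf("detach %s from pid %d\n", brick_path, (int)proc->pid);
        return 0;
}

static int
stop_brick(brick_proc_t *proc, const char *brick_path)
{
        if (proc->brick_count == 1) {
                /* Last brick of the process: kill the process directly,
                 * as in the non-multiplexed case, instead of sending
                 * GLUSTERD_BRICK_TERMINATE and racing with the socket
                 * disconnect that follows the process exit. */
                if (kill(proc->pid, SIGTERM) != 0)
                        return -1;
                waitpid(proc->pid, NULL, 0);
                return 0;
        }

        /* Other bricks still run in this process: detach only this one. */
        return send_brick_terminate(proc, brick_path);
}

The test itself checks the observable effect: with brick multiplexing
enabled and two volumes started across two nodes, count_brick_processes
reports 2 (one glusterfsd per node), and after both volumes are stopped it
must drop to 0; any non-zero count would indicate a stale brick process.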