From dae553d2538cf465edfff567909b846025762a3e Mon Sep 17 00:00:00 2001
From: Atin Mukherjee
Date: Thu, 9 Feb 2017 12:56:38 +0530
Subject: glusterd: ignore return code of glusterd_restart_bricks

When GlusterD is restarted on a multi-node cluster, while syncing the
global options from another GlusterD it checks for quorum and, based on
that, decides whether to stop or start a brick. However, the return
code of glusterd_restart_bricks () was being checked: when no bricks
are to be started the return value is non-zero, so the import ends up
failing, which is incorrect.

The fix is simply to ignore the return code of glusterd_restart_bricks ().

>Reviewed-on: https://review.gluster.org/16574
>Smoke: Gluster Build System
>NetBSD-regression: NetBSD Build System
>CentOS-regression: Gluster Build System
>Reviewed-by: Samikshan Bairagya
>Reviewed-by: Jeff Darcy
>(cherry picked from commit 55625293093d485623f3f3d98687cd1e2c594460)

Change-Id: I37766b0bba138d2e61d3c6034bd00e93ba43e553
BUG: 1420993
Signed-off-by: Atin Mukherjee
Reviewed-on: https://review.gluster.org/16594
Smoke: Gluster Build System
Reviewed-by: Samikshan Bairagya
CentOS-regression: Gluster Build System
NetBSD-regression: NetBSD Build System
---
 tests/bugs/glusterd/bug-1420637-volume-sync-fix.t | 40 +++++++++++++++++++++++
 1 file changed, 40 insertions(+)
 create mode 100644 tests/bugs/glusterd/bug-1420637-volume-sync-fix.t

diff --git a/tests/bugs/glusterd/bug-1420637-volume-sync-fix.t b/tests/bugs/glusterd/bug-1420637-volume-sync-fix.t
new file mode 100644
index 00000000000..0bd9988f6be
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1420637-volume-sync-fix.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+# Test case for checking that when the server-quorum-ratio value is changed
+# on one glusterd while the other is down, the other changes made get synced
+# back properly once that glusterd is brought back up.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+TEST launch_cluster 2
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+# Let's create the volume
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
+
+# Start the volume
+TEST $CLI_1 volume start $V0
+TEST $CLI_1 volume set $V0 performance.readdir-ahead on
+
+# Bring down the 2nd glusterd
+TEST kill_glusterd 2
+
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 60
+TEST $CLI_1 volume set $V0 performance.readdir-ahead off
+
+# Bring back the 2nd glusterd
+TEST $glusterd_2
+
+# After the 2nd glusterd comes back, there will be 2 nodes in the cluster
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
+
+EXPECT_WITHIN $PROBE_TIMEOUT "60" volinfo_field_2 all cluster.server-quorum-ratio
+EXPECT_WITHIN $PROBE_TIMEOUT "off" volinfo_field_2 $V0 performance.readdir-ahead
+
+cleanup;
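
Note on the underlying fix: this backport's diff only adds the regression
test; the glusterd code change itself comes from the cherry-picked commit
referenced above. A minimal, self-contained model of the behaviour the
commit message describes (ignoring the return code of
glusterd_restart_bricks () so that a quorum-driven decision not to start
bricks no longer fails the import) is sketched below. All identifiers in
the sketch are illustrative stand-ins, not the real GlusterFS functions
or types.

/* Self-contained model of the fix described in the commit message.
 * Names here (restart_bricks, import_global_opts, quorum_met) are
 * illustrative, not the actual GlusterFS identifiers. */
#include <stdio.h>

/* Stand-in for glusterd_restart_bricks (): returns non-zero when quorum
 * dictates that bricks must not be started, which is a legitimate
 * outcome and not an error in the option import. */
static int
restart_bricks (int quorum_met)
{
        if (!quorum_met) {
                printf ("quorum not met: bricks left stopped\n");
                return -1;
        }
        printf ("quorum met: bricks (re)started\n");
        return 0;
}

/* Stand-in for the global-option import path.  Before the fix the
 * return value of restart_bricks () was propagated, so a non-zero
 * value failed the whole import.  After the fix it is ignored. */
static int
import_global_opts (int quorum_changed, int quorum_met)
{
        if (quorum_changed)
                (void) restart_bricks (quorum_met);  /* ret ignored */

        return 0;  /* import of global options succeeds regardless */
}

int
main (void)
{
        /* Quorum option changed while quorum is not met: previously
         * the import failed here; with the fix it returns 0. */
        int ret = import_global_opts (1 /* changed */, 0 /* not met */);
        printf ("import result: %d\n", ret);
        return ret;
}

Compiled and run, this model prints "quorum not met: bricks left stopped"
followed by "import result: 0", mirroring the intended post-fix behaviour:
the option import succeeds even when bricks are intentionally left down.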