From 81684fecc7ac6412764f05acd0cb51929a1569a2 Mon Sep 17 00:00:00 2001 From: Atin Mukherjee Date: Thu, 9 Feb 2017 12:56:38 +0530 Subject: glusterd: ignore return code of glusterd_restart_bricks When GlusterD is restarted on a multi node cluster, while syncing the global options from other GlusterD, it checks for quorum and based on which it decides whether to stop/start a brick. However we handle the return code of this function in which case if we don't want to start any bricks the ret will be non zero and we will end up failing the import which is incorrect. Fix is just to ignore the ret code of glusterd_restart_bricks () >Reviewed-on: https://review.gluster.org/16574 >Smoke: Gluster Build System >NetBSD-regression: NetBSD Build System >CentOS-regression: Gluster Build System >Reviewed-by: Samikshan Bairagya >Reviewed-by: Jeff Darcy >(cherry picked from commit 55625293093d485623f3f3d98687cd1e2c594460) Change-Id: I37766b0bba138d2e61d3c6034bd00e93ba43e553 BUG: 1420991 Signed-off-by: Atin Mukherjee Reviewed-on: https://review.gluster.org/16593 Smoke: Gluster Build System CentOS-regression: Gluster Build System Reviewed-by: Samikshan Bairagya NetBSD-regression: NetBSD Build System Reviewed-by: Shyamsundar Ranganathan --- tests/bugs/glusterd/bug-1420637-volume-sync-fix.t | 40 +++++++++++++++++++++++ xlators/mgmt/glusterd/src/glusterd-utils.c | 12 ++----- 2 files changed, 43 insertions(+), 9 deletions(-) create mode 100644 tests/bugs/glusterd/bug-1420637-volume-sync-fix.t diff --git a/tests/bugs/glusterd/bug-1420637-volume-sync-fix.t b/tests/bugs/glusterd/bug-1420637-volume-sync-fix.t new file mode 100644 index 00000000000..0bd9988f6be --- /dev/null +++ b/tests/bugs/glusterd/bug-1420637-volume-sync-fix.t @@ -0,0 +1,40 @@ +#!/bin/bash + +# Test case for checking when server-quorum-ratio value is changed on one +# glusterd where the other is down, the other changes done get synced back +properly when the glusterd is brought up. + +. $(dirname $0)/../../include.rc +. 
$(dirname $0)/../../volume.rc +. $(dirname $0)/../../cluster.rc + +cleanup; + +TEST launch_cluster 2 + +TEST $CLI_1 peer probe $H2; +EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count + +# Let's create & start the volume +TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1 + +# Start the volume +TEST $CLI_1 volume start $V0 +TEST $CLI_1 volume set $V0 performance.readdir-ahead on + +# Bring down 2nd glusterd +TEST kill_glusterd 2 + +TEST $CLI_1 volume set all cluster.server-quorum-ratio 60 +TEST $CLI_1 volume set $V0 performance.readdir-ahead off + +# Bring back 2nd glusterd +TEST $glusterd_2 + +# After 2nd glusterd comes back, there will be 2 nodes in the cluster +EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count; + +EXPECT_WITHIN $PROBE_TIMEOUT "60" volinfo_field_2 all cluster.server-quorum-ratio +EXPECT_WITHIN $PROBE_TIMEOUT "off" volinfo_field_2 $V0 performance.readdir-ahead + +cleanup; diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c index 561bbb4af5b..5fd3727c023 100644 --- a/xlators/mgmt/glusterd/src/glusterd-utils.c +++ b/xlators/mgmt/glusterd/src/glusterd-utils.c @@ -4384,15 +4384,8 @@ glusterd_import_global_opts (dict_t *friend_data) * recompute if quorum is met. If quorum is not met bricks are * not started and those already running are stopped */ - if (old_quorum != new_quorum) { - ret = glusterd_restart_bricks (conf); - if (ret) { - gf_msg ("glusterd", GF_LOG_INFO, 0, - GD_MSG_SERVER_QUORUM_NOT_MET, - "Restarting bricks failed"); - goto out; - } - } + if (old_quorum != new_quorum) + glusterd_restart_bricks (conf); } ret = 0; @@ -5456,6 +5449,7 @@ glusterd_restart_bricks (glusterd_conf_t *conf) } } } + ret = 0; out: conf->restart_done = _gf_true; -- cgit