summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGaurav Kumar Garg <ggarg@redhat.com>2015-07-14 14:01:14 +0530
committerAtin Mukherjee <amukherj@redhat.com>2015-07-23 20:51:38 -0700
commit64727ecddb48f6cb8e497c28276ea78a0fb97991 (patch)
tree9fb77ae4932c0a25e6bc7019a14562e732502070
parent75d50eaba3fd7d24874ba8acc9a776c863a932e2 (diff)
glusterd: Pass NULL in glusterd_svc_manager in glusterd_restart_bricks
On restarting glusterd, the quota daemon is not started when more than one volume is configured and quota is enabled only on the 2nd volume. This is because, while restarting, glusterd restarts all the bricks, and during brick restart it starts the respective daemons by passing the volinfo of the first volume. Passing that volinfo to glusterd_svc_manager implies the daemon managers take action based on that single volume's configuration, which is incorrect for per-node daemons. The fix is to pass NULL as volinfo while restarting bricks. BUG: 1242882 Change-Id: Ie53fc452dc79811068a9397abca13c65de4a8359 Signed-off-by: Gaurav Kumar Garg <ggarg@redhat.com> Reviewed-on: http://review.gluster.org/11660 Tested-by: NetBSD Build System <jenkins@build.gluster.org> Tested-by: Gluster Build System <jenkins@build.gluster.com> Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
-rw-r--r--tests/bugs/glusterd/bug-1242882-do-not-pass-volinfo-quota.t38
-rw-r--r--tests/volume.rc4
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-utils.c2
3 files changed, 43 insertions, 1 deletions
diff --git a/tests/bugs/glusterd/bug-1242882-do-not-pass-volinfo-quota.t b/tests/bugs/glusterd/bug-1242882-do-not-pass-volinfo-quota.t
new file mode 100644
index 00000000000..c229d4371b6
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1242882-do-not-pass-volinfo-quota.t
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## Lets create volume V0 and start the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+
+## Lets create volume V1 and start the volume
+TEST $CLI volume create $V1 $H0:$B0/${V0}2 $H0:$B0/${V0}3
+TEST $CLI volume start $V1
+
+## Enable quota on 2nd volume
+TEST $CLI volume quota $V1 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_quotad_count
+
+## Killing all gluster process
+pkill gluster;
+
+## there should not be any quota daemon running after killing quota process
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_quotad_count
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## Quotad daemon should start on restarting the glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_quotad_count
+
+cleanup;
diff --git a/tests/volume.rc b/tests/volume.rc
index e62db1b17fc..91cbb730501 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -543,6 +543,10 @@ function get_scrubd_count {
ps auxww | grep glusterfs | grep scrub.pid | grep -v grep | wc -l
}
+function get_quotad_count {
+ ps auxww | grep glusterfs | grep quotad.pid | grep -v grep | wc -l
+}
+
function quota_list_field () {
local QUOTA_PATH=$1
local FIELD=$2
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index f3413230e44..0d7284066b6 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -4488,7 +4488,7 @@ glusterd_restart_bricks (glusterd_conf_t *conf)
continue;
if (start_svcs == _gf_false) {
start_svcs = _gf_true;
- glusterd_svcs_manager (volinfo);
+ glusterd_svcs_manager (NULL);
}
gf_msg_debug (this->name, 0, "starting the volume %s",
volinfo->volname);