-rwxr-xr-x  tests/bugs/bitrot/1207029-bitrot-daemon-should-start-on-valid-node.t | 55
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-bitd-svc.c                        | 46
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-bitrot.c                          |  6
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-svc-helper.c                      |  2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c                           |  4
5 files changed, 91 insertions, 22 deletions
diff --git a/tests/bugs/bitrot/1207029-bitrot-daemon-should-start-on-valid-node.t b/tests/bugs/bitrot/1207029-bitrot-daemon-should-start-on-valid-node.t
new file mode 100755
index 00000000000..e5b53bd0d24
--- /dev/null
+++ b/tests/bugs/bitrot/1207029-bitrot-daemon-should-start-on-valid-node.t
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+## Test case for bitrot
+## The bitd daemon should not start on a node that does not have any brick.
+
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+
+## Start a 2-node virtual cluster
+TEST launch_cluster 2;
+
+## Peer probe server 2 from server 1's CLI
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+## Create a volume whose bricks are all on one node
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H1:$B1/${V0}1
+
+## Start the volume
+TEST $CLI_1 volume start $V0
+
+## Enable bitrot on the volume
+TEST $CLI_1 volume bitrot $V0 enable
+
+## The bitd daemon should run only on nodes that have a brick. Here only
+## node 1 has bricks, so the bitrot daemon count should be 1.
+bitrot_daemon=$(ps auxww | grep glusterfs | grep bitd.pid | grep -v grep | wc -l)
+TEST [ "$bitrot_daemon" -eq 1 ];
+
+## The bitd daemon should not run on the 2nd node, and no bitrot volfile
+## should be created there. The check below verifies that no bitrot volfile
+## exists on the 2nd node, which does not have any brick.
+## Get the current working directory of the 2nd node, which has no brick,
+## and stat the bitrot volfile there.
+
+cur_wrk_dir2=$($CLI_2 system:: getwd)
+TEST ! stat $cur_wrk_dir2/bitd/bitd-server.vol
+
+
+## The bitd daemon should run on the 1st node, and a bitrot volfile should
+## be created there. The check below verifies that the bitrot volfile
+## exists on the 1st node, which has the bricks.
+## Get the current working directory of the 1st node, which has the bricks,
+## and stat the bitrot volfile there.
+
+cur_wrk_dir1=$($CLI_1 system:: getwd)
+TEST stat $cur_wrk_dir1/bitd/bitd-server.vol
+
+cleanup;
diff --git a/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c b/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c
index d007f3a0a0d..14c60fd5fba 100644
--- a/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c
+++ b/xlators/mgmt/glusterd/src/glusterd-bitd-svc.c
@@ -70,26 +70,40 @@ out:
 int
 glusterd_bitdsvc_manager (glusterd_svc_t *svc, void *data, int flags)
 {
-        int ret = -EINVAL;
+        int ret = 0;
+        xlator_t *this = NULL;
+        glusterd_volinfo_t *volinfo = data;
+        glusterd_brickinfo_t *brickinfo = NULL;
+
+        this = THIS;
+        GF_ASSERT (this);
+        GF_ASSERT (volinfo);
 
         if (glusterd_all_volumes_with_bitrot_stopped ()) {
                 ret = svc->stop (svc, SIGTERM);
         } else {
-                ret = glusterd_bitdsvc_create_volfile ();
-                if (ret)
-                        goto out;
-
-                ret = svc->stop (svc, SIGKILL);
-                if (ret)
-                        goto out;
-
-                ret = svc->start (svc, flags);
-                if (ret)
-                        goto out;
-
-                ret = glusterd_conn_connect (&(svc->conn));
-                if (ret)
-                        goto out;
+                cds_list_for_each_entry (brickinfo, &volinfo->bricks,
+                                         brick_list) {
+                        if (!glusterd_is_local_brick (this, volinfo, brickinfo))
+                                continue;
+
+                        ret = glusterd_bitdsvc_create_volfile ();
+                        if (ret)
+                                goto out;
+
+                        ret = svc->stop (svc, SIGKILL);
+                        if (ret)
+                                goto out;
+
+                        ret = svc->start (svc, flags);
+                        if (ret)
+                                goto out;
+
+                        ret = glusterd_conn_connect (&(svc->conn));
+                        if (ret)
+                                goto out;
+                        break;
+                }
         }
 
 out:
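
The heart of the change is in glusterd_bitdsvc_manager above: the manager now receives the volume being acted on and walks its brick list, regenerating the bitd volfile and (re)starting the daemon only when at least one brick is local to this node. A minimal sketch of that gating condition, factored into a standalone helper (hypothetical name, assuming the usual glusterd headers and types are in scope):

/* Hypothetical helper, not part of the patch: returns _gf_true when at
 * least one brick of @volinfo is hosted on this node, which is the
 * condition the new manager loop checks before touching bitd. */
static gf_boolean_t
bitd_volume_has_local_brick (xlator_t *this, glusterd_volinfo_t *volinfo)
{
        glusterd_brickinfo_t *brickinfo = NULL;

        cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
                if (glusterd_is_local_brick (this, volinfo, brickinfo))
                        return _gf_true;
        }

        return _gf_false;
}

Read this way, the else branch of the manager becomes: if the volume has a local brick, rewrite the volfile, restart bitd and reconnect; otherwise leave bitd untouched on this node.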
diff --git a/xlators/mgmt/glusterd/src/glusterd-bitrot.c b/xlators/mgmt/glusterd/src/glusterd-bitrot.c
index ffc43698f95..083a714f596 100644
--- a/xlators/mgmt/glusterd/src/glusterd-bitrot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-bitrot.c
@@ -310,7 +310,7 @@ glusterd_all_volumes_with_bitrot_stopped ()
 }
 
 static int
-glusterd_manage_bitrot (int opcode)
+glusterd_manage_bitrot (int opcode, glusterd_volinfo_t *volinfo)
 {
         int ret = -1;
         xlator_t *this = NULL;
@@ -326,7 +326,7 @@ glusterd_manage_bitrot (int opcode)
         case GF_BITROT_OPTION_TYPE_ENABLE:
         case GF_BITROT_OPTION_TYPE_DISABLE:
                 ret = priv->bitd_svc.manager (&(priv->bitd_svc),
-                                              NULL, PROC_START_NO_WAIT);
+                                              volinfo, PROC_START_NO_WAIT);
                 if (ret)
                         break;
                 ret = priv->scrub_svc.manager (&(priv->scrub_svc), NULL,
@@ -416,7 +416,7 @@ glusterd_op_bitrot (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
                 goto out;
         }
 
-        ret = glusterd_manage_bitrot (type);
+        ret = glusterd_manage_bitrot (type, volinfo);
         if (ret)
                 goto out;
 
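
In glusterd-bitrot.c the change is plumbing only: glusterd_op_bitrot already holds the volinfo it resolved for the request, and it now passes it through glusterd_manage_bitrot to the bitd service manager, while the scrubber keeps its existing NULL data argument. A condensed sketch of the resulting enable/disable path (hypothetical function name; priv and volinfo are assumed to come from the real glusterd_manage_bitrot context):

/* Condensed sketch, not the full glusterd_manage_bitrot(). */
static int
manage_bitrot_sketch (int opcode, glusterd_conf_t *priv,
                      glusterd_volinfo_t *volinfo)
{
        int ret = -1;

        switch (opcode) {
        case GF_BITROT_OPTION_TYPE_ENABLE:
        case GF_BITROT_OPTION_TYPE_DISABLE:
                /* bitd now decides per node, based on the bricks in volinfo */
                ret = priv->bitd_svc.manager (&(priv->bitd_svc), volinfo,
                                              PROC_START_NO_WAIT);
                if (ret)
                        break;
                /* the scrubber is unchanged and still gets NULL */
                ret = priv->scrub_svc.manager (&(priv->scrub_svc), NULL,
                                               PROC_START_NO_WAIT);
                break;
        default:
                ret = 0;
                break;
        }

        return ret;
}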
diff --git a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
index 8ee715f1a94..b4280dcb9ba 100644
--- a/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
+++ b/xlators/mgmt/glusterd/src/glusterd-svc-helper.c
@@ -136,7 +136,7 @@ glusterd_svcs_manager (glusterd_volinfo_t *volinfo)
         if (ret)
                 goto out;
 
-        ret = conf->bitd_svc.manager (&(conf->bitd_svc), NULL,
+        ret = conf->bitd_svc.manager (&(conf->bitd_svc), volinfo,
                                       PROC_START_NO_WAIT);
         if (ret == -EINVAL)
                 ret = 0;
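
In glusterd_svcs_manager only the data argument handed to the bitd manager changes: the volume being managed instead of NULL. The surrounding -EINVAL handling is kept, so a bitd manager with nothing to do on this node does not fail the whole service sweep. The caller-side pattern, roughly (sketch; conf and volinfo are the parameters of the real glusterd_svcs_manager, and the trailing error check is assumed to match the other services):

        ret = conf->bitd_svc.manager (&(conf->bitd_svc), volinfo,
                                      PROC_START_NO_WAIT);
        if (ret == -EINVAL)
                ret = 0;   /* treated as "nothing to manage", not a failure */
        if (ret)
                goto out;  /* assumed: bail out like the other services */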
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 9ee97cf87b5..aa27ebb18d3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -4308,7 +4308,7 @@ glusterd_restart_bricks (glusterd_conf_t *conf)
                         continue;
                 if (start_svcs == _gf_false) {
                         start_svcs = _gf_true;
-                        glusterd_svcs_manager (NULL);
+                        glusterd_svcs_manager (volinfo);
                 }
                 gf_log (this->name, GF_LOG_DEBUG, "starting the volume %s",
                         volinfo->volname);
@@ -4324,7 +4324,7 @@ glusterd_restart_bricks (glusterd_conf_t *conf)
                                 continue;
                         if (start_svcs == _gf_false) {
                                 start_svcs = _gf_true;
-                                glusterd_svcs_manager (NULL);
+                                glusterd_svcs_manager (volinfo);
                         }
                         start_svcs = _gf_true;
                         gf_log (this->name, GF_LOG_DEBUG, "starting the snap "
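
Finally, glusterd_restart_bricks, which runs when glusterd itself (re)starts, used to call glusterd_svcs_manager (NULL) once before restarting bricks; since bitd now needs a volume whose bricks it can inspect, the call is made with the first started volume encountered, in both the regular-volume loop and the snapshot-volume loop. A hypothetical condensation of that pattern (the real code sits inside the cds_list_for_each_entry loops shown above):

/* Hypothetical sketch: kick the per-node services exactly once, using the
 * first started volume as the context that bitd needs. */
static void
restart_svcs_once_sketch (glusterd_conf_t *conf)
{
        glusterd_volinfo_t *volinfo    = NULL;
        gf_boolean_t        start_svcs = _gf_false;

        cds_list_for_each_entry (volinfo, &conf->volumes, vol_list) {
                if (volinfo->status != GLUSTERD_STATUS_STARTED)
                        continue;
                if (start_svcs == _gf_false) {
                        start_svcs = _gf_true;
                        glusterd_svcs_manager (volinfo);
                }
                /* ... the per-volume brick restart continues here ... */
        }
}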