-rw-r--r--  cli/src/cli-cmd-parser.c                                                  |  4
-rw-r--r--  cli/src/cli-rpc-ops.c                                                     |  7
-rw-r--r--  glusterfsd/src/glusterfsd-mgmt.c                                          |  8
-rw-r--r--  rpc/xdr/src/cli1-xdr.x                                                    | 34
-rw-r--r--  tests/bugs/bitrot/bug-1207029-bitrot-daemon-should-start-on-valid-node.t | 70
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c                              | 20
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c                                | 98
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c                                |  8
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.h                                      |  2
9 files changed, 233 insertions, 18 deletions
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index f3b8887d554..178ba1bc641 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -2870,6 +2870,10 @@ cli_cmd_volume_status_parse (const char **words, int wordcount,
cmd |= GF_CLI_STATUS_QUOTAD;
} else if (!strcmp (words[3], "snapd")) {
cmd |= GF_CLI_STATUS_SNAPD;
+ } else if (!strcmp (words[3], "bitd")) {
+ cmd |= GF_CLI_STATUS_BITD;
+ } else if (!strcmp (words[3], "scrub")) {
+ cmd |= GF_CLI_STATUS_SCRUB;
} else {
cmd = GF_CLI_STATUS_BRICK;
ret = dict_set_str (dict, "brick",
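
With this parser change, two new keywords are accepted after the volume name, mirroring the existing nfs/shd/quotad/snapd handling. The test script later in this patch exercises exactly these forms:

    gluster volume status <VOLNAME> bitd
    gluster volume status <VOLNAME> scrub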
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index 400c4842225..87b0a284457 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -7056,7 +7056,8 @@ gf_cli_status_cbk (struct rpc_req *req, struct iovec *iov,
}
if ((cmd & GF_CLI_STATUS_NFS) || (cmd & GF_CLI_STATUS_SHD) ||
- (cmd & GF_CLI_STATUS_QUOTAD) || (cmd & GF_CLI_STATUS_SNAPD))
+ (cmd & GF_CLI_STATUS_QUOTAD) || (cmd & GF_CLI_STATUS_SNAPD) ||
+ (cmd & GF_CLI_STATUS_BITD) || (cmd & GF_CLI_STATUS_SCRUB))
notbrick = _gf_true;
if (global_state->mode & GLUSTER_MODE_XML) {
@@ -7176,7 +7177,9 @@ gf_cli_status_cbk (struct rpc_req *req, struct iovec *iov,
if (!strcmp (hostname, "NFS Server") ||
!strcmp (hostname, "Self-heal Daemon") ||
!strcmp (hostname, "Quota Daemon") ||
- !strcmp (hostname, "Snapshot Daemon"))
+ !strcmp (hostname, "Snapshot Daemon") ||
+ !strcmp (hostname, "Scrubber Daemon") ||
+ !strcmp (hostname, "Bitrot Daemon"))
snprintf (status.brick, PATH_MAX + 255, "%s on %s",
hostname, path);
else {
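
These callback hunks make the CLI treat bitd and scrub like the other service daemons: the notbrick flag suppresses brick-specific handling, and the row label is built as "<daemon> on <host>" rather than a hostname:path pair. Illustrative status rows (hypothetical spacing, port, and pid values; not captured output):

    Bitrot Daemon on localhost                N/A      Y       12345
    Scrubber Daemon on localhost              N/A      Y       12346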
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index 3ff3337c01d..1cff44be4d0 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -914,6 +914,10 @@ glusterfs_handle_node_status (rpcsvc_request_t *req)
ret = gf_asprintf (&node_name, "%s", "glustershd");
else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0)
ret = gf_asprintf (&node_name, "%s", "quotad");
+ else if ((cmd & GF_CLI_STATUS_BITD) != 0)
+ ret = gf_asprintf (&node_name, "%s", "bitd");
+ else if ((cmd & GF_CLI_STATUS_SCRUB) != 0)
+ ret = gf_asprintf (&node_name, "%s", "scrubber");
else {
ret = -1;
@@ -939,6 +943,10 @@ glusterfs_handle_node_status (rpcsvc_request_t *req)
ret = gf_asprintf (&subvol_name, "%s-replicate-0", volname);
else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0)
ret = gf_asprintf (&subvol_name, "%s", volname);
+ else if ((cmd & GF_CLI_STATUS_BITD) != 0)
+ ret = gf_asprintf (&subvol_name, "%s", volname);
+ else if ((cmd & GF_CLI_STATUS_SCRUB) != 0)
+ ret = gf_asprintf (&subvol_name, "%s", volname);
else {
ret = -1;
goto out;
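
On the brick-process side, glusterfs_handle_node_status now resolves the two new status bits to daemon names; for both daemons the subvolume name is simply the volume name. A hypothetical table-driven sketch of the same cmd-bit-to-name mapping (illustration only, not the committed code; flag values copied from cli1-xdr.x below):

    #include <stddef.h>

    #define GF_CLI_STATUS_SHD    0x001000
    #define GF_CLI_STATUS_QUOTAD 0x002000
    #define GF_CLI_STATUS_BITD   0x008000
    #define GF_CLI_STATUS_SCRUB  0x010000

    static const struct {
            int         bit;
            const char *name;
    } node_names[] = {
            { GF_CLI_STATUS_SHD,    "glustershd" },
            { GF_CLI_STATUS_QUOTAD, "quotad"     },
            { GF_CLI_STATUS_BITD,   "bitd"       },
            { GF_CLI_STATUS_SCRUB,  "scrubber"   },
    };

    /* Return the daemon name for a node-status request, or NULL
     * if the command does not target a known service daemon. */
    static const char *
    node_name_for_cmd (int cmd)
    {
            size_t i;

            for (i = 0; i < sizeof (node_names) / sizeof (node_names[0]); i++)
                    if (cmd & node_names[i].bit)
                            return node_names[i].name;
            return NULL;
    }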
diff --git a/rpc/xdr/src/cli1-xdr.x b/rpc/xdr/src/cli1-xdr.x
index 189df0e0ccc..74d12cc2c7d 100644
--- a/rpc/xdr/src/cli1-xdr.x
+++ b/rpc/xdr/src/cli1-xdr.x
@@ -142,22 +142,24 @@ enum gf1_cli_top_op {
/* The unconventional hex numbers help us perform
bit-wise operations which reduces complexity */
enum gf_cli_status_type {
- GF_CLI_STATUS_NONE = 0x0000,
- GF_CLI_STATUS_MEM = 0x0001, /*00000000000001*/
- GF_CLI_STATUS_CLIENTS = 0x0002, /*00000000000010*/
- GF_CLI_STATUS_INODE = 0x0004, /*00000000000100*/
- GF_CLI_STATUS_FD = 0x0008, /*00000000001000*/
- GF_CLI_STATUS_CALLPOOL = 0x0010, /*00000000010000*/
- GF_CLI_STATUS_DETAIL = 0x0020, /*00000000100000*/
- GF_CLI_STATUS_TASKS = 0x0040, /*0000001000000*/
- GF_CLI_STATUS_MASK = 0x00FF, /*00000011111111 Used to get the op*/
- GF_CLI_STATUS_VOL = 0x0100, /*00000100000000*/
- GF_CLI_STATUS_ALL = 0x0200, /*00001000000000*/
- GF_CLI_STATUS_BRICK = 0x0400, /*00010000000000*/
- GF_CLI_STATUS_NFS = 0x0800, /*00100000000000*/
- GF_CLI_STATUS_SHD = 0x1000, /*01000000000000*/
- GF_CLI_STATUS_QUOTAD = 0x2000, /*10000000000000*/
- GF_CLI_STATUS_SNAPD = 0x4000 /*100000000000000*/
+ GF_CLI_STATUS_NONE = 0x000000,
+ GF_CLI_STATUS_MEM = 0x000001, /*00000000000000001*/
+ GF_CLI_STATUS_CLIENTS = 0x000002, /*00000000000000010*/
+ GF_CLI_STATUS_INODE = 0x000004, /*00000000000000100*/
+ GF_CLI_STATUS_FD = 0x000008, /*00000000000001000*/
+ GF_CLI_STATUS_CALLPOOL = 0x000010, /*00000000000010000*/
+ GF_CLI_STATUS_DETAIL = 0x000020, /*00000000000100000*/
+ GF_CLI_STATUS_TASKS = 0x000040, /*00000000001000000*/
+ GF_CLI_STATUS_MASK = 0x0000FF, /*00000000011111111 Used to get the op*/
+ GF_CLI_STATUS_VOL = 0x000100, /*00000000100000000*/
+ GF_CLI_STATUS_ALL = 0x000200, /*00000001000000000*/
+ GF_CLI_STATUS_BRICK = 0x000400, /*00000010000000000*/
+ GF_CLI_STATUS_NFS = 0x000800, /*00000100000000000*/
+ GF_CLI_STATUS_SHD = 0x001000, /*00001000000000000*/
+ GF_CLI_STATUS_QUOTAD = 0x002000, /*00010000000000000*/
+ GF_CLI_STATUS_SNAPD = 0x004000, /*00100000000000000*/
+ GF_CLI_STATUS_BITD = 0x008000, /*01000000000000000*/
+ GF_CLI_STATUS_SCRUB = 0x010000 /*10000000000000000*/
};
/* Identifiers for snapshot clis */
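
The enum widens from four to six hex digits because SNAPD already occupied bit 14; BITD and SCRUB take bits 15 and 16. As the comment above the enum says, one bit per status type keeps the protocol simple: membership tests and op extraction are single AND operations. A minimal standalone sketch (values copied from the enum above):

    #include <stdio.h>

    #define GF_CLI_STATUS_MASK 0x0000FF
    #define GF_CLI_STATUS_VOL  0x000100
    #define GF_CLI_STATUS_BITD 0x008000

    int
    main (void)
    {
            int cmd = GF_CLI_STATUS_VOL | GF_CLI_STATUS_BITD;

            /* Membership is one bitwise AND ... */
            printf ("bitd requested: %s\n",
                    (cmd & GF_CLI_STATUS_BITD) ? "yes" : "no");

            /* ... and the low "op" bits (mem, clients, fd, ...) come out
             * with the mask, without disturbing the selector bits. */
            printf ("op bits: 0x%02x\n", cmd & GF_CLI_STATUS_MASK);
            return 0;
    }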
diff --git a/tests/bugs/bitrot/bug-1207029-bitrot-daemon-should-start-on-valid-node.t b/tests/bugs/bitrot/bug-1207029-bitrot-daemon-should-start-on-valid-node.t
new file mode 100644
index 00000000000..263df31ff38
--- /dev/null
+++ b/tests/bugs/bitrot/bug-1207029-bitrot-daemon-should-start-on-valid-node.t
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+## Test case for bitrot
+## The gluster volume status command should show the status of the bitrot daemon
+
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+function get_bitd_count_from_cli1 {
+ $CLI_1 volume status $V0 | grep "Bitrot Daemon" | grep -v grep | wc -l
+}
+
+function get_scrubd_count_from_cli2 {
+ $CLI_2 volume status $V0 | grep "Scrubber Daemon" | grep -v grep | wc -l
+}
+
+
+## Start a 2 node virtual cluster
+TEST launch_cluster 2;
+
+## Peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+## Let's create and start the volume
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
+TEST $CLI_1 volume start $V0
+
+## Enable bitrot on volume $V0
+TEST $CLI_1 volume bitrot $V0 enable
+
+## From node 1, the gluster volume status command should show the status of the
+## bitrot daemon on all nodes. There are 2 nodes in the cluster, with bricks
+## ${V0}0 and ${V0}1, so there should be 2 bitrot daemons running.
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_bitd_count_from_cli1
+
+
+## From node 2, the gluster volume status command should show the status of the
+## scrubber daemon on all nodes. There are 2 nodes in the cluster, with bricks
+## ${V0}0 and ${V0}1, so there should be 2 scrubber daemons running.
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_scrubd_count_from_cli2
+
+
+## From node 1, the gluster volume status command should print the status of
+## only the scrubber daemon. There should be 2 scrubber daemons running in
+## total, one per node.
+
+scrub=$($CLI_1 volume status $V0 scrub | grep "Scrubber Daemon" | \
+ grep -v grep | wc -l)
+TEST [ "$scrub" -eq 2 ];
+
+
+
+## From node 2, the gluster volume status command should print the status of
+## only the bitd daemon. There should be 2 bitd daemons running in total, one
+## per node.
+
+bitd=$($CLI_2 volume status $V0 bitd | grep "Bitrot Daemon" | \
+ grep -v grep | wc -l)
+TEST [ "$bitd" -eq 2 ];
+
+
+cleanup;
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index ac69fc8712d..524ce35d841 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -3987,6 +3987,26 @@ __glusterd_handle_status_volume (rpcsvc_request_t *req)
goto out;
}
+ if ((cmd & GF_CLI_STATUS_BITD) &&
+ (conf->op_version < GD_OP_VERSION_3_7_0)) {
+ snprintf (err_str, sizeof (err_str), "The cluster is operating "
+ "at a lesser version than %d. Getting the status of "
+ "bitd is not allowed in this state",
+ GD_OP_VERSION_3_7_0);
+ ret = -1;
+ goto out;
+ }
+
+ if ((cmd & GF_CLI_STATUS_SCRUB) &&
+ (conf->op_version < GD_OP_VERSION_3_7_0)) {
+ snprintf (err_str, sizeof (err_str), "The cluster is operating "
+ "at a lesser version than %d. Getting the status of "
+ "scrub is not allowed in this state",
+ GD_OP_VERSION_3_7_0);
+ ret = -1;
+ goto out;
+ }
+
ret = glusterd_op_begin_synctask (req, GD_OP_STATUS_VOLUME, dict);
out:
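
Both new guards test the same op-version, so an equivalent combined form is possible inside __glusterd_handle_status_volume; a sketch of that alternative (same context and variables as the hunk above, not the committed code):

    if ((cmd & (GF_CLI_STATUS_BITD | GF_CLI_STATUS_SCRUB)) &&
        (conf->op_version < GD_OP_VERSION_3_7_0)) {
            snprintf (err_str, sizeof (err_str), "The cluster is operating "
                      "at a lesser version than %d. Getting the status of "
                      "bitd/scrub is not allowed in this state",
                      GD_OP_VERSION_3_7_0);
            ret = -1;
            goto out;
    }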
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index a52fcd1ac2f..e2bbd2d5c42 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -1394,6 +1394,22 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
"quota enabled", volname);
goto out;
}
+ } else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
+ if (!glusterd_is_bitrot_enabled (volinfo)) {
+ ret = -1;
+ snprintf (msg, sizeof (msg), "Volume %s does not have "
+ "bitrot enabled", volname);
+ goto out;
+ }
+ } else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
+ if (!glusterd_is_bitrot_enabled (volinfo)) {
+ ret = -1;
+ snprintf (msg, sizeof (msg), "Volume %s does not have "
+ "bitrot enabled. Scrubber will be enabled "
+ "automatically if bitrot is enabled",
+ volname);
+ goto out;
+ }
} else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
if (!glusterd_is_snapd_enabled (volinfo)) {
ret = -1;
@@ -2771,6 +2787,20 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
goto out;
other_count++;
node_count++;
+ } else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
+ ret = glusterd_add_node_to_dict (priv->bitd_svc.name,
+ rsp_dict, 0, vol_opts);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
+ } else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
+ ret = glusterd_add_node_to_dict (priv->scrub_svc.name,
+ rsp_dict, 0, vol_opts);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
} else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
ret = glusterd_add_node_to_dict ("snapd", rsp_dict, 0,
vol_opts);
@@ -2876,6 +2906,34 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
goto out;
other_count++;
node_count++;
+ other_index++;
+ }
+
+ if (glusterd_is_bitrot_enabled (volinfo)) {
+ ret = glusterd_add_node_to_dict
+ (priv->bitd_svc.name,
+ rsp_dict,
+ other_index,
+ vol_opts);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
+ other_index++;
+ }
+
+ /* For handling scrub status. The scrub daemon runs
+ * automatically when bitrot is enabled. */
+ if (glusterd_is_bitrot_enabled (volinfo)) {
+ ret = glusterd_add_node_to_dict
+ (priv->scrub_svc.name,
+ rsp_dict,
+ other_index,
+ vol_opts);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
}
}
}
@@ -5984,6 +6042,8 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
case GF_CLI_STATUS_SHD:
case GF_CLI_STATUS_QUOTAD:
case GF_CLI_STATUS_SNAPD:
+ case GF_CLI_STATUS_BITD:
+ case GF_CLI_STATUS_SCRUB:
break;
default:
goto out;
@@ -6084,6 +6144,44 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
cds_list_add_tail (&pending_node->list, selected);
ret = 0;
+ } else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
+ if (!priv->bitd_svc.online) {
+ gf_log (this->name, GF_LOG_ERROR, "Bitrot is not "
+ "running");
+ ret = -1;
+ goto out;
+ }
+ pending_node = GF_CALLOC (1, sizeof (*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ }
+ pending_node->node = &(priv->bitd_svc);
+ pending_node->type = GD_NODE_BITD;
+ pending_node->index = 0;
+ cds_list_add_tail (&pending_node->list, selected);
+
+ ret = 0;
+ } else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
+ if (!priv->scrub_svc.online) {
+ gf_log (this->name, GF_LOG_ERROR, "Scrubber is not "
+ "running");
+ ret = -1;
+ goto out;
+ }
+ pending_node = GF_CALLOC (1, sizeof (*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ }
+ pending_node->node = &(priv->scrub_svc);
+ pending_node->type = GD_NODE_SCRUB;
+ pending_node->index = 0;
+ cds_list_add_tail (&pending_node->list, selected);
+
+ ret = 0;
} else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
if (!volinfo->snapd.svc.online) {
gf_log (this->name, GF_LOG_ERROR, "snapd is not "
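
The new GF_CLI_STATUS_BITD and GF_CLI_STATUS_SCRUB branches repeat the selection pattern already used for the other daemons. A hypothetical helper, written against the same internal glusterd APIs that appear in the hunk above, shows the shared shape (a sketch, not code from the tree):

    /* Hypothetical helper: verify the daemon is online, then allocate
     * and queue a pending node for the status operation. */
    static int
    glusterd_select_svc_node (glusterd_svc_t *svc, gd_node_type type,
                              struct cds_list_head *selected)
    {
            glusterd_pending_node_t *pending_node = NULL;

            if (!svc->online)
                    return -1;

            pending_node = GF_CALLOC (1, sizeof (*pending_node),
                                      gf_gld_mt_pending_node_t);
            if (!pending_node)
                    return -1;

            pending_node->node = svc;
            pending_node->type = type;
            pending_node->index = 0;
            cds_list_add_tail (&pending_node->list, selected);
            return 0;
    }

    /* e.g. ret = glusterd_select_svc_node (&priv->bitd_svc,
     *                                      GD_NODE_BITD, selected); */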
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 6a2757172be..cb9bd1fcaed 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -4020,6 +4020,10 @@ glusterd_add_node_to_dict (char *server, dict_t *dict, int count,
svc = &(priv->nfs_svc);
else if (strcmp(server, priv->quotad_svc.name) == 0)
svc = &(priv->quotad_svc);
+ else if (strcmp(server, priv->bitd_svc.name) == 0)
+ svc = &(priv->bitd_svc);
+ else if (strcmp(server, priv->scrub_svc.name) == 0)
+ svc = &(priv->scrub_svc);
//Consider service to be running only when glusterd sees it Online
if (svc->online)
@@ -4041,6 +4045,10 @@ glusterd_add_node_to_dict (char *server, dict_t *dict, int count,
ret = dict_set_str (dict, key, "Self-heal Daemon");
else if (!strcmp (server, priv->quotad_svc.name))
ret = dict_set_str (dict, key, "Quota Daemon");
+ else if (!strcmp (server, priv->bitd_svc.name))
+ ret = dict_set_str (dict, key, "Bitrot Daemon");
+ else if (!strcmp (server, priv->scrub_svc.name))
+ ret = dict_set_str (dict, key, "Scrubber Daemon");
if (ret)
goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index f2a9be15c9f..5ad92a47177 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -447,6 +447,8 @@ typedef enum gd_node_type_ {
GD_NODE_NFS,
GD_NODE_QUOTAD,
GD_NODE_SNAPD,
+ GD_NODE_BITD,
+ GD_NODE_SCRUB,
} gd_node_type;
typedef enum missed_snap_stat {