summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGaurav Kumar Garg <ggarg@redhat.com>2015-03-25 18:07:24 +0530
committerAtin Mukherjee <amukherj@redhat.com>2015-11-19 19:41:43 -0800
commit097e131481d25e5b1f859f4ea556b8bf56155472 (patch)
tree0086afbddcf7ea8706a5560484ea30fa9488e102
parentb479e086ddbcb5cde83c11b67d542dbd4f2bd739 (diff)
glusterd: cli command implementation for bitrot scrub status
CLI command for bitrot scrub status will be : gluster volume bitrot <volname> scrub status Above command will show the statistics of the bitrot scrubber. Upon execution of this command it will show some common scrubber tunable values of volume <VOLNAME> followed by the scrub statistics of individual nodes. Sample output for a single node: Volume name : <VOLNAME> State of scrub: Active Scrub frequency: biweekly Bitrot error log location: /var/log/glusterfs/bitd.log Scrubber error log location: /var/log/glusterfs/scrub.log ========================================================= Node name: Number of Scrubbed files: Number of Unsigned files: Last completed scrub time: Duration of last scrub: Error count: ========================================================= This is just infrastructure. The list of bad files, last scrub time, and error count values will be taken care of by the http://review.gluster.org/#/c/12503/ and http://review.gluster.org/#/c/12654/ patches. Change-Id: I3ed3c7057c9d0c894233f4079a7f185d90c202d1 BUG: 1207627 Signed-off-by: Gaurav Kumar Garg <ggarg@redhat.com> Reviewed-on: http://review.gluster.org/10231 Reviewed-by: Atin Mukherjee <amukherj@redhat.com> Tested-by: NetBSD Build System <jenkins@build.gluster.org> Tested-by: Gluster Build System <jenkins@build.gluster.com>
-rw-r--r--cli/src/cli-cmd-parser.c19
-rw-r--r--cli/src/cli-cmd-volume.c2
-rw-r--r--cli/src/cli-rpc-ops.c163
-rw-r--r--glusterfsd/src/glusterfsd-messages.h6
-rw-r--r--glusterfsd/src/glusterfsd-mgmt.c83
-rw-r--r--libglusterfs/src/globals.h4
-rw-r--r--libglusterfs/src/glusterfs.h1
-rw-r--r--rpc/rpc-lib/src/protocol-common.h1
-rw-r--r--rpc/xdr/src/cli1-xdr.x1
-rw-r--r--xlators/features/bit-rot/src/bitd/bit-rot.c18
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-bitrot.c46
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-op-sm.c93
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-rpc-ops.c2
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-syncop.c5
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-utils.c394
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-utils.h5
-rw-r--r--xlators/mgmt/glusterd/src/glusterd.h18
17 files changed, 846 insertions, 15 deletions
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index 921648f2822..ef07e88b795 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -5116,7 +5116,7 @@ cli_cmd_bitrot_parse (const char **words, int wordcount, dict_t **options)
"biweekly", "monthly",
NULL};
char *scrub_values[] = {"pause", "resume",
- NULL};
+ "status", NULL};
dict_t *dict = NULL;
gf_bitrot_type type = GF_BITROT_OPTION_TYPE_NONE;
int32_t expiry_time = 0;
@@ -5180,6 +5180,23 @@ cli_cmd_bitrot_parse (const char **words, int wordcount, dict_t **options)
}
}
+ if ((strcmp (words[3], "scrub") == 0) &&
+ (strcmp (words[4], "status") == 0)) {
+ if (wordcount == 5) {
+ type = GF_BITROT_CMD_SCRUB_STATUS;
+ ret = dict_set_str (dict, "scrub-value",
+ (char *) words[4]);
+ if (ret) {
+ cli_out ("Failed to set dict for scrub status");
+ goto out;
+ }
+ goto set_type;
+ } else {
+ ret = -1;
+ goto out;
+ }
+ }
+
if (!strcmp (w, "scrub-throttle")) {
if (!words[4]) {
cli_err ("Missing scrub-throttle value for bitrot "
diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c
index 86274a85c1d..4e16ce1528f 100644
--- a/cli/src/cli-cmd-volume.c
+++ b/cli/src/cli-cmd-volume.c
@@ -2785,7 +2785,7 @@ struct cli_cmd volume_cmds[] = {
"volume bitrot <volname> scrub-throttle {lazy|normal|aggressive} |\n"
"volume bitrot <volname> scrub-frequency {hourly|daily|weekly|biweekly"
"|monthly} |\n"
- "volume bitrot <volname> scrub {pause|resume}",
+ "volume bitrot <volname> scrub {pause|resume|status}",
cli_cmd_bitrot_cbk,
"Bitrot translator specific operation. For more information about "
"bitrot command type 'man gluster'"
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index ba044efe5d7..27d0595bd3c 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -10666,10 +10666,156 @@ out:
}
int
+gf_cli_print_bitrot_scrub_status (dict_t *dict)
+{
+ int i = 1;
+ int ret = -1;
+ int count = 0;
+ char key[256] = {0,};
+ char *volname = NULL;
+ char *node_name = NULL;
+ char *scrub_freq = NULL;
+ char *state_scrub = NULL;
+ char *scrub_impact = NULL;
+ char *scrub_log_file = NULL;
+ char *bitrot_log_file = NULL;
+ uint64_t scrub_files = 0;
+ uint64_t unsigned_files = 0;
+ uint64_t scrub_time = 0;
+ uint64_t last_scrub = 0;
+ uint64_t error_count = 0;
+
+
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret)
+ gf_log ("cli", GF_LOG_TRACE, "failed to get volume name");
+
+ ret = dict_get_str (dict, "features.scrub", &state_scrub);
+ if (ret)
+ gf_log ("cli", GF_LOG_TRACE, "failed to get scrub state value");
+
+ ret = dict_get_str (dict, "features.scrub-throttle", &scrub_impact);
+ if (ret)
+ gf_log ("cli", GF_LOG_TRACE, "failed to get scrub impact "
+ "value");
+
+ ret = dict_get_str (dict, "features.scrub-freq", &scrub_freq);
+ if (ret)
+ gf_log ("cli", GF_LOG_TRACE, "failed to get scrub -freq value");
+
+ ret = dict_get_str (dict, "bitrot_log_file", &bitrot_log_file);
+ if (ret)
+ gf_log ("cli", GF_LOG_TRACE, "failed to get bitrot log file "
+ "location");
+
+ ret = dict_get_str (dict, "scrub_log_file", &scrub_log_file);
+ if (ret)
+ gf_log ("cli", GF_LOG_TRACE, "failed to get scrubber log file "
+ "location");
+
+ ret = dict_get_int32 (dict, "count", &count);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "count not get count value from"
+ " dictionary");
+ goto out;
+ }
+
+ cli_out ("\n%s: %s\n", "Volume name ", volname);
+
+ cli_out ("%s: %s\n", "State of scrub", state_scrub);
+
+ cli_out ("%s: %s\n", "Scrub impact", scrub_impact);
+
+ cli_out ("%s: %s\n", "Scrub frequency", scrub_freq);
+
+ cli_out ("%s: %s\n", "Bitrot error log location", bitrot_log_file);
+
+ cli_out ("%s: %s\n", "Scrubber error log location", scrub_log_file);
+
+
+ for (i = 1; i <= count; i++) {
+ /* Reset the variables to prevent carryover of values */
+ node_name = NULL;
+ last_scrub = 0;
+ scrub_time = 0;
+ error_count = 0;
+ scrub_files = 0;
+ unsigned_files = 0;
+
+ memset (key, 0, 256);
+ snprintf (key, 256, "node-name-%d", i);
+ ret = dict_get_str (dict, key, &node_name);
+ if (ret)
+ gf_log ("cli", GF_LOG_TRACE, "failed to get node-name");
+
+ memset (key, 0, 256);
+ snprintf (key, 256, "scrubbed-files-%d", i);
+ ret = dict_get_uint64 (dict, key, &scrub_files);
+ if (ret)
+ gf_log ("cli", GF_LOG_TRACE, "failed to get scrubbed "
+ "files");
+
+ memset (key, 0, 256);
+ snprintf (key, 256, "unsigned-files-%d", i);
+ ret = dict_get_uint64 (dict, key, &unsigned_files);
+ if (ret)
+ gf_log ("cli", GF_LOG_TRACE, "failed to get unsigned "
+ "files");
+
+ memset (key, 0, 256);
+ snprintf (key, 256, "scrub-duration-%d", i);
+ ret = dict_get_uint64 (dict, key, &scrub_time);
+ if (ret)
+ gf_log ("cli", GF_LOG_TRACE, "failed to get last scrub "
+ "duration");
+
+ memset (key, 0, 256);
+ snprintf (key, 256, "last-scrub-time-%d", i);
+ ret = dict_get_uint64 (dict, key, &last_scrub);
+ if (ret)
+ gf_log ("cli", GF_LOG_TRACE, "failed to get last scrub"
+ " time");
+
+ memset (key, 0, 256);
+ snprintf (key, 256, "error-count-%d", i);
+ ret = dict_get_uint64 (dict, key, &error_count);
+ if (ret)
+ gf_log ("cli", GF_LOG_TRACE, "failed to get error "
+ "count");
+
+ cli_out ("\n%s\n", "=========================================="
+ "===============");
+
+ cli_out ("%s: %s\n", "Node name", node_name);
+
+ cli_out ("%s: %"PRIu64 "\n", "Number of Scrubbed files",
+ scrub_files);
+
+ cli_out ("%s: %"PRIu64 "\n", "Number of Unsigned files",
+ unsigned_files);
+
+ cli_out ("%s: %"PRIu64 "\n", "Last completed scrub time",
+ scrub_time);
+
+ cli_out ("%s: %"PRIu64 "\n", "Duration of last scrub",
+ last_scrub);
+
+ cli_out ("%s: %"PRIu64 "\n", "Error count", error_count);
+
+ }
+ cli_out ("%s\n", "=========================================="
+ "===============");
+
+out:
+ return 0;
+}
+
+int
gf_cli_bitrot_cbk (struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
int ret = -1;
+ int type = 0;
gf_cli_rsp rsp = {0, };
dict_t *dict = NULL;
call_frame_t *frame = NULL;
@@ -10723,6 +10869,22 @@ gf_cli_bitrot_cbk (struct rpc_req *req, struct iovec *iov,
gf_log ("cli", GF_LOG_DEBUG, "Received resp to bit rot command");
+ ret = dict_get_int32 (dict, "type", &type);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to get command type");
+ goto out;
+ }
+
+ if ((type == GF_BITROT_CMD_SCRUB_STATUS) &&
+ !(global_state->mode & GLUSTER_MODE_XML)) {
+ ret = gf_cli_print_bitrot_scrub_status (dict);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR, "Failed to print bitrot "
+ "scrub status");
+ }
+ goto out;
+ }
+
xml_output:
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_vol_profile (dict, rsp.op_ret,
@@ -10749,7 +10911,6 @@ out:
cli_cmd_broadcast_response (ret);
return ret;
-
}
int32_t
diff --git a/glusterfsd/src/glusterfsd-messages.h b/glusterfsd/src/glusterfsd-messages.h
index 9c6196c1d44..caa999506e6 100644
--- a/glusterfsd/src/glusterfsd-messages.h
+++ b/glusterfsd/src/glusterfsd-messages.h
@@ -36,7 +36,7 @@
*/
#define GLFS_COMP_BASE GLFS_MSGID_COMP_GLUSTERFSD
-#define GLFS_NUM_MESSAGES 34
+#define GLFS_NUM_MESSAGES 36
#define GLFS_MSGID_END (GLFS_COMP_BASE + GLFS_NUM_MESSAGES + 1)
/* Messaged with message IDs */
#define glfs_msg_start_x GLFS_COMP_BASE, "Invalid: Start of messages"
@@ -104,6 +104,10 @@
"was provided"
#define glusterfsd_msg_34 (GLFS_COMP_BASE + 34), "memory accounting init" \
" failed."
+#define glusterfsd_msg_35 (GLFS_COMP_BASE + 35), "rpc req buffer " \
+ " unserialization failed."
+#define glusterfsd_msg_36 (GLFS_COMP_BASE + 36), "problem in xlator " \
+ " loading."
/*------------*/
#define glfs_msg_end_x GLFS_MSGID_END, "Invalid: End of messages"
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index 749873872d2..2e44f94b1bd 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -21,6 +21,7 @@
#include "rpc-clnt.h"
#include "protocol-common.h"
+#include "glusterfsd-messages.h"
#include "glusterfs3.h"
#include "portmap-xdr.h"
#include "xdr-generic.h"
@@ -641,6 +642,87 @@ out:
return 0;
}
+int
+glusterfs_handle_bitrot (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gd1_mgmt_brick_op_req xlator_req = {0,};
+ dict_t *input = NULL;
+ dict_t *output = NULL;
+ xlator_t *any = NULL;
+ xlator_t *this = NULL;
+ xlator_t *xlator = NULL;
+ char msg[2048] = {0,};
+ char xname[1024] = {0,};
+ glusterfs_ctx_t *ctx = NULL;
+ glusterfs_graph_t *active = NULL;
+
+ GF_ASSERT (req);
+ this = THIS;
+ GF_ASSERT (this);
+
+ ret = xdr_to_generic (req->msg[0], &xlator_req,
+ (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
+
+ if (ret < 0) {
+ /*failed to decode msg;*/
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ ctx = glusterfsd_ctx;
+ GF_ASSERT (ctx);
+
+ active = ctx->active;
+ if (!active) {
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ any = active->first;
+
+ input = dict_new ();
+ if (!input)
+ goto out;
+
+ ret = dict_unserialize (xlator_req.input.input_val,
+ xlator_req.input.input_len,
+ &input);
+
+ if (ret < 0) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, glusterfsd_msg_35);
+ goto out;
+ }
+
+ /* Send scrubber request to bitrot xlator */
+ snprintf (xname, sizeof (xname), "%s-bit-rot-0", xlator_req.name);
+ xlator = xlator_search_by_name (any, xname);
+ if (!xlator) {
+ snprintf (msg, sizeof (msg), "xlator %s is not loaded", xname);
+ gf_msg (this->name, GF_LOG_ERROR, 0, glusterfsd_msg_36);
+ goto out;
+ }
+
+ output = dict_new ();
+ if (!output) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = xlator->notify (xlator, GF_EVENT_SCRUB_STATUS, input,
+ output);
+out:
+ glusterfs_translator_info_response_send (req, ret, msg, output);
+
+ if (input)
+ dict_unref (input);
+ free (xlator_req.input.input_val); /*malloced by xdr*/
+ if (output)
+ dict_unref (output);
+ free (xlator_req.name);
+
+ return 0;
+}
int
glusterfs_handle_defrag (rpcsvc_request_t *req)
@@ -1394,6 +1476,7 @@ rpcsvc_actor_t glusterfs_actors[GLUSTERD_BRICK_MAXVALUE] = {
[GLUSTERD_NODE_STATUS] = {"NFS STATUS", GLUSTERD_NODE_STATUS, glusterfs_handle_node_status, NULL, 0, DRC_NA},
[GLUSTERD_VOLUME_BARRIER_OP] = {"VOLUME BARRIER OP", GLUSTERD_VOLUME_BARRIER_OP, glusterfs_handle_volume_barrier_op, NULL, 0, DRC_NA},
[GLUSTERD_BRICK_BARRIER] = {"BARRIER", GLUSTERD_BRICK_BARRIER, glusterfs_handle_barrier, NULL, 0, DRC_NA},
+ [GLUSTERD_NODE_BITROT] = {"BITROT", GLUSTERD_NODE_BITROT, glusterfs_handle_bitrot, NULL, 0, DRC_NA},
};
struct rpcsvc_program glusterfs_mop_prog = {
diff --git a/libglusterfs/src/globals.h b/libglusterfs/src/globals.h
index 88e5f77721b..3a4ff44a335 100644
--- a/libglusterfs/src/globals.h
+++ b/libglusterfs/src/globals.h
@@ -38,7 +38,7 @@
*/
#define GD_OP_VERSION_MIN 1 /* MIN is the fresh start op-version, mostly
should not change */
-#define GD_OP_VERSION_MAX GD_OP_VERSION_3_7_6 /* MAX VERSION is the maximum
+#define GD_OP_VERSION_MAX GD_OP_VERSION_3_7_7 /* MAX VERSION is the maximum
count in VME table, should
keep changing with
introduction of newer
@@ -60,6 +60,8 @@
#define GD_OP_VERSION_3_7_6 30706 /* Op-version for GlusterFS 3.7.6 */
+#define GD_OP_VERSION_3_7_7 30707 /* Op-version for GlusterFS 3.7.7 */
+
#define GD_OP_VER_PERSISTENT_AFR_XATTRS GD_OP_VERSION_3_6_0
#include "xlator.h"
diff --git a/libglusterfs/src/glusterfs.h b/libglusterfs/src/glusterfs.h
index 4e4e5b62ab0..d7deeacbf7f 100644
--- a/libglusterfs/src/glusterfs.h
+++ b/libglusterfs/src/glusterfs.h
@@ -625,6 +625,7 @@ typedef enum {
GF_EVENT_VOLUME_BARRIER_OP,
GF_EVENT_UPCALL,
GF_EVENT_SOME_CHILD_DOWN,
+ GF_EVENT_SCRUB_STATUS,
GF_EVENT_MAXVAL,
} glusterfs_event_t;
diff --git a/rpc/rpc-lib/src/protocol-common.h b/rpc/rpc-lib/src/protocol-common.h
index 12031738e0c..96d315c5e79 100644
--- a/rpc/rpc-lib/src/protocol-common.h
+++ b/rpc/rpc-lib/src/protocol-common.h
@@ -217,6 +217,7 @@ enum glusterd_brick_procnum {
GLUSTERD_NODE_STATUS,
GLUSTERD_VOLUME_BARRIER_OP,
GLUSTERD_BRICK_BARRIER,
+ GLUSTERD_NODE_BITROT,
GLUSTERD_BRICK_MAXVALUE,
};
diff --git a/rpc/xdr/src/cli1-xdr.x b/rpc/xdr/src/cli1-xdr.x
index 56f34bc2dae..231b5261f0e 100644
--- a/rpc/xdr/src/cli1-xdr.x
+++ b/rpc/xdr/src/cli1-xdr.x
@@ -43,6 +43,7 @@ enum gf_bitrot_type {
GF_BITROT_OPTION_TYPE_SCRUB_FREQ,
GF_BITROT_OPTION_TYPE_SCRUB,
GF_BITROT_OPTION_TYPE_EXPIRY_TIME,
+ GF_BITROT_CMD_SCRUB_STATUS,
GF_BITROT_OPTION_TYPE_MAX
};
diff --git a/xlators/features/bit-rot/src/bitd/bit-rot.c b/xlators/features/bit-rot/src/bitd/bit-rot.c
index d6ae5e2fdd2..c4ed7dee718 100644
--- a/xlators/features/bit-rot/src/bitd/bit-rot.c
+++ b/xlators/features/bit-rot/src/bitd/bit-rot.c
@@ -1550,9 +1550,12 @@ int
notify (xlator_t *this, int32_t event, void *data, ...)
{
int idx = -1;
+ int ret = -1;
xlator_t *subvol = NULL;
br_child_t *child = NULL;
br_private_t *priv = NULL;
+ dict_t *output = NULL;
+ va_list ap;
subvol = (xlator_t *)data;
priv = this->private;
@@ -1619,6 +1622,21 @@ notify (xlator_t *this, int32_t event, void *data, ...)
default_notify (this, event, data);
break;
+ case GF_EVENT_SCRUB_STATUS:
+ gf_log (this->name, GF_LOG_INFO, "BitRot scrub status "
+ "called");
+ va_start (ap, data);
+ output = va_arg (ap, dict_t *);
+
+ /* As of now hardcoding last-scrub-time value. At the time of
+ * Final patch submission this option value along with other
+ * few option value will be calculate based on real time */
+ ret = dict_set_uint64 (output, "last-scrub-time", 12);
+ if (ret) {
+ gf_log (this->name, GF_LOG_DEBUG, "Failed to set last "
+ "scrub time value");
+ }
+ break;
default:
default_notify (this, event, data);
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-bitrot.c b/xlators/mgmt/glusterd/src/glusterd-bitrot.c
index c9cf9297bb8..6e91106c8e5 100644
--- a/xlators/mgmt/glusterd/src/glusterd-bitrot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-bitrot.c
@@ -39,15 +39,16 @@ const char *gd_bitrot_op_list[GF_BITROT_OPTION_TYPE_MAX] = {
int
__glusterd_handle_bitrot (rpcsvc_request_t *req)
{
- int32_t ret = -1;
- gf_cli_req cli_req = { {0,} };
- dict_t *dict = NULL;
- glusterd_op_t cli_op = GD_OP_BITROT;
- char *volname = NULL;
- int32_t type = 0;
+ int32_t ret = -1;
+ gf_cli_req cli_req = { {0,} };
+ dict_t *dict = NULL;
+ glusterd_op_t cli_op = GD_OP_BITROT;
+ char *volname = NULL;
+ char *scrub = NULL;
+ int32_t type = 0;
char msg[2048] = {0,};
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
GF_ASSERT (req);
@@ -109,6 +110,34 @@ __glusterd_handle_bitrot (rpcsvc_request_t *req)
goto out;
}
+ if (type == GF_BITROT_CMD_SCRUB_STATUS) {
+ /* Backward compatibility handling for scrub status command*/
+ if (conf->op_version < GD_OP_VERSION_3_7_7) {
+ snprintf (msg, sizeof (msg), "Cannot execute command. "
+ "The cluster is operating at version %d. "
+ "Bitrot scrub status command unavailable in "
+ "this version", conf->op_version);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "scrub-value", &scrub);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_GET_FAILED,
+ "Failed to get scrub value.");
+ ret = -1;
+ goto out;
+ }
+
+ if (!strncmp (scrub, "status", strlen ("status"))) {
+ ret = glusterd_op_begin_synctask (req,
+ GD_OP_SCRUB_STATUS,
+ dict);
+ goto out;
+ }
+ }
+
ret = glusterd_op_begin_synctask (req, GD_OP_BITROT, dict);
out:
@@ -542,6 +571,7 @@ glusterd_op_bitrot (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
op_errstr);
if (ret)
goto out;
+ case GF_BITROT_CMD_SCRUB_STATUS:
break;
default:
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 7db62e98b16..8bc47fc8c49 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -685,6 +685,8 @@ glusterd_node_op_build_payload (glusterd_op_t op, gd1_mgmt_brick_op_req **req,
{
int ret = -1;
gd1_mgmt_brick_op_req *brick_req = NULL;
+ char xlname[1024] = {0,};
+ char *volname = NULL;
GF_ASSERT (op < GD_OP_MAX);
GF_ASSERT (op > GD_OP_NONE);
@@ -713,6 +715,20 @@ glusterd_node_op_build_payload (glusterd_op_t op, gd1_mgmt_brick_op_req **req,
break;
+ case GD_OP_SCRUB_STATUS:
+ brick_req = GF_CALLOC (1, sizeof(*brick_req),
+ gf_gld_mt_mop_brick_req_t);
+ if (!brick_req)
+ goto out;
+
+ brick_req->op = GLUSTERD_NODE_BITROT;
+
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret)
+ goto out;
+
+ brick_req->name = gf_strdup (volname);
+ break;
default:
goto out;
}
@@ -4035,6 +4051,7 @@ glusterd_op_build_payload (dict_t **req, char **op_errstr, dict_t *op_ctx)
case GD_OP_DEFRAG_BRICK_VOLUME:
case GD_OP_BARRIER:
case GD_OP_BITROT:
+ case GD_OP_SCRUB_STATUS:
{
do_common = _gf_true;
}
@@ -4616,6 +4633,7 @@ glusterd_op_modify_op_ctx (glusterd_op_t op, void *ctx)
* same
*/
case GD_OP_DEFRAG_BRICK_VOLUME:
+ case GD_OP_SCRUB_STATUS:
ret = dict_get_int32 (op_ctx, "count", &count);
if (ret) {
gf_msg_debug (this->name, 0,
@@ -4663,6 +4681,13 @@ glusterd_op_modify_op_ctx (glusterd_op_t op, void *ctx)
GD_MSG_CONVERSION_FAILED,
"Failed uuid to hostname conversion");
+ /* Since Both rebalance and bitrot scrub status are going to
+ * use same code path till here, we should break in case
+ * of scrub status */
+ if (op == GD_OP_SCRUB_STATUS) {
+ break;
+ }
+
ret = glusterd_op_check_peer_defrag_status (op_ctx, count);
if (ret)
gf_msg (this->name, GF_LOG_ERROR, 0,
@@ -5258,6 +5283,7 @@ glusterd_need_brick_op (glusterd_op_t op)
case GD_OP_STATUS_VOLUME:
case GD_OP_DEFRAG_BRICK_VOLUME:
case GD_OP_HEAL_VOLUME:
+ case GD_OP_SCRUB_STATUS:
ret = _gf_true;
break;
default:
@@ -5520,6 +5546,7 @@ glusterd_op_stage_validate (glusterd_op_t op, dict_t *dict, char **op_errstr,
break;
case GD_OP_BITROT:
+ case GD_OP_SCRUB_STATUS:
ret = glusterd_op_stage_bitrot (dict, op_errstr,
rsp_dict);
break;
@@ -5644,6 +5671,7 @@ glusterd_op_commit_perform (glusterd_op_t op, dict_t *dict, char **op_errstr,
break;
case GD_OP_BITROT:
+ case GD_OP_SCRUB_STATUS:
ret = glusterd_op_bitrot (dict, op_errstr, rsp_dict);
break;
@@ -6808,6 +6836,68 @@ out:
return ret;
}
+static int
+glusterd_bricks_select_scrub (dict_t *dict, char **op_errstr,
+ struct cds_list_head *selected)
+{
+ int ret = -1;
+ char *volname = NULL;
+ char msg[2048] = {0,};
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_pending_node_t *pending_node = NULL;
+
+ this = THIS;
+ priv = this->private;
+ GF_ASSERT (this);
+ GF_ASSERT (priv);
+
+ GF_ASSERT (dict);
+
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_GET_FAILED, "Unable to get"
+ " volname");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ snprintf (msg, sizeof (msg), "Volume %s does not exist",
+ volname);
+
+ *op_errstr = gf_strdup (msg);
+ gf_msg (this->name, GF_LOG_ERROR, EINVAL,
+ GD_MSG_VOL_NOT_FOUND, "%s", msg);
+ goto out;
+ }
+
+ if (!priv->scrub_svc.online) {
+ ret = 0;
+ snprintf (msg, sizeof (msg), "Scrubber daemon is not running");
+
+ gf_msg_debug (this->name, 0, "%s", msg);
+ goto out;
+ }
+
+ pending_node = GF_CALLOC (1, sizeof (*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ }
+
+ pending_node->node = &(priv->scrub_svc);
+ pending_node->type = GD_NODE_SCRUB;
+ cds_list_add_tail (&pending_node->list, selected);
+ pending_node = NULL;
+out:
+ gf_msg_debug (this->name, 0, "Returning %d", ret);
+ return ret;
+}
/* Select the bricks to send the barrier request to.
* This selects the bricks of the given volume which are present on this peer
* and are running
@@ -7021,6 +7111,9 @@ glusterd_op_bricks_select (glusterd_op_t op, dict_t *dict, char **op_errstr,
case GD_OP_SNAP:
ret = glusterd_bricks_select_snap (dict, op_errstr, selected);
break;
+ case GD_OP_SCRUB_STATUS:
+ ret = glusterd_bricks_select_scrub (dict, op_errstr, selected);
+ break;
default:
break;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 8a826521f56..fd51255f65b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -142,6 +142,7 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,
case GD_OP_SNAP:
case GD_OP_BARRIER:
case GD_OP_BITROT:
+ case GD_OP_SCRUB_STATUS:
{
/*nothing specific to be done*/
break;
@@ -2234,6 +2235,7 @@ glusterd_brick_op (call_frame_t *frame, xlator_t *this,
if ((pending_node->type == GD_NODE_NFS) ||
(pending_node->type == GD_NODE_QUOTAD) ||
(pending_node->type == GD_NODE_SNAPD) ||
+ (pending_node->type == GD_NODE_SCRUB) ||
((pending_node->type == GD_NODE_SHD) &&
(req_ctx->op == GD_OP_STATUS_VOLUME)))
ret = glusterd_node_op_build_payload
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index 064077278bd..a0b856160c9 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -304,6 +304,9 @@ glusterd_syncop_aggr_rsp_dict (glusterd_op_t op, dict_t *aggr, dict_t *rsp)
goto out;
break;
+ case GD_OP_SCRUB_STATUS:
+ ret = glusterd_volume_bitrot_scrub_use_rsp_dict (aggr, rsp);
+ break;
default:
break;
}
@@ -932,7 +935,7 @@ gd_syncop_mgmt_brick_op (struct rpc_clnt *rpc, glusterd_pending_node_t *pnode,
args.op_errno = ENOTCONN;
if ((pnode->type == GD_NODE_NFS) ||
- (pnode->type == GD_NODE_QUOTAD) ||
+ (pnode->type == GD_NODE_QUOTAD) || (pnode->type == GD_NODE_SCRUB) ||
((pnode->type == GD_NODE_SHD) && (op == GD_OP_STATUS_VOLUME))) {
ret = glusterd_node_op_build_payload (op, &req, dict_out);
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 6f24e9274ff..c31c394f661 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -4222,7 +4222,8 @@ glusterd_pending_node_get_rpc (glusterd_pending_node_t *pending_node)
} else if (pending_node->type == GD_NODE_SHD ||
pending_node->type == GD_NODE_NFS ||
- pending_node->type == GD_NODE_QUOTAD) {
+ pending_node->type == GD_NODE_QUOTAD ||
+ pending_node->type == GD_NODE_SCRUB) {
svc = pending_node->node;
rpc = svc->conn.rpc;
} else if (pending_node->type == GD_NODE_REBALANCE) {
@@ -8241,6 +8242,393 @@ out:
}
int
+glusterd_volume_bitrot_scrub_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict)
+{
+ int ret = -1;
+ uint64_t value = 0;
+ int32_t count = 0;
+ char key[256] = {0,};
+ uint64_t error_count = 0;
+ uint64_t scrubbed_files = 0;
+ uint64_t unsigned_files = 0;
+ uint64_t scrub_duration = 0;
+ uint64_t last_scrub_time = 0;
+ char *volname = NULL;
+ char *node_uuid = NULL;
+ char *node_uuid_str = NULL;
+ char *bitd_log = NULL;
+ char *scrub_log = NULL;
+ char *scrub_freq = NULL;
+ char *scrub_state = NULL;
+ char *scrub_impact = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ int src_count = 0;
+ int dst_count = 0;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ ret = dict_get_str (aggr, "volname", &volname);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ gf_msg (THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ "Unable to find volinfo for volume: %s", volname);
+ goto out;
+ }
+
+ ret = dict_get_int32 (aggr, "count", &dst_count);
+
+ ret = dict_get_int32 (rsp_dict, "count", &src_count);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "failed to get count value");
+ ret = 0;
+ goto out;
+ }
+
+ ret = dict_set_int32 (aggr, "count", src_count+dst_count);
+ if (ret)
+ gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set count in dictonary");
+
+ snprintf (key, 256, "node-uuid-%d", src_count);
+ ret = dict_get_str (rsp_dict, key, &node_uuid);
+ if (!ret) {
+ node_uuid_str = gf_strdup (node_uuid);
+
+ memset (key, 0, 256);
+ snprintf (key, 256, "node-uuid-%d", src_count+dst_count);
+ ret = dict_set_dynstr (aggr, key, node_uuid_str);
+ if (ret) {
+ gf_msg_debug (this->name, 0, "failed to set node-uuid");
+ }
+ }
+
+ memset (key, 0, 256);
+ snprintf (key, 256, "scrubbed-files-%d", src_count);
+ ret = dict_get_uint64 (rsp_dict, key, &value);
+ if (!ret) {
+ memset (key, 0, 256);
+ snprintf (key, 256, "scrubbed-files-%d", src_count+dst_count);
+ ret = dict_set_uint64 (aggr, key, value);
+ if (ret) {
+ gf_msg_debug (this->name, 0, "Failed to set "
+ "scrubbed-file value");
+ }
+ }
+
+ memset (key, 0, 256);
+ snprintf (key, 256, "unsigned-files-%d", src_count);
+ ret = dict_get_uint64 (rsp_dict, key, &value);
+ if (!ret) {
+ memset (key, 0, 256);
+ snprintf (key, 256, "unsigned-files-%d", src_count+dst_count);
+ ret = dict_set_uint64 (aggr, key, value);
+ if (ret) {
+ gf_msg_debug (this->name, 0, "Failed to set "
+ "unsigned-file value");
+ }
+ }
+
+ memset (key, 0, 256);
+ snprintf (key, 256, "last-scrub-time-%d", src_count);
+ ret = dict_get_uint64 (rsp_dict, key, &value);
+ if (!ret) {
+ memset (key, 0, 256);
+ snprintf (key, 256, "last-scrub-time-%d", src_count+dst_count);
+ ret = dict_set_uint64 (aggr, key, value);
+ if (ret) {
+ gf_msg_debug (this->name, 0, "Failed to set "
+ "last scrub time value");
+ }
+ }
+
+ memset (key, 0, 256);
+ snprintf (key, 256, "scrub-duration-%d", src_count);
+ ret = dict_get_uint64 (rsp_dict, key, &value);
+ if (!ret) {
+ memset (key, 0, 256);
+ snprintf (key, 256, "scrub-duration-%d", src_count+dst_count);
+ ret = dict_set_uint64 (aggr, key, value);
+ if (ret) {
+ gf_msg_debug (this->name, 0, "Failed to set "
+ "scrubbed-duration value");
+ }
+ }
+
+ memset (key, 0, 256);
+ snprintf (key, 256, "error-count-%d", src_count);
+ ret = dict_get_uint64 (rsp_dict, key, &value);
+ if (!ret) {
+ memset (key, 0, 256);
+ snprintf (key, 256, "error-count-%d", src_count+dst_count);
+ ret = dict_set_uint64 (aggr, key, value);
+ if (ret) {
+ gf_msg_debug (this->name, 0, "Failed to set error "
+ "count value");
+ }
+ }
+
+ ret = dict_get_str (rsp_dict, "bitrot_log_file", &bitd_log);
+ if (!ret) {
+ ret = dict_set_str (aggr, "bitrot_log_file", bitd_log);
+ if (ret) {
+ gf_msg_debug (this->name, 0, "Failed to set "
+ "bitrot log file location");
+ goto out;
+ }
+ }
+
+ ret = dict_get_str (rsp_dict, "scrub_log_file", &scrub_log);
+ if (!ret) {
+ ret = dict_set_str (aggr, "scrub_log_file", scrub_log);
+ if (ret) {
+ gf_msg_debug (this->name, 0, "Failed to set "
+ "scrubber log file location");
+ goto out;
+ }
+ }
+
+ ret = dict_get_str (rsp_dict, "features.scrub-freq", &scrub_freq);
+ if (!ret) {
+ ret = dict_set_str (aggr, "features.scrub-freq", scrub_freq);
+ if (ret) {
+ gf_msg_debug (this->name, 0, "Failed to set "
+ "scrub-frequency value to dictionary");
+ goto out;
+ }
+ }
+
+ ret = dict_get_str (rsp_dict, "features.scrub-throttle", &scrub_impact);
+ if (!ret) {
+ ret = dict_set_str (aggr, "features.scrub-throttle",
+ scrub_impact);
+ if (ret) {
+ gf_msg_debug (this->name, 0, "Failed to set "
+ "scrub-throttle value to dictionary");
+ goto out;
+ }
+ }
+
+ ret = dict_get_str (rsp_dict, "features.scrub", &scrub_state);
+ if (!ret) {
+ ret = dict_set_str (aggr, "features.scrub", scrub_state);
+ if (ret) {
+ gf_msg_debug (this->name, 0, "Failed to set "
+ "scrub state value to dictionary");
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+glusterd_bitrot_volume_node_rsp (dict_t *aggr, dict_t *rsp_dict)
+{
+ int ret = -1;
+ uint64_t value = 0;
+ int32_t count = 0;
+ int32_t index = 0;
+ char key[256] = {0,};
+ char buf[1024] = {0,};
+ uint64_t error_count = 0;
+ int32_t i = 0;
+ uint64_t scrubbed_files = 0;
+ uint64_t unsigned_files = 0;
+ uint64_t scrub_duration = 0;
+ uint64_t last_scrub_time = 0;
+ char *volname = NULL;
+ char *node_str = NULL;
+ char *scrub_freq = NULL;
+ char *scrub_state = NULL;
+ char *scrub_impact = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ ret = dict_set_str (aggr, "bitrot_log_file",
+ (priv->bitd_svc.proc.logfile));
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set bitrot log file location");
+ goto out;
+ }
+
+ ret = dict_set_str (aggr, "scrub_log_file",
+ (priv->scrub_svc.proc.logfile));
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set scrubber log file location");
+ goto out;
+ }
+
+ ret = dict_get_str (aggr, "volname", &volname);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+ "Unable to get volume name");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+ "Unable to find volinfo for volume: %s", volname);
+ goto out;
+ }
+
+ ret = dict_get_int32 (aggr, "count", &i);
+ i++;
+
+ ret = dict_set_int32 (aggr, "count", i);
+ if (ret)
+ gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "Failed to set count");
+
+ snprintf (buf, 1024, "%s", uuid_utoa (MY_UUID));
+
+ snprintf (key, 256, "node-uuid-%d", i);
+ ret = dict_set_dynstr_with_alloc (aggr, key, buf);
+ if (ret)
+ gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+ "failed to set node-uuid");
+
+ ret = dict_get_str (volinfo->dict, "features.scrub-freq", &scrub_freq);
+ if (!ret) {
+ ret = dict_set_str (aggr, "features.scrub-freq", scrub_freq);
+ if (ret) {
+ gf_msg_debug (this->name, 0, "Failed to set "
+ "scrub-frequency value to dictionary");
+ }
+ } else {
+ /* By Default scrub-frequency is bi-weekly. So when user
+ * enable bitrot then scrub-frequency value will not be
+ * present in volinfo->dict. Setting by-default value of
+ * scrub-frequency explicitly for presenting it to scrub
+ * status.
+ */
+ ret = dict_set_dynstr_with_alloc (aggr, "features.scrub-freq",
+ "biweekly");
+ if (ret) {
+ gf_msg_debug (this->name, 0, "Failed to set "
+ "scrub-frequency value to dictionary");
+ }
+ }
+
+ ret = dict_get_str (volinfo->dict, "features.scrub-throttle",
+ &scrub_impact);
+ if (!ret) {
+ ret = dict_set_str (aggr, "features.scrub-throttle",
+ scrub_impact);
+ if (ret) {
+ gf_msg_debug (this->name, 0, "Failed to set "
+ "scrub-throttle value to dictionary");
+ }
+ } else {
+ /* By Default scrub-throttle is lazy. So when user
+ * enable bitrot then scrub-throttle value will not be
+ * present in volinfo->dict. Setting by-default value of
+ * scrub-throttle explicitly for presenting it to
+ * scrub status.
+ */
+ ret = dict_set_dynstr_with_alloc (aggr,
+ "features.scrub-throttle",
+ "lazy");
+ if (ret) {
+ gf_msg_debug (this->name, 0, "Failed to set "
+ "scrub-throttle value to dictionary");
+ }
+ }
+
+ ret = dict_get_str (volinfo->dict, "features.scrub", &scrub_state);
+ if (!ret) {
+ ret = dict_set_str (aggr, "features.scrub", scrub_state);
+ if (ret) {
+ gf_msg_debug (this->name, 0, "Failed to set "
+ "scrub state value to dictionary");
+ }
+ }
+
+ ret = dict_get_uint64 (rsp_dict, "scrubbed-files", &value);
+ if (!ret) {
+ memset (key, 0, 256);
+ snprintf (key, 256, "scrubbed-files-%d", i);
+ ret = dict_set_uint64 (aggr, key, value);
+ if (ret) {
+ gf_msg_debug (this->name, 0, "Failed to set "
+ "scrubbed-file value");
+ }
+ }
+
+ ret = dict_get_uint64 (rsp_dict, "unsigned-files", &value);
+ if (!ret) {
+ memset (key, 0, 256);
+ snprintf (key, 256, "unsigned-files-%d", i);
+ ret = dict_set_uint64 (aggr, key, value);
+ if (ret) {
+ gf_msg_debug (this->name, 0, "Failed to set "
+ "unsigned-file value");
+ }
+ }
+
+ ret = dict_get_uint64 (rsp_dict, "last-scrub-time", &value);
+ if (!ret) {
+ memset (key, 0, 256);
+ snprintf (key, 256, "last-scrub-time-%d", i);
+ ret = dict_set_uint64 (aggr, key, value);
+ if (ret) {
+ gf_msg_debug (this->name, 0, "Failed to set "
+ "last scrub time value");
+ }
+ }
+
+ ret = dict_get_uint64 (rsp_dict, "scrub-duration", &value);
+ if (!ret) {
+ memset (key, 0, 256);
+ snprintf (key, 256, "scrub-duration-%d", i);
+ ret = dict_set_uint64 (aggr, key, value);
+ if (ret) {
+ gf_msg_debug (this->name, 0, "Failed to set "
+ "scrubbed-duration value");
+ }
+ }
+
+ ret = dict_get_uint64 (rsp_dict, "error-count", &value);
+ if (!ret) {
+ memset (key, 0, 256);
+ snprintf (key, 256, "error-count-%d", i);
+ ret = dict_set_uint64 (aggr, key, value);
+ if (ret) {
+ gf_msg_debug (this->name, 0, "Failed to set error "
+ "count value");
+ }
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+int
glusterd_volume_rebalance_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict)
{
char key[256] = {0,};
@@ -9139,6 +9527,10 @@ glusterd_handle_node_rsp (dict_t *req_dict, void *pending_entry,
ret = glusterd_heal_volume_brick_rsp (req_dict, rsp_dict,
op_ctx, op_errstr);
break;
+ case GD_OP_SCRUB_STATUS:
+ ret = glusterd_bitrot_volume_node_rsp (op_ctx, rsp_dict);
+
+ break;
default:
break;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index b7302c8cb91..f3895db408b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -436,6 +436,11 @@ int32_t
glusterd_handle_node_rsp (dict_t *req_ctx, void *pending_entry,
glusterd_op_t op, dict_t *rsp_dict, dict_t *op_ctx,
char **op_errstr, gd_node_type type);
+int
+glusterd_volume_bitrot_scrub_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict);
+
+int
+glusterd_volume_heal_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict);
int32_t
glusterd_check_if_quota_trans_enabled (glusterd_volinfo_t *volinfo);
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index 124c6cf0f9f..817112e9aca 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -116,6 +116,7 @@ typedef enum glusterd_op_ {
GD_OP_BITROT,
GD_OP_DETACH_TIER,
GD_OP_TIER_MIGRATE,
+ GD_OP_SCRUB_STATUS,
GD_OP_MAX,
} glusterd_op_t;
@@ -276,6 +277,20 @@ typedef struct _auth auth_t;
#define CAPS_OFFLOAD_SNAPSHOT 0x00000008
#define CAPS_OFFLOAD_ZERO 0x00000020
+struct glusterd_bitrot_scrub_ { /* per-volume cache of bitrot scrubber status */
+ char *scrub_state; /* value of the "features.scrub" volume option */
+ char *scrub_impact; /* presumably the "features.scrub-throttle" value (e.g. "lazy") -- TODO confirm */
+ char *scrub_freq; /* scrub frequency setting (e.g. "biweekly") -- assumed from commit message */
+ uint64_t scrubbed_files; /* per-node "scrubbed-files" count from the scrub status rsp dict */
+ uint64_t unsigned_files; /* per-node "unsigned-files" count from the scrub status rsp dict */
+ uint64_t last_scrub_time; /* per-node "last-scrub-time" value; units not shown here -- verify */
+ uint64_t scrub_duration; /* per-node "scrub-duration" value; units not shown here -- verify */
+ uint64_t error_count; /* per-node "error-count" (bitrot errors) from the rsp dict */
+};
+
+typedef struct glusterd_bitrot_scrub_ glusterd_bitrot_scrub_t; /* embedded in glusterd_volinfo_ as bitrot_scrub */
+
+
struct glusterd_rebalance_ {
gf_defrag_status_t defrag_status;
uint64_t rebalance_files;
@@ -382,6 +397,9 @@ struct glusterd_volinfo_ {
/* Replace brick status */
glusterd_replace_brick_t rep_brick;
+ /* Bitrot scrub status*/
+ glusterd_bitrot_scrub_t bitrot_scrub;
+
int version;
uint32_t quota_conf_version;
uint32_t cksum;