From 097e131481d25e5b1f859f4ea556b8bf56155472 Mon Sep 17 00:00:00 2001
From: Gaurav Kumar Garg
Date: Wed, 25 Mar 2015 18:07:24 +0530
Subject: glusterd: cli command implementation for bitrot scrub status

The CLI command for bitrot scrub status will be:

gluster volume bitrot <VOLNAME> scrub status

The above command will show the statistics of the bitrot scrubber.
Upon execution of this command it will show some common scrubber
tunable values of the volume, followed by the scrubber statistics
of the individual nodes.

Sample output for a single node:

Volume name :
State of scrub: Active
Scrub frequency: biweekly
Bitrot error log location: /var/log/glusterfs/bitd.log
Scrubber error log location: /var/log/glusterfs/scrub.log
=========================================================
Node name:
Number of Scrubbed files:
Number of Unsigned files:
Last completed scrub time:
Duration of last scrub:
Error count:
=========================================================

This is just infrastructure. The list of bad files, the last scrub
time, and the error count values will be taken care of by the
http://review.gluster.org/#/c/12503/ and
http://review.gluster.org/#/c/12654/ patches.

Change-Id: I3ed3c7057c9d0c894233f4079a7f185d90c202d1
BUG: 1207627
Signed-off-by: Gaurav Kumar Garg
Reviewed-on: http://review.gluster.org/10231
Reviewed-by: Atin Mukherjee
Tested-by: NetBSD Build System
Tested-by: Gluster Build System
---
 xlators/mgmt/glusterd/src/glusterd-bitrot.c  |  46 +++-
 xlators/mgmt/glusterd/src/glusterd-op-sm.c   |  93 +++++++
 xlators/mgmt/glusterd/src/glusterd-rpc-ops.c |   2 +
 xlators/mgmt/glusterd/src/glusterd-syncop.c  |   5 +-
 xlators/mgmt/glusterd/src/glusterd-utils.c   | 394 ++++++++++++++++++++++++++-
 xlators/mgmt/glusterd/src/glusterd-utils.h   |   5 +
 xlators/mgmt/glusterd/src/glusterd.h         |  18 ++
 7 files changed, 553 insertions(+), 10 deletions(-)

(limited to 'xlators/mgmt/glusterd/src')

diff --git a/xlators/mgmt/glusterd/src/glusterd-bitrot.c b/xlators/mgmt/glusterd/src/glusterd-bitrot.c
index c9cf9297bb8..6e91106c8e5 100644
--- a/xlators/mgmt/glusterd/src/glusterd-bitrot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-bitrot.c
@@ -39,15 +39,16 @@ const char *gd_bitrot_op_list[GF_BITROT_OPTION_TYPE_MAX] = {
 int
 __glusterd_handle_bitrot (rpcsvc_request_t *req)
 {
-        int32_t          ret = -1;
-        gf_cli_req       cli_req = { {0,} };
-        dict_t          *dict = NULL;
-        glusterd_op_t    cli_op = GD_OP_BITROT;
-        char            *volname = NULL;
-        int32_t          type = 0;
+        int32_t          ret = -1;
+        gf_cli_req       cli_req = { {0,} };
+        dict_t          *dict = NULL;
+        glusterd_op_t    cli_op = GD_OP_BITROT;
+        char            *volname = NULL;
+        char            *scrub = NULL;
+        int32_t          type = 0;
         char             msg[2048] = {0,};
-        xlator_t        *this = NULL;
-        glusterd_conf_t *conf = NULL;
+        xlator_t        *this = NULL;
+        glusterd_conf_t *conf = NULL;

         GF_ASSERT (req);

@@ -109,6 +110,34 @@ __glusterd_handle_bitrot (rpcsvc_request_t *req)
                 goto out;
         }

+        if (type == GF_BITROT_CMD_SCRUB_STATUS) {
+                /* Backward compatibility handling for the scrub status command */
+                if (conf->op_version < GD_OP_VERSION_3_7_7) {
+                        snprintf (msg, sizeof (msg), "Cannot execute command. "
+                                  "The cluster is operating at version %d. "
" + "Bitrot scrub status command unavailable in " + "this version", conf->op_version); + ret = -1; + goto out; + } + + ret = dict_get_str (dict, "scrub-value", &scrub); + if (ret) { + gf_msg (this->name, GF_LOG_ERROR, 0, + GD_MSG_DICT_GET_FAILED, + "Failed to get scrub value."); + ret = -1; + goto out; + } + + if (!strncmp (scrub, "status", strlen ("status"))) { + ret = glusterd_op_begin_synctask (req, + GD_OP_SCRUB_STATUS, + dict); + goto out; + } + } + ret = glusterd_op_begin_synctask (req, GD_OP_BITROT, dict); out: @@ -542,6 +571,7 @@ glusterd_op_bitrot (dict_t *dict, char **op_errstr, dict_t *rsp_dict) op_errstr); if (ret) goto out; + case GF_BITROT_CMD_SCRUB_STATUS: break; default: diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c index 7db62e98b16..8bc47fc8c49 100644 --- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c +++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c @@ -685,6 +685,8 @@ glusterd_node_op_build_payload (glusterd_op_t op, gd1_mgmt_brick_op_req **req, { int ret = -1; gd1_mgmt_brick_op_req *brick_req = NULL; + char xlname[1024] = {0,}; + char *volname = NULL; GF_ASSERT (op < GD_OP_MAX); GF_ASSERT (op > GD_OP_NONE); @@ -713,6 +715,20 @@ glusterd_node_op_build_payload (glusterd_op_t op, gd1_mgmt_brick_op_req **req, break; + case GD_OP_SCRUB_STATUS: + brick_req = GF_CALLOC (1, sizeof(*brick_req), + gf_gld_mt_mop_brick_req_t); + if (!brick_req) + goto out; + + brick_req->op = GLUSTERD_NODE_BITROT; + + ret = dict_get_str (dict, "volname", &volname); + if (ret) + goto out; + + brick_req->name = gf_strdup (volname); + break; default: goto out; } @@ -4035,6 +4051,7 @@ glusterd_op_build_payload (dict_t **req, char **op_errstr, dict_t *op_ctx) case GD_OP_DEFRAG_BRICK_VOLUME: case GD_OP_BARRIER: case GD_OP_BITROT: + case GD_OP_SCRUB_STATUS: { do_common = _gf_true; } @@ -4616,6 +4633,7 @@ glusterd_op_modify_op_ctx (glusterd_op_t op, void *ctx) * same */ case GD_OP_DEFRAG_BRICK_VOLUME: + case GD_OP_SCRUB_STATUS: ret = dict_get_int32 (op_ctx, "count", &count); if (ret) { gf_msg_debug (this->name, 0, @@ -4663,6 +4681,13 @@ glusterd_op_modify_op_ctx (glusterd_op_t op, void *ctx) GD_MSG_CONVERSION_FAILED, "Failed uuid to hostname conversion"); + /* Since Both rebalance and bitrot scrub status are going to + * use same code path till here, we should break in case + * of scrub status */ + if (op == GD_OP_SCRUB_STATUS) { + break; + } + ret = glusterd_op_check_peer_defrag_status (op_ctx, count); if (ret) gf_msg (this->name, GF_LOG_ERROR, 0, @@ -5258,6 +5283,7 @@ glusterd_need_brick_op (glusterd_op_t op) case GD_OP_STATUS_VOLUME: case GD_OP_DEFRAG_BRICK_VOLUME: case GD_OP_HEAL_VOLUME: + case GD_OP_SCRUB_STATUS: ret = _gf_true; break; default: @@ -5520,6 +5546,7 @@ glusterd_op_stage_validate (glusterd_op_t op, dict_t *dict, char **op_errstr, break; case GD_OP_BITROT: + case GD_OP_SCRUB_STATUS: ret = glusterd_op_stage_bitrot (dict, op_errstr, rsp_dict); break; @@ -5644,6 +5671,7 @@ glusterd_op_commit_perform (glusterd_op_t op, dict_t *dict, char **op_errstr, break; case GD_OP_BITROT: + case GD_OP_SCRUB_STATUS: ret = glusterd_op_bitrot (dict, op_errstr, rsp_dict); break; @@ -6808,6 +6836,68 @@ out: return ret; } +static int +glusterd_bricks_select_scrub (dict_t *dict, char **op_errstr, + struct cds_list_head *selected) +{ + int ret = -1; + char *volname = NULL; + char msg[2048] = {0,}; + xlator_t *this = NULL; + glusterd_conf_t *priv = NULL; + glusterd_volinfo_t *volinfo = NULL; + glusterd_brickinfo_t *brickinfo = NULL; + glusterd_pending_node_t 
*pending_node = NULL; + + this = THIS; + priv = this->private; + GF_ASSERT (this); + GF_ASSERT (priv); + + GF_ASSERT (dict); + + ret = dict_get_str (dict, "volname", &volname); + if (ret) { + gf_msg (this->name, GF_LOG_ERROR, 0, + GD_MSG_DICT_GET_FAILED, "Unable to get" + " volname"); + goto out; + } + + ret = glusterd_volinfo_find (volname, &volinfo); + if (ret) { + snprintf (msg, sizeof (msg), "Volume %s does not exist", + volname); + + *op_errstr = gf_strdup (msg); + gf_msg (this->name, GF_LOG_ERROR, EINVAL, + GD_MSG_VOL_NOT_FOUND, "%s", msg); + goto out; + } + + if (!priv->scrub_svc.online) { + ret = 0; + snprintf (msg, sizeof (msg), "Scrubber daemon is not running"); + + gf_msg_debug (this->name, 0, "%s", msg); + goto out; + } + + pending_node = GF_CALLOC (1, sizeof (*pending_node), + gf_gld_mt_pending_node_t); + if (!pending_node) { + ret = -1; + goto out; + } + + pending_node->node = &(priv->scrub_svc); + pending_node->type = GD_NODE_SCRUB; + cds_list_add_tail (&pending_node->list, selected); + pending_node = NULL; +out: + gf_msg_debug (this->name, 0, "Returning %d", ret); + return ret; +} /* Select the bricks to send the barrier request to. * This selects the bricks of the given volume which are present on this peer * and are running @@ -7021,6 +7111,9 @@ glusterd_op_bricks_select (glusterd_op_t op, dict_t *dict, char **op_errstr, case GD_OP_SNAP: ret = glusterd_bricks_select_snap (dict, op_errstr, selected); break; + case GD_OP_SCRUB_STATUS: + ret = glusterd_bricks_select_scrub (dict, op_errstr, selected); + break; default: break; } diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c index 8a826521f56..fd51255f65b 100644 --- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c +++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c @@ -142,6 +142,7 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret, case GD_OP_SNAP: case GD_OP_BARRIER: case GD_OP_BITROT: + case GD_OP_SCRUB_STATUS: { /*nothing specific to be done*/ break; @@ -2234,6 +2235,7 @@ glusterd_brick_op (call_frame_t *frame, xlator_t *this, if ((pending_node->type == GD_NODE_NFS) || (pending_node->type == GD_NODE_QUOTAD) || (pending_node->type == GD_NODE_SNAPD) || + (pending_node->type == GD_NODE_SCRUB) || ((pending_node->type == GD_NODE_SHD) && (req_ctx->op == GD_OP_STATUS_VOLUME))) ret = glusterd_node_op_build_payload diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c index 064077278bd..a0b856160c9 100644 --- a/xlators/mgmt/glusterd/src/glusterd-syncop.c +++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c @@ -304,6 +304,9 @@ glusterd_syncop_aggr_rsp_dict (glusterd_op_t op, dict_t *aggr, dict_t *rsp) goto out; break; + case GD_OP_SCRUB_STATUS: + ret = glusterd_volume_bitrot_scrub_use_rsp_dict (aggr, rsp); + break; default: break; } @@ -932,7 +935,7 @@ gd_syncop_mgmt_brick_op (struct rpc_clnt *rpc, glusterd_pending_node_t *pnode, args.op_errno = ENOTCONN; if ((pnode->type == GD_NODE_NFS) || - (pnode->type == GD_NODE_QUOTAD) || + (pnode->type == GD_NODE_QUOTAD) || (pnode->type == GD_NODE_SCRUB) || ((pnode->type == GD_NODE_SHD) && (op == GD_OP_STATUS_VOLUME))) { ret = glusterd_node_op_build_payload (op, &req, dict_out); diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c index 6f24e9274ff..c31c394f661 100644 --- a/xlators/mgmt/glusterd/src/glusterd-utils.c +++ b/xlators/mgmt/glusterd/src/glusterd-utils.c @@ -4222,7 +4222,8 @@ glusterd_pending_node_get_rpc 
(glusterd_pending_node_t *pending_node)
         } else if (pending_node->type == GD_NODE_SHD ||
                    pending_node->type == GD_NODE_NFS ||
-                   pending_node->type == GD_NODE_QUOTAD) {
+                   pending_node->type == GD_NODE_QUOTAD ||
+                   pending_node->type == GD_NODE_SCRUB) {
                 svc = pending_node->node;
                 rpc = svc->conn.rpc;
         } else if (pending_node->type == GD_NODE_REBALANCE) {
@@ -8240,6 +8241,393 @@ out:
         return ret;
 }

+int
+glusterd_volume_bitrot_scrub_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict)
+{
+        int                  ret = -1;
+        uint64_t             value = 0;
+        int32_t              count = 0;
+        char                 key[256] = {0,};
+        uint64_t             error_count = 0;
+        uint64_t             scrubbed_files = 0;
+        uint64_t             unsigned_files = 0;
+        uint64_t             scrub_duration = 0;
+        uint64_t             last_scrub_time = 0;
+        char                *volname = NULL;
+        char                *node_uuid = NULL;
+        char                *node_uuid_str = NULL;
+        char                *bitd_log = NULL;
+        char                *scrub_log = NULL;
+        char                *scrub_freq = NULL;
+        char                *scrub_state = NULL;
+        char                *scrub_impact = NULL;
+        xlator_t            *this = NULL;
+        glusterd_conf_t     *priv = NULL;
+        glusterd_volinfo_t  *volinfo = NULL;
+        int                  src_count = 0;
+        int                  dst_count = 0;
+
+        this = THIS;
+        GF_ASSERT (this);
+
+        priv = this->private;
+        GF_ASSERT (priv);
+
+        ret = dict_get_str (aggr, "volname", &volname);
+        if (ret) {
+                gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+                        "Unable to get volume name");
+                goto out;
+        }
+
+        ret = glusterd_volinfo_find (volname, &volinfo);
+        if (ret) {
+                gf_msg (THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
+                        "Unable to find volinfo for volume: %s", volname);
+                goto out;
+        }
+
+        ret = dict_get_int32 (aggr, "count", &dst_count);
+
+        ret = dict_get_int32 (rsp_dict, "count", &src_count);
+        if (ret) {
+                gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
+                        "failed to get count value");
+                ret = 0;
+                goto out;
+        }
+
+        ret = dict_set_int32 (aggr, "count", src_count+dst_count);
+        if (ret)
+                gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+                        "Failed to set count in dictionary");
+
+        snprintf (key, 256, "node-uuid-%d", src_count);
+        ret = dict_get_str (rsp_dict, key, &node_uuid);
+        if (!ret) {
+                node_uuid_str = gf_strdup (node_uuid);
+
+                memset (key, 0, 256);
+                snprintf (key, 256, "node-uuid-%d", src_count+dst_count);
+                ret = dict_set_dynstr (aggr, key, node_uuid_str);
+                if (ret) {
+                        gf_msg_debug (this->name, 0, "failed to set node-uuid");
+                }
+        }
+
+        memset (key, 0, 256);
+        snprintf (key, 256, "scrubbed-files-%d", src_count);
+        ret = dict_get_uint64 (rsp_dict, key, &value);
+        if (!ret) {
+                memset (key, 0, 256);
+                snprintf (key, 256, "scrubbed-files-%d", src_count+dst_count);
+                ret = dict_set_uint64 (aggr, key, value);
+                if (ret) {
+                        gf_msg_debug (this->name, 0, "Failed to set "
+                                      "scrubbed-file value");
+                }
+        }
+
+        memset (key, 0, 256);
+        snprintf (key, 256, "unsigned-files-%d", src_count);
+        ret = dict_get_uint64 (rsp_dict, key, &value);
+        if (!ret) {
+                memset (key, 0, 256);
+                snprintf (key, 256, "unsigned-files-%d", src_count+dst_count);
+                ret = dict_set_uint64 (aggr, key, value);
+                if (ret) {
+                        gf_msg_debug (this->name, 0, "Failed to set "
+                                      "unsigned-file value");
+                }
+        }
+
+        memset (key, 0, 256);
+        snprintf (key, 256, "last-scrub-time-%d", src_count);
+        ret = dict_get_uint64 (rsp_dict, key, &value);
+        if (!ret) {
+                memset (key, 0, 256);
+                snprintf (key, 256, "last-scrub-time-%d", src_count+dst_count);
+                ret = dict_set_uint64 (aggr, key, value);
+                if (ret) {
+                        gf_msg_debug (this->name, 0, "Failed to set "
+                                      "last scrub time value");
+                }
+        }
+
+        memset (key, 0, 256);
+        snprintf (key, 256, "scrub-duration-%d", src_count);
+        ret = dict_get_uint64 (rsp_dict,
key, &value); + if (!ret) { + memset (key, 0, 256); + snprintf (key, 256, "scrub-duration-%d", src_count+dst_count); + ret = dict_set_uint64 (aggr, key, value); + if (ret) { + gf_msg_debug (this->name, 0, "Failed to set " + "scrubbed-duration value"); + } + } + + memset (key, 0, 256); + snprintf (key, 256, "error-count-%d", src_count); + ret = dict_get_uint64 (rsp_dict, key, &value); + if (!ret) { + memset (key, 0, 256); + snprintf (key, 256, "error-count-%d", src_count+dst_count); + ret = dict_set_uint64 (aggr, key, value); + if (ret) { + gf_msg_debug (this->name, 0, "Failed to set error " + "count value"); + } + } + + ret = dict_get_str (rsp_dict, "bitrot_log_file", &bitd_log); + if (!ret) { + ret = dict_set_str (aggr, "bitrot_log_file", bitd_log); + if (ret) { + gf_msg_debug (this->name, 0, "Failed to set " + "bitrot log file location"); + goto out; + } + } + + ret = dict_get_str (rsp_dict, "scrub_log_file", &scrub_log); + if (!ret) { + ret = dict_set_str (aggr, "scrub_log_file", scrub_log); + if (ret) { + gf_msg_debug (this->name, 0, "Failed to set " + "scrubber log file location"); + goto out; + } + } + + ret = dict_get_str (rsp_dict, "features.scrub-freq", &scrub_freq); + if (!ret) { + ret = dict_set_str (aggr, "features.scrub-freq", scrub_freq); + if (ret) { + gf_msg_debug (this->name, 0, "Failed to set " + "scrub-frequency value to dictionary"); + goto out; + } + } + + ret = dict_get_str (rsp_dict, "features.scrub-throttle", &scrub_impact); + if (!ret) { + ret = dict_set_str (aggr, "features.scrub-throttle", + scrub_impact); + if (ret) { + gf_msg_debug (this->name, 0, "Failed to set " + "scrub-throttle value to dictionary"); + goto out; + } + } + + ret = dict_get_str (rsp_dict, "features.scrub", &scrub_state); + if (!ret) { + ret = dict_set_str (aggr, "features.scrub", scrub_state); + if (ret) { + gf_msg_debug (this->name, 0, "Failed to set " + "scrub state value to dictionary"); + goto out; + } + } + + ret = 0; +out: + return ret; +} + +int +glusterd_bitrot_volume_node_rsp (dict_t *aggr, dict_t *rsp_dict) +{ + int ret = -1; + uint64_t value = 0; + int32_t count = 0; + int32_t index = 0; + char key[256] = {0,}; + char buf[1024] = {0,}; + uint64_t error_count = 0; + int32_t i = 0; + uint64_t scrubbed_files = 0; + uint64_t unsigned_files = 0; + uint64_t scrub_duration = 0; + uint64_t last_scrub_time = 0; + char *volname = NULL; + char *node_str = NULL; + char *scrub_freq = NULL; + char *scrub_state = NULL; + char *scrub_impact = NULL; + xlator_t *this = NULL; + glusterd_conf_t *priv = NULL; + glusterd_volinfo_t *volinfo = NULL; + + this = THIS; + GF_ASSERT (this); + + priv = this->private; + GF_ASSERT (priv); + + ret = dict_set_str (aggr, "bitrot_log_file", + (priv->bitd_svc.proc.logfile)); + if (ret) { + gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Failed to set bitrot log file location"); + goto out; + } + + ret = dict_set_str (aggr, "scrub_log_file", + (priv->scrub_svc.proc.logfile)); + if (ret) { + gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, + "Failed to set scrubber log file location"); + goto out; + } + + ret = dict_get_str (aggr, "volname", &volname); + if (ret) { + gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, + "Unable to get volume name"); + goto out; + } + + ret = glusterd_volinfo_find (volname, &volinfo); + if (ret) { + gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, + "Unable to find volinfo for volume: %s", volname); + goto out; + } + + ret = dict_get_int32 (aggr, "count", &i); + i++; + + ret = dict_set_int32 
(aggr, "count", i);
+        if (ret)
+                gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+                        "Failed to set count");
+
+        snprintf (buf, 1024, "%s", uuid_utoa (MY_UUID));
+
+        snprintf (key, 256, "node-uuid-%d", i);
+        ret = dict_set_dynstr_with_alloc (aggr, key, buf);
+        if (ret)
+                gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
+                        "failed to set node-uuid");
+
+        ret = dict_get_str (volinfo->dict, "features.scrub-freq", &scrub_freq);
+        if (!ret) {
+                ret = dict_set_str (aggr, "features.scrub-freq", scrub_freq);
+                if (ret) {
+                        gf_msg_debug (this->name, 0, "Failed to set "
+                                      "scrub-frequency value to dictionary");
+                }
+        } else {
+                /* By default the scrub frequency is biweekly, so when a user
+                 * enables bitrot the scrub-frequency value will not be
+                 * present in volinfo->dict. Set the default value of
+                 * scrub-frequency explicitly so it can be presented in the
+                 * scrub status output.
+                 */
+                ret = dict_set_dynstr_with_alloc (aggr, "features.scrub-freq",
+                                                  "biweekly");
+                if (ret) {
+                        gf_msg_debug (this->name, 0, "Failed to set "
+                                      "scrub-frequency value to dictionary");
+                }
+        }
+
+        ret = dict_get_str (volinfo->dict, "features.scrub-throttle",
+                            &scrub_impact);
+        if (!ret) {
+                ret = dict_set_str (aggr, "features.scrub-throttle",
+                                    scrub_impact);
+                if (ret) {
+                        gf_msg_debug (this->name, 0, "Failed to set "
+                                      "scrub-throttle value to dictionary");
+                }
+        } else {
+                /* By default the scrub throttle is lazy, so when a user
+                 * enables bitrot the scrub-throttle value will not be
+                 * present in volinfo->dict. Set the default value of
+                 * scrub-throttle explicitly so it can be presented in the
+                 * scrub status output.
+                 */
+                ret = dict_set_dynstr_with_alloc (aggr,
+                                                  "features.scrub-throttle",
+                                                  "lazy");
+                if (ret) {
+                        gf_msg_debug (this->name, 0, "Failed to set "
+                                      "scrub-throttle value to dictionary");
+                }
+        }
+
+        ret = dict_get_str (volinfo->dict, "features.scrub", &scrub_state);
+        if (!ret) {
+                ret = dict_set_str (aggr, "features.scrub", scrub_state);
+                if (ret) {
+                        gf_msg_debug (this->name, 0, "Failed to set "
+                                      "scrub state value to dictionary");
+                }
+        }
+
+        ret = dict_get_uint64 (rsp_dict, "scrubbed-files", &value);
+        if (!ret) {
+                memset (key, 0, 256);
+                snprintf (key, 256, "scrubbed-files-%d", i);
+                ret = dict_set_uint64 (aggr, key, value);
+                if (ret) {
+                        gf_msg_debug (this->name, 0, "Failed to set "
+                                      "scrubbed-file value");
+                }
+        }
+
+        ret = dict_get_uint64 (rsp_dict, "unsigned-files", &value);
+        if (!ret) {
+                memset (key, 0, 256);
+                snprintf (key, 256, "unsigned-files-%d", i);
+                ret = dict_set_uint64 (aggr, key, value);
+                if (ret) {
+                        gf_msg_debug (this->name, 0, "Failed to set "
+                                      "unsigned-file value");
+                }
+        }
+
+        ret = dict_get_uint64 (rsp_dict, "last-scrub-time", &value);
+        if (!ret) {
+                memset (key, 0, 256);
+                snprintf (key, 256, "last-scrub-time-%d", i);
+                ret = dict_set_uint64 (aggr, key, value);
+                if (ret) {
+                        gf_msg_debug (this->name, 0, "Failed to set "
+                                      "last scrub time value");
+                }
+        }
+
+        ret = dict_get_uint64 (rsp_dict, "scrub-duration", &value);
+        if (!ret) {
+                memset (key, 0, 256);
+                snprintf (key, 256, "scrub-duration-%d", i);
+                ret = dict_set_uint64 (aggr, key, value);
+                if (ret) {
+                        gf_msg_debug (this->name, 0, "Failed to set "
+                                      "scrub-duration value");
+                }
+        }
+
+        ret = dict_get_uint64 (rsp_dict, "error-count", &value);
+        if (!ret) {
+                memset (key, 0, 256);
+                snprintf (key, 256, "error-count-%d", i);
+                ret = dict_set_uint64 (aggr, key, value);
+                if (ret) {
+                        gf_msg_debug (this->name, 0, "Failed to set error "
+                                      "count value");
+                }
+        }
+
+        ret = 0;
+out:
+        return ret;
+}
+
 int
glusterd_volume_rebalance_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict) { @@ -9139,6 +9527,10 @@ glusterd_handle_node_rsp (dict_t *req_dict, void *pending_entry, ret = glusterd_heal_volume_brick_rsp (req_dict, rsp_dict, op_ctx, op_errstr); break; + case GD_OP_SCRUB_STATUS: + ret = glusterd_bitrot_volume_node_rsp (op_ctx, rsp_dict); + + break; default: break; } diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h index b7302c8cb91..f3895db408b 100644 --- a/xlators/mgmt/glusterd/src/glusterd-utils.h +++ b/xlators/mgmt/glusterd/src/glusterd-utils.h @@ -436,6 +436,11 @@ int32_t glusterd_handle_node_rsp (dict_t *req_ctx, void *pending_entry, glusterd_op_t op, dict_t *rsp_dict, dict_t *op_ctx, char **op_errstr, gd_node_type type); +int +glusterd_volume_bitrot_scrub_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict); + +int +glusterd_volume_heal_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict); int32_t glusterd_check_if_quota_trans_enabled (glusterd_volinfo_t *volinfo); diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h index 124c6cf0f9f..817112e9aca 100644 --- a/xlators/mgmt/glusterd/src/glusterd.h +++ b/xlators/mgmt/glusterd/src/glusterd.h @@ -116,6 +116,7 @@ typedef enum glusterd_op_ { GD_OP_BITROT, GD_OP_DETACH_TIER, GD_OP_TIER_MIGRATE, + GD_OP_SCRUB_STATUS, GD_OP_MAX, } glusterd_op_t; @@ -276,6 +277,20 @@ typedef struct _auth auth_t; #define CAPS_OFFLOAD_SNAPSHOT 0x00000008 #define CAPS_OFFLOAD_ZERO 0x00000020 +struct glusterd_bitrot_scrub_ { + char *scrub_state; + char *scrub_impact; + char *scrub_freq; + uint64_t scrubbed_files; + uint64_t unsigned_files; + uint64_t last_scrub_time; + uint64_t scrub_duration; + uint64_t error_count; +}; + +typedef struct glusterd_bitrot_scrub_ glusterd_bitrot_scrub_t; + + struct glusterd_rebalance_ { gf_defrag_status_t defrag_status; uint64_t rebalance_files; @@ -382,6 +397,9 @@ struct glusterd_volinfo_ { /* Replace brick status */ glusterd_replace_brick_t rep_brick; + /* Bitrot scrub status*/ + glusterd_bitrot_scrub_t bitrot_scrub; + int version; uint32_t quota_conf_version; uint32_t cksum; -- cgit
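The per-node aggregation in this patch hinges on one key convention: each node's response carries flat keys ("scrubbed-files", "error-count", ...), glusterd_bitrot_volume_node_rsp re-keys them under the node's index ("scrubbed-files-<i>") while bumping a shared "count", and glusterd_volume_bitrot_scrub_use_rsp_dict shifts those indexed keys when merging peers' aggregates. Below is a minimal standalone sketch of the re-keying step in plain C. The kv_* helpers are illustrative stand-ins for glusterd's dict_t API, not real glusterd calls; everything here is an assumption-free toy, compilable on its own.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_PAIRS 64

/* Toy stand-in for glusterd's dict_t: a flat key -> uint64 map. */
struct kv {
        char     keys[MAX_PAIRS][64];
        uint64_t vals[MAX_PAIRS];
        int      count;
};

static void
kv_set (struct kv *d, const char *key, uint64_t val)
{
        /* No bounds or overflow handling: illustration only. */
        snprintf (d->keys[d->count], sizeof (d->keys[0]), "%s", key);
        d->vals[d->count] = val;
        d->count++;
}

static int
kv_get (const struct kv *d, const char *key, uint64_t *val)
{
        int i;

        for (i = 0; i < d->count; i++) {
                if (strcmp (d->keys[i], key) == 0) {
                        *val = d->vals[i];
                        return 0;
                }
        }
        return -1;   /* non-zero on a miss, like dict_get_* */
}

/* Re-key one node's flat response into the aggregate, the way the patch
 * turns "scrubbed-files" into "scrubbed-files-<node index>". Keys absent
 * from the response are simply skipped, mirroring the patch's behaviour. */
static void
aggregate_node_rsp (struct kv *aggr, const struct kv *rsp, int idx)
{
        static const char *stats[] = { "scrubbed-files", "unsigned-files",
                                       "last-scrub-time", "scrub-duration",
                                       "error-count" };
        char      key[256] = {0,};
        uint64_t  value = 0;
        size_t    i;

        for (i = 0; i < sizeof (stats) / sizeof (stats[0]); i++) {
                if (kv_get (rsp, stats[i], &value))
                        continue;
                snprintf (key, sizeof (key), "%s-%d", stats[i], idx);
                kv_set (aggr, key, value);
        }
}

int
main (void)
{
        struct kv aggr = { .count = 0 };
        struct kv rsp = { .count = 0 };
        uint64_t  v = 0;

        /* One node's response, as the scrubber daemon would fill it in. */
        kv_set (&rsp, "scrubbed-files", 42);
        kv_set (&rsp, "error-count", 3);

        aggregate_node_rsp (&aggr, &rsp, 1);

        if (kv_get (&aggr, "scrubbed-files-1", &v) == 0)
                printf ("scrubbed-files-1 = %" PRIu64 "\n", v);
        if (kv_get (&aggr, "error-count-1", &v) == 0)
                printf ("error-count-1 = %" PRIu64 "\n", v);

        return 0;
}

In the real GD_OP_SCRUB_STATUS flow, src_count (the index inside the incoming response dict) and dst_count (how many nodes the aggregate already holds) together play the role of idx above, which is why the patch writes each merged key at position src_count+dst_count.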