 cli/src/cli-cmd-parser.c                     |  19
 cli/src/cli-cmd-volume.c                     |   2
 cli/src/cli-rpc-ops.c                        | 163
 glusterfsd/src/glusterfsd-messages.h         |   6
 glusterfsd/src/glusterfsd-mgmt.c             |  83
 libglusterfs/src/globals.h                   |   4
 libglusterfs/src/glusterfs.h                 |   1
 rpc/rpc-lib/src/protocol-common.h            |   1
 rpc/xdr/src/cli1-xdr.x                       |   1
 xlators/features/bit-rot/src/bitd/bit-rot.c  |  18
 xlators/mgmt/glusterd/src/glusterd-bitrot.c  |  46
 xlators/mgmt/glusterd/src/glusterd-op-sm.c   |  93
 xlators/mgmt/glusterd/src/glusterd-rpc-ops.c |   2
 xlators/mgmt/glusterd/src/glusterd-syncop.c  |   5
 xlators/mgmt/glusterd/src/glusterd-utils.c   | 394
 xlators/mgmt/glusterd/src/glusterd-utils.h   |   5
 xlators/mgmt/glusterd/src/glusterd.h         |  18
 17 files changed, 846 insertions, 15 deletions
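
This patch adds a "scrub status" sub-command to "gluster volume bitrot". End to end, the flow visible in the diff below is: cli-cmd-parser.c accepts "volume bitrot <VOLNAME> scrub status" and tags the request GF_BITROT_CMD_SCRUB_STATUS; glusterd maps it to the new GD_OP_SCRUB_STATUS op, rejects it if the cluster op-version is below GD_OP_VERSION_3_7_7, and sends a GLUSTERD_NODE_BITROT brick op to the scrubber daemon; the bit-rot xlator answers GF_EVENT_SCRUB_STATUS by filling a response dict; glusterd folds each node's reply into the op context; and gf_cli_print_bitrot_scrub_status() renders the result. The per-node counters travel as flat dict entries whose keys carry a node-index suffix. Below is a minimal, self-contained sketch of that key scheme in plain C; the struct and helper are illustrative only (the patch keeps these values in a dict_t, not a struct), and the UUID and counter values are placeholders.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the per-node counters the scrubber reports. */
struct scrub_node_status {
        const char *node_uuid;
        uint64_t    scrubbed_files;
        uint64_t    unsigned_files;
        uint64_t    last_scrub_time;
        uint64_t    scrub_duration;
        uint64_t    error_count;
};

/* Build the index-suffixed key used in the aggregated dict,
 * e.g. "scrubbed-files-2" for the second node. */
static void
scrub_status_key (char *key, size_t len, const char *field, int index)
{
        snprintf (key, len, "%s-%d", field, index);
}

int
main (void)
{
        struct scrub_node_status node = {
                "00000000-0000-0000-0000-000000000001", 42, 3, 0, 12, 0
        };
        char key[256] = {0,};
        int  i = 1;                     /* node index, 1-based as in the patch */

        scrub_status_key (key, sizeof (key), "scrubbed-files", i);
        printf ("%s = %" PRIu64 "\n", key, node.scrubbed_files);

        scrub_status_key (key, sizeof (key), "error-count", i);
        printf ("%s = %" PRIu64 "\n", key, node.error_count);

        return 0;
}

The same naming scheme is what glusterd_volume_bitrot_scrub_use_rsp_dict() relies on when it re-indexes keys while merging replies from multiple peers, and what the CLI reads back in gf_cli_print_bitrot_scrub_status(). The raw diff follows.
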
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c index ab18aa1ae6d..cc0a9aba96c 100644 --- a/cli/src/cli-cmd-parser.c +++ b/cli/src/cli-cmd-parser.c @@ -5094,7 +5094,7 @@ cli_cmd_bitrot_parse (const char **words, int wordcount, dict_t **options)                                                       "biweekly", "monthly",                                                        NULL};          char               *scrub_values[]        = {"pause", "resume", -                                                      NULL}; +                                                     "status", NULL};          dict_t             *dict                  = NULL;          gf_bitrot_type     type                   = GF_BITROT_OPTION_TYPE_NONE;          int32_t            expiry_time            = 0; @@ -5158,6 +5158,23 @@ cli_cmd_bitrot_parse (const char **words, int wordcount, dict_t **options)                  }          } +        if ((strcmp (words[3], "scrub") == 0) && +            (strcmp (words[4], "status") == 0)) { +                if (wordcount == 5) { +                        type = GF_BITROT_CMD_SCRUB_STATUS; +                        ret =  dict_set_str (dict, "scrub-value", +                                             (char *) words[4]); +                        if (ret) { +                                cli_out ("Failed to set dict for scrub status"); +                                goto out; +                        } +                        goto set_type; +                } else { +                        ret = -1; +                        goto out; +                } +        } +          if (!strcmp (w, "scrub-throttle")) {                  if (!words[4]) {                          cli_err ("Missing scrub-throttle value for bitrot " diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c index 64f1d9d1b38..48ac4201ac9 100644 --- a/cli/src/cli-cmd-volume.c +++ b/cli/src/cli-cmd-volume.c @@ -2788,7 +2788,7 @@ struct cli_cmd volume_cmds[] = {           "volume bitrot <volname> scrub-throttle {lazy|normal|aggressive} |\n"           "volume bitrot <volname> scrub-frequency {hourly|daily|weekly|biweekly"           "|monthly} |\n" -         "volume bitrot <volname> scrub {pause|resume}", +         "volume bitrot <volname> scrub {pause|resume|status}",           cli_cmd_bitrot_cbk,           "Bitrot translator specific operation. 
For more information about "           "bitrot command type  'man gluster'" diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c index 75e5ad49640..24ca507c904 100644 --- a/cli/src/cli-rpc-ops.c +++ b/cli/src/cli-rpc-ops.c @@ -10672,10 +10672,156 @@ out:  }  int +gf_cli_print_bitrot_scrub_status (dict_t *dict) +{ +        int            i                = 1; +        int            ret              = -1; +        int            count            = 0; +        char           key[256]         = {0,}; +        char           *volname         = NULL; +        char           *node_name       = NULL; +        char           *scrub_freq      = NULL; +        char           *state_scrub     = NULL; +        char           *scrub_impact    = NULL; +        char           *scrub_log_file  = NULL; +        char           *bitrot_log_file = NULL; +        uint64_t       scrub_files      = 0; +        uint64_t       unsigned_files   = 0; +        uint64_t       scrub_time       = 0; +        uint64_t       last_scrub       = 0; +        uint64_t       error_count      = 0; + + +        ret = dict_get_str (dict, "volname", &volname); +        if (ret) +                gf_log ("cli", GF_LOG_TRACE, "failed to get volume name"); + +        ret = dict_get_str (dict, "features.scrub", &state_scrub); +        if (ret) +                gf_log ("cli", GF_LOG_TRACE, "failed to get scrub state value"); + +        ret = dict_get_str (dict, "features.scrub-throttle", &scrub_impact); +        if (ret) +                gf_log ("cli", GF_LOG_TRACE, "failed to get scrub impact " +                        "value"); + +        ret = dict_get_str (dict, "features.scrub-freq", &scrub_freq); +        if (ret) +                gf_log ("cli", GF_LOG_TRACE, "failed to get scrub -freq value"); + +        ret = dict_get_str (dict, "bitrot_log_file", &bitrot_log_file); +        if (ret) +                gf_log ("cli", GF_LOG_TRACE, "failed to get bitrot log file " +                        "location"); + +        ret = dict_get_str (dict, "scrub_log_file", &scrub_log_file); +        if (ret) +                gf_log ("cli", GF_LOG_TRACE, "failed to get scrubber log file " +                        "location"); + +        ret = dict_get_int32 (dict, "count", &count); +        if (ret) { +                gf_log ("cli", GF_LOG_ERROR, "count not get count value from" +                        " dictionary"); +                goto out; +        } + +        cli_out ("\n%s: %s\n", "Volume name ", volname); + +        cli_out ("%s: %s\n", "State of scrub", state_scrub); + +        cli_out ("%s: %s\n", "Scrub impact", scrub_impact); + +        cli_out ("%s: %s\n", "Scrub frequency", scrub_freq); + +        cli_out ("%s: %s\n", "Bitrot error log location", bitrot_log_file); + +        cli_out ("%s: %s\n", "Scrubber error log location", scrub_log_file); + + +        for (i = 1; i <= count; i++) { +                /* Reset the variables to prevent carryover of values */ +                node_name       = NULL; +                last_scrub      = 0; +                scrub_time      = 0; +                error_count     = 0; +                scrub_files     = 0; +                unsigned_files  = 0; + +                memset (key, 0, 256); +                snprintf (key, 256, "node-name-%d", i); +                ret = dict_get_str (dict, key, &node_name); +                if (ret) +                        gf_log ("cli", GF_LOG_TRACE, "failed to get node-name"); + +                memset (key, 0, 256); +                snprintf (key, 256, 
"scrubbed-files-%d", i); +                ret = dict_get_uint64 (dict, key, &scrub_files); +                if (ret) +                        gf_log ("cli", GF_LOG_TRACE, "failed to get scrubbed " +                                "files"); + +                memset (key, 0, 256); +                snprintf (key, 256, "unsigned-files-%d", i); +                ret = dict_get_uint64 (dict, key, &unsigned_files); +                if (ret) +                        gf_log ("cli", GF_LOG_TRACE, "failed to get unsigned " +                                "files"); + +                memset (key, 0, 256); +                snprintf (key, 256, "scrub-duration-%d", i); +                ret = dict_get_uint64 (dict, key, &scrub_time); +                if (ret) +                        gf_log ("cli", GF_LOG_TRACE, "failed to get last scrub " +                                "duration"); + +                memset (key, 0, 256); +                snprintf (key, 256, "last-scrub-time-%d", i); +                ret = dict_get_uint64 (dict, key, &last_scrub); +                if (ret) +                        gf_log ("cli", GF_LOG_TRACE, "failed to get last scrub" +                                " time"); + +                memset (key, 0, 256); +                snprintf (key, 256, "error-count-%d", i); +                ret = dict_get_uint64 (dict, key, &error_count); +                if (ret) +                        gf_log ("cli", GF_LOG_TRACE, "failed to get error " +                                "count"); + +                cli_out ("\n%s\n", "==========================================" +                         "==============="); + +                cli_out ("%s: %s\n", "Node name", node_name); + +                cli_out ("%s: %"PRIu64 "\n", "Number of Scrubbed files", +                          scrub_files); + +                cli_out ("%s: %"PRIu64 "\n", "Number of Unsigned files", +                          unsigned_files); + +                cli_out ("%s: %"PRIu64 "\n", "Last completed scrub time", +                          scrub_time); + +                cli_out ("%s: %"PRIu64 "\n", "Duration of last scrub", +                          last_scrub); + +                cli_out ("%s: %"PRIu64 "\n", "Error count", error_count); + +        } +        cli_out ("%s\n", "==========================================" +                 "==============="); + +out: +        return 0; +} + +int  gf_cli_bitrot_cbk (struct rpc_req *req, struct iovec *iov,                     int count, void *myframe)  {          int                  ret                       = -1; +        int                  type                      = 0;          gf_cli_rsp           rsp                       = {0, };          dict_t               *dict                     = NULL;          call_frame_t         *frame                    = NULL; @@ -10729,6 +10875,22 @@ gf_cli_bitrot_cbk (struct rpc_req *req, struct iovec *iov,          gf_log ("cli", GF_LOG_DEBUG, "Received resp to bit rot command"); +        ret = dict_get_int32 (dict, "type", &type); +        if (ret) { +                gf_log ("cli", GF_LOG_ERROR, "Failed to get command type"); +                goto out; +        } + +        if ((type == GF_BITROT_CMD_SCRUB_STATUS) && +             !(global_state->mode & GLUSTER_MODE_XML)) { +                ret = gf_cli_print_bitrot_scrub_status (dict); +                if (ret) { +                        gf_log ("cli", GF_LOG_ERROR, "Failed to print bitrot " +                                "scrub status"); +                } +                
goto out; +        } +  xml_output:          if (global_state->mode & GLUSTER_MODE_XML) {                  ret = cli_xml_output_vol_profile (dict, rsp.op_ret, @@ -10755,7 +10917,6 @@ out:          cli_cmd_broadcast_response (ret);          return ret; -  }  int32_t diff --git a/glusterfsd/src/glusterfsd-messages.h b/glusterfsd/src/glusterfsd-messages.h index 1e1d170e48a..7bf1a3fb5b3 100644 --- a/glusterfsd/src/glusterfsd-messages.h +++ b/glusterfsd/src/glusterfsd-messages.h @@ -41,7 +41,7 @@   */  #define GLFS_COMP_BASE          GLFS_MSGID_COMP_GLUSTERFSD -#define GLFS_NUM_MESSAGES       34 +#define GLFS_NUM_MESSAGES       36  #define GLFS_MSGID_END          (GLFS_COMP_BASE + GLFS_NUM_MESSAGES + 1)  /* Messaged with message IDs */  #define glfs_msg_start_x GLFS_COMP_BASE, "Invalid: Start of messages" @@ -109,6 +109,10 @@                          "was provided"  #define glusterfsd_msg_34 (GLFS_COMP_BASE + 34), "memory accounting init" \                          " failed." +#define glusterfsd_msg_35 (GLFS_COMP_BASE + 35), "rpc req buffer " \ +                        " unserialization failed." +#define glusterfsd_msg_36 (GLFS_COMP_BASE + 36), "problem in xlator " \ +                        " loading."  /*------------*/  #define glfs_msg_end_x GLFS_MSGID_END, "Invalid: End of messages" diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c index 8fe7f1b8563..d3d79797cbf 100644 --- a/glusterfsd/src/glusterfsd-mgmt.c +++ b/glusterfsd/src/glusterfsd-mgmt.c @@ -26,6 +26,7 @@  #include "rpc-clnt.h"  #include "protocol-common.h" +#include "glusterfsd-messages.h"  #include "glusterfs3.h"  #include "portmap-xdr.h"  #include "xdr-generic.h" @@ -645,6 +646,87 @@ out:          return 0;  } +int +glusterfs_handle_bitrot (rpcsvc_request_t *req) +{ +        int32_t                  ret          = -1; +        gd1_mgmt_brick_op_req    xlator_req   = {0,}; +        dict_t                   *input       = NULL; +        dict_t                   *output      = NULL; +        xlator_t                 *any         = NULL; +        xlator_t                 *this        = NULL; +        xlator_t                 *xlator      = NULL; +        char                     msg[2048]    = {0,}; +        char                     xname[1024]  = {0,}; +        glusterfs_ctx_t          *ctx         = NULL; +        glusterfs_graph_t        *active      = NULL; + +        GF_ASSERT (req); +        this = THIS; +        GF_ASSERT (this); + +        ret = xdr_to_generic (req->msg[0], &xlator_req, +                             (xdrproc_t)xdr_gd1_mgmt_brick_op_req); + +        if (ret < 0) { +                /*failed to decode msg;*/ +                req->rpc_err = GARBAGE_ARGS; +                goto out; +        } + +        ctx = glusterfsd_ctx; +        GF_ASSERT (ctx); + +        active = ctx->active; +        if (!active) { +                req->rpc_err = GARBAGE_ARGS; +                goto out; +        } + +        any = active->first; + +        input = dict_new (); +        if (!input) +                goto out; + +        ret = dict_unserialize (xlator_req.input.input_val, +                                xlator_req.input.input_len, +                                &input); + +        if (ret < 0) { +                gf_msg (this->name, GF_LOG_ERROR, 0, glusterfsd_msg_35); +                goto out; +        } + +        /* Send scrubber request to bitrot xlator */ +        snprintf (xname, sizeof (xname), "%s-bit-rot-0", xlator_req.name); +        xlator = xlator_search_by_name (any, xname); +    
    if (!xlator) { +                snprintf (msg, sizeof (msg), "xlator %s is not loaded", xname); +                gf_msg (this->name, GF_LOG_ERROR, 0, glusterfsd_msg_36); +                goto out; +        } + +        output = dict_new (); +        if (!output) { +                ret = -1; +                goto out; +        } + +        ret = xlator->notify (xlator, GF_EVENT_SCRUB_STATUS, input, +                              output); +out: +        glusterfs_translator_info_response_send (req, ret, msg, output); + +        if (input) +                dict_unref (input); +        free (xlator_req.input.input_val); /*malloced by xdr*/ +        if (output) +                dict_unref (output); +        free (xlator_req.name); + +        return 0; +}  int  glusterfs_handle_defrag (rpcsvc_request_t *req) @@ -1398,6 +1480,7 @@ rpcsvc_actor_t glusterfs_actors[GLUSTERD_BRICK_MAXVALUE] = {          [GLUSTERD_NODE_STATUS]         = {"NFS STATUS",        GLUSTERD_NODE_STATUS,         glusterfs_handle_node_status,         NULL, 0, DRC_NA},          [GLUSTERD_VOLUME_BARRIER_OP]   = {"VOLUME BARRIER OP", GLUSTERD_VOLUME_BARRIER_OP,   glusterfs_handle_volume_barrier_op,   NULL, 0, DRC_NA},          [GLUSTERD_BRICK_BARRIER]       = {"BARRIER",           GLUSTERD_BRICK_BARRIER,       glusterfs_handle_barrier,             NULL, 0, DRC_NA}, +        [GLUSTERD_NODE_BITROT]         = {"BITROT",            GLUSTERD_NODE_BITROT,         glusterfs_handle_bitrot,              NULL, 0, DRC_NA},  };  struct rpcsvc_program glusterfs_mop_prog = { diff --git a/libglusterfs/src/globals.h b/libglusterfs/src/globals.h index 6983837d6e6..be6a06bbafb 100644 --- a/libglusterfs/src/globals.h +++ b/libglusterfs/src/globals.h @@ -37,7 +37,7 @@   */  #define GD_OP_VERSION_MIN  1 /* MIN is the fresh start op-version, mostly                                  should not change */ -#define GD_OP_VERSION_MAX  GD_OP_VERSION_3_7_6 /* MAX VERSION is the maximum +#define GD_OP_VERSION_MAX  GD_OP_VERSION_3_7_7 /* MAX VERSION is the maximum                                                    count in VME table, should                                                    keep changing with                                                    introduction of newer @@ -59,6 +59,8 @@  #define GD_OP_VERSION_3_7_6    30706 /* Op-version for GlusterFS 3.7.6 */ +#define GD_OP_VERSION_3_7_7    30707 /* Op-version for GlusterFS 3.7.7 */ +  #define GD_OP_VER_PERSISTENT_AFR_XATTRS GD_OP_VERSION_3_6_0  #include "xlator.h" diff --git a/libglusterfs/src/glusterfs.h b/libglusterfs/src/glusterfs.h index a3028522aaa..1988c33d6c4 100644 --- a/libglusterfs/src/glusterfs.h +++ b/libglusterfs/src/glusterfs.h @@ -621,6 +621,7 @@ typedef enum {          GF_EVENT_PARENT_DOWN,          GF_EVENT_VOLUME_BARRIER_OP,          GF_EVENT_UPCALL, +        GF_EVENT_SCRUB_STATUS,          GF_EVENT_MAXVAL,  } glusterfs_event_t; diff --git a/rpc/rpc-lib/src/protocol-common.h b/rpc/rpc-lib/src/protocol-common.h index 12031738e0c..96d315c5e79 100644 --- a/rpc/rpc-lib/src/protocol-common.h +++ b/rpc/rpc-lib/src/protocol-common.h @@ -217,6 +217,7 @@ enum glusterd_brick_procnum {          GLUSTERD_NODE_STATUS,          GLUSTERD_VOLUME_BARRIER_OP,          GLUSTERD_BRICK_BARRIER, +        GLUSTERD_NODE_BITROT,          GLUSTERD_BRICK_MAXVALUE,  }; diff --git a/rpc/xdr/src/cli1-xdr.x b/rpc/xdr/src/cli1-xdr.x index 56f34bc2dae..231b5261f0e 100644 --- a/rpc/xdr/src/cli1-xdr.x +++ b/rpc/xdr/src/cli1-xdr.x @@ -43,6 +43,7 @@ enum gf_bitrot_type {          
GF_BITROT_OPTION_TYPE_SCRUB_FREQ,          GF_BITROT_OPTION_TYPE_SCRUB,          GF_BITROT_OPTION_TYPE_EXPIRY_TIME, +        GF_BITROT_CMD_SCRUB_STATUS,          GF_BITROT_OPTION_TYPE_MAX  }; diff --git a/xlators/features/bit-rot/src/bitd/bit-rot.c b/xlators/features/bit-rot/src/bitd/bit-rot.c index 43d7faeec09..c5a8a75f847 100644 --- a/xlators/features/bit-rot/src/bitd/bit-rot.c +++ b/xlators/features/bit-rot/src/bitd/bit-rot.c @@ -1565,9 +1565,12 @@ int  notify (xlator_t *this, int32_t event, void *data, ...)  {          int           idx    = -1; +        int           ret    = -1;          xlator_t     *subvol = NULL;          br_child_t   *child  = NULL;          br_private_t *priv   = NULL; +        dict_t       *output = NULL; +        va_list       ap;          subvol = (xlator_t *)data;          priv = this->private; @@ -1634,6 +1637,21 @@ notify (xlator_t *this, int32_t event, void *data, ...)                          default_notify (this, event, data);                  break; +        case GF_EVENT_SCRUB_STATUS: +                gf_log (this->name, GF_LOG_INFO, "BitRot scrub status " +                        "called"); +                va_start (ap, data); +                output = va_arg (ap, dict_t *); + +                /* As of now hardcoding last-scrub-time value. At the time of +                 * Final patch submission this option value along with other +                 * few option value will be calculate based on real time */ +                ret = dict_set_uint64 (output, "last-scrub-time", 12); +                if (ret) { +                        gf_log (this->name, GF_LOG_DEBUG, "Failed to set last " +                                "scrub time value"); +                } +                break;          default:                  default_notify (this, event, data);          } diff --git a/xlators/mgmt/glusterd/src/glusterd-bitrot.c b/xlators/mgmt/glusterd/src/glusterd-bitrot.c index 897916ed860..2e0fb17d2b6 100644 --- a/xlators/mgmt/glusterd/src/glusterd-bitrot.c +++ b/xlators/mgmt/glusterd/src/glusterd-bitrot.c @@ -44,15 +44,16 @@ const char *gd_bitrot_op_list[GF_BITROT_OPTION_TYPE_MAX] = {  int  __glusterd_handle_bitrot (rpcsvc_request_t *req)  { -        int32_t                         ret = -1; -        gf_cli_req                      cli_req = { {0,} }; -        dict_t                         *dict = NULL; -        glusterd_op_t                   cli_op = GD_OP_BITROT; -        char                           *volname = NULL; -        int32_t                         type = 0; +        int32_t                         ret       = -1; +        gf_cli_req                      cli_req   = { {0,} }; +        dict_t                         *dict      = NULL; +        glusterd_op_t                   cli_op    = GD_OP_BITROT; +        char                           *volname   = NULL; +        char                           *scrub     = NULL; +        int32_t                         type      = 0;          char                            msg[2048] = {0,}; -        xlator_t                       *this = NULL; -        glusterd_conf_t                *conf = NULL; +        xlator_t                       *this      = NULL; +        glusterd_conf_t                *conf      = NULL;          GF_ASSERT (req); @@ -114,6 +115,34 @@ __glusterd_handle_bitrot (rpcsvc_request_t *req)                  goto out;          } +        if (type == GF_BITROT_CMD_SCRUB_STATUS) { +                /* Backward compatibility handling for scrub status command*/ +                if 
(conf->op_version < GD_OP_VERSION_3_7_7) { +                        snprintf (msg, sizeof (msg), "Cannot execute command. " +                                  "The cluster is operating at version %d. " +                                  "Bitrot scrub status command unavailable in " +                                  "this version", conf->op_version); +                        ret = -1; +                        goto out; +                } + +                ret = dict_get_str (dict, "scrub-value", &scrub); +                if (ret) { +                        gf_msg (this->name, GF_LOG_ERROR, 0, +                                GD_MSG_DICT_GET_FAILED, +                                "Failed to get scrub value."); +                        ret = -1; +                        goto out; +                } + +                if (!strncmp (scrub, "status", strlen ("status"))) { +                        ret = glusterd_op_begin_synctask (req, +                                                          GD_OP_SCRUB_STATUS, +                                                          dict); +                        goto out; +                } +        } +          ret = glusterd_op_begin_synctask (req, GD_OP_BITROT, dict);  out: @@ -547,6 +576,7 @@ glusterd_op_bitrot (dict_t *dict, char **op_errstr, dict_t *rsp_dict)                                                     op_errstr);                  if (ret)                          goto out; +        case GF_BITROT_CMD_SCRUB_STATUS:                  break;          default: diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c index 42a15c21430..92fc6ac946a 100644 --- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c +++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c @@ -689,6 +689,8 @@ glusterd_node_op_build_payload (glusterd_op_t op, gd1_mgmt_brick_op_req **req,  {          int                     ret = -1;          gd1_mgmt_brick_op_req   *brick_req = NULL; +        char                    xlname[1024] = {0,}; +        char                    *volname = NULL;          GF_ASSERT (op < GD_OP_MAX);          GF_ASSERT (op > GD_OP_NONE); @@ -717,6 +719,20 @@ glusterd_node_op_build_payload (glusterd_op_t op, gd1_mgmt_brick_op_req **req,                  break; +        case GD_OP_SCRUB_STATUS: +                brick_req = GF_CALLOC (1, sizeof(*brick_req), +                                       gf_gld_mt_mop_brick_req_t); +                if (!brick_req) +                        goto out; + +                brick_req->op = GLUSTERD_NODE_BITROT; + +                ret = dict_get_str (dict, "volname", &volname); +                if (ret) +                        goto out; + +                brick_req->name = gf_strdup (volname); +                break;          default:                  goto out;          } @@ -4019,6 +4035,7 @@ glusterd_op_build_payload (dict_t **req, char **op_errstr, dict_t *op_ctx)                  case GD_OP_DEFRAG_BRICK_VOLUME:                  case GD_OP_BARRIER:                  case GD_OP_BITROT: +                case GD_OP_SCRUB_STATUS:                          {                                  do_common = _gf_true;                          } @@ -4600,6 +4617,7 @@ glusterd_op_modify_op_ctx (glusterd_op_t op, void *ctx)           * same           */          case GD_OP_DEFRAG_BRICK_VOLUME: +        case GD_OP_SCRUB_STATUS:                  ret = dict_get_int32 (op_ctx, "count", &count);                  if (ret) {                          gf_msg_debug (this->name, 0, @@ -4647,6 +4665,13 @@ 
glusterd_op_modify_op_ctx (glusterd_op_t op, void *ctx)                                  GD_MSG_CONVERSION_FAILED,                                  "Failed uuid to hostname conversion"); +                /* Since Both rebalance and bitrot scrub status are going to +                 * use same code path till here, we should break in case +                 * of scrub status */ +                if (op == GD_OP_SCRUB_STATUS) { +                        break; +                } +                  ret = glusterd_op_check_peer_defrag_status (op_ctx, count);                  if (ret)                          gf_msg (this->name, GF_LOG_ERROR, 0, @@ -5241,6 +5266,7 @@ glusterd_need_brick_op (glusterd_op_t op)          case GD_OP_STATUS_VOLUME:          case GD_OP_DEFRAG_BRICK_VOLUME:          case GD_OP_HEAL_VOLUME: +        case GD_OP_SCRUB_STATUS:                  ret = _gf_true;                  break;          default: @@ -5502,6 +5528,7 @@ glusterd_op_stage_validate (glusterd_op_t op, dict_t *dict, char **op_errstr,                          break;                  case GD_OP_BITROT: +                case GD_OP_SCRUB_STATUS:                          ret = glusterd_op_stage_bitrot (dict, op_errstr,                                                          rsp_dict);                          break; @@ -5626,6 +5653,7 @@ glusterd_op_commit_perform (glusterd_op_t op, dict_t *dict, char **op_errstr,                          break;                  case GD_OP_BITROT: +                case GD_OP_SCRUB_STATUS:                          ret = glusterd_op_bitrot (dict, op_errstr, rsp_dict);                          break; @@ -6790,6 +6818,68 @@ out:          return ret;  } +static int +glusterd_bricks_select_scrub (dict_t *dict, char **op_errstr, +                              struct cds_list_head *selected) +{ +        int                       ret           = -1; +        char                      *volname      = NULL; +        char                      msg[2048]     = {0,}; +        xlator_t                  *this         = NULL; +        glusterd_conf_t           *priv         = NULL; +        glusterd_volinfo_t        *volinfo      = NULL; +        glusterd_brickinfo_t      *brickinfo    = NULL; +        glusterd_pending_node_t   *pending_node = NULL; + +        this = THIS; +        priv = this->private; +        GF_ASSERT (this); +        GF_ASSERT (priv); + +        GF_ASSERT (dict); + +        ret = dict_get_str (dict, "volname", &volname); +        if (ret) { +                gf_msg (this->name, GF_LOG_ERROR, 0, +                        GD_MSG_DICT_GET_FAILED, "Unable to get" +                        " volname"); +                goto out; +        } + +        ret = glusterd_volinfo_find (volname, &volinfo); +        if (ret) { +                snprintf (msg, sizeof (msg), "Volume %s does not exist", +                          volname); + +                *op_errstr = gf_strdup (msg); +                gf_msg (this->name, GF_LOG_ERROR, EINVAL, +                        GD_MSG_VOL_NOT_FOUND, "%s", msg); +                goto out; +        } + +        if (!priv->scrub_svc.online) { +                ret = 0; +                snprintf (msg, sizeof (msg), "Scrubber daemon is not running"); + +                gf_msg_debug (this->name, 0, "%s", msg); +                goto out; +        } + +        pending_node = GF_CALLOC (1, sizeof (*pending_node), +                                  gf_gld_mt_pending_node_t); +        if (!pending_node) { +                ret = -1; +                goto out; +        } 
+ +        pending_node->node = &(priv->scrub_svc); +        pending_node->type = GD_NODE_SCRUB; +        cds_list_add_tail (&pending_node->list, selected); +        pending_node = NULL; +out: +        gf_msg_debug (this->name, 0, "Returning %d", ret); +        return ret; +}  /* Select the bricks to send the barrier request to.   * This selects the bricks of the given volume which are present on this peer   * and are running @@ -7003,6 +7093,9 @@ glusterd_op_bricks_select (glusterd_op_t op, dict_t *dict, char **op_errstr,          case GD_OP_SNAP:                  ret = glusterd_bricks_select_snap (dict, op_errstr, selected);                  break; +        case GD_OP_SCRUB_STATUS: +                ret = glusterd_bricks_select_scrub (dict, op_errstr, selected); +                break;          default:                  break;           } diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c index 3250a9bb04d..b6355e89026 100644 --- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c +++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c @@ -147,6 +147,7 @@ glusterd_op_send_cli_response (glusterd_op_t op, int32_t op_ret,          case GD_OP_SNAP:          case GD_OP_BARRIER:          case GD_OP_BITROT: +        case GD_OP_SCRUB_STATUS:          {                  /*nothing specific to be done*/                  break; @@ -2239,6 +2240,7 @@ glusterd_brick_op (call_frame_t *frame, xlator_t *this,                  if ((pending_node->type == GD_NODE_NFS) ||                      (pending_node->type == GD_NODE_QUOTAD) ||                      (pending_node->type == GD_NODE_SNAPD) || +                    (pending_node->type == GD_NODE_SCRUB) ||                      ((pending_node->type == GD_NODE_SHD) &&                       (req_ctx->op == GD_OP_STATUS_VOLUME)))                          ret = glusterd_node_op_build_payload diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c index 064077278bd..a0b856160c9 100644 --- a/xlators/mgmt/glusterd/src/glusterd-syncop.c +++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c @@ -304,6 +304,9 @@ glusterd_syncop_aggr_rsp_dict (glusterd_op_t op, dict_t *aggr, dict_t *rsp)                          goto out;          break; +        case GD_OP_SCRUB_STATUS: +                ret = glusterd_volume_bitrot_scrub_use_rsp_dict (aggr, rsp); +        break;          default:          break;          } @@ -932,7 +935,7 @@ gd_syncop_mgmt_brick_op (struct rpc_clnt *rpc, glusterd_pending_node_t *pnode,          args.op_errno = ENOTCONN;          if ((pnode->type == GD_NODE_NFS) || -            (pnode->type == GD_NODE_QUOTAD) || +            (pnode->type == GD_NODE_QUOTAD) || (pnode->type == GD_NODE_SCRUB) ||              ((pnode->type == GD_NODE_SHD) && (op == GD_OP_STATUS_VOLUME))) {                  ret = glusterd_node_op_build_payload (op, &req, dict_out); diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c index 0683faaa63d..9adae10346b 100644 --- a/xlators/mgmt/glusterd/src/glusterd-utils.c +++ b/xlators/mgmt/glusterd/src/glusterd-utils.c @@ -4147,7 +4147,8 @@ glusterd_pending_node_get_rpc (glusterd_pending_node_t *pending_node)          } else if (pending_node->type == GD_NODE_SHD ||                     pending_node->type == GD_NODE_NFS || -                   pending_node->type == GD_NODE_QUOTAD) { +                   pending_node->type == GD_NODE_QUOTAD || +                   pending_node->type == GD_NODE_SCRUB) {                  
svc = pending_node->node;                  rpc = svc->conn.rpc;          } else if (pending_node->type == GD_NODE_REBALANCE) { @@ -8162,6 +8163,393 @@ out:  }  int +glusterd_volume_bitrot_scrub_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict) +{ +        int                      ret                = -1; +        uint64_t                 value              = 0; +        int32_t                  count              = 0; +        char                     key[256]           = {0,}; +        uint64_t                 error_count        = 0; +        uint64_t                 scrubbed_files     = 0; +        uint64_t                 unsigned_files     = 0; +        uint64_t                 scrub_duration     = 0; +        uint64_t                 last_scrub_time    = 0; +        char                    *volname            = NULL; +        char                    *node_uuid          = NULL; +        char                    *node_uuid_str      = NULL; +        char                    *bitd_log           = NULL; +        char                    *scrub_log          = NULL; +        char                    *scrub_freq         = NULL; +        char                    *scrub_state        = NULL; +        char                    *scrub_impact       = NULL; +        xlator_t                *this               = NULL; +        glusterd_conf_t         *priv               = NULL; +        glusterd_volinfo_t      *volinfo            = NULL; +        int                      src_count          = 0; +        int                      dst_count          = 0; + +        this = THIS; +        GF_ASSERT (this); + +        priv = this->private; +        GF_ASSERT (priv); + +        ret = dict_get_str (aggr, "volname", &volname); +        if (ret) { +                gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, +                        "Unable to get volume name"); +                goto out; +        } + +        ret = glusterd_volinfo_find (volname, &volinfo); +        if (ret) { +                gf_msg (THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, +                        "Unable to find volinfo for volume: %s", volname); +                goto out; +        } + +        ret = dict_get_int32 (aggr, "count", &dst_count); + +        ret = dict_get_int32 (rsp_dict, "count", &src_count); +        if (ret) { +                gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, +                        "failed to get count value"); +                ret = 0; +                goto out; +        } + +        ret = dict_set_int32 (aggr, "count", src_count+dst_count); +        if (ret) +                gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, +                        "Failed to set count in dictonary"); + +        snprintf (key, 256, "node-uuid-%d", src_count); +        ret = dict_get_str (rsp_dict, key, &node_uuid); +        if (!ret) { +                node_uuid_str = gf_strdup (node_uuid); + +                memset (key, 0, 256); +                snprintf (key, 256, "node-uuid-%d", src_count+dst_count); +                ret = dict_set_dynstr (aggr, key, node_uuid_str); +                if (ret) { +                        gf_msg_debug (this->name, 0, "failed to set node-uuid"); +                } +        } + +        memset (key, 0, 256); +        snprintf (key, 256, "scrubbed-files-%d", src_count); +        ret = dict_get_uint64 (rsp_dict, key, &value); +        if (!ret) { +                memset (key, 0, 256); +                snprintf (key, 256, "scrubbed-files-%d", 
src_count+dst_count); +                ret = dict_set_uint64 (aggr, key, value); +                if (ret) { +                        gf_msg_debug (this->name, 0, "Failed to set " +                                      "scrubbed-file value"); +                } +        } + +        memset (key, 0, 256); +        snprintf (key, 256, "unsigned-files-%d", src_count); +        ret = dict_get_uint64 (rsp_dict, key, &value); +        if (!ret) { +                memset (key, 0, 256); +                snprintf (key, 256, "unsigned-files-%d", src_count+dst_count); +                ret = dict_set_uint64 (aggr, key, value); +                if (ret) { +                        gf_msg_debug (this->name, 0, "Failed to set " +                                      "unsigned-file value"); +                } +        } + +        memset (key, 0, 256); +        snprintf (key, 256, "last-scrub-time-%d", src_count); +        ret = dict_get_uint64 (rsp_dict, key, &value); +        if (!ret) { +                memset (key, 0, 256); +                snprintf (key, 256, "last-scrub-time-%d", src_count+dst_count); +                ret = dict_set_uint64 (aggr, key, value); +                if (ret) { +                        gf_msg_debug (this->name, 0, "Failed to set " +                                      "last scrub time value"); +                } +        } + +        memset (key, 0, 256); +        snprintf (key, 256, "scrub-duration-%d", src_count); +        ret = dict_get_uint64 (rsp_dict, key, &value); +        if (!ret) { +                memset (key, 0, 256); +                snprintf (key, 256, "scrub-duration-%d", src_count+dst_count); +                ret = dict_set_uint64 (aggr, key, value); +                if (ret) { +                        gf_msg_debug (this->name, 0, "Failed to set " +                                      "scrubbed-duration value"); +                } +        } + +        memset (key, 0, 256); +        snprintf (key, 256, "error-count-%d", src_count); +        ret = dict_get_uint64 (rsp_dict, key, &value); +        if (!ret) { +                memset (key, 0, 256); +                snprintf (key, 256, "error-count-%d", src_count+dst_count); +                ret = dict_set_uint64 (aggr, key, value); +                if (ret) { +                        gf_msg_debug (this->name, 0, "Failed to set error " +                                      "count value"); +                } +        } + +        ret = dict_get_str (rsp_dict, "bitrot_log_file", &bitd_log); +        if (!ret) { +                ret = dict_set_str (aggr, "bitrot_log_file", bitd_log); +                if (ret) { +                        gf_msg_debug (this->name, 0, "Failed to set " +                                      "bitrot log file location"); +                        goto out; +                } +        } + +        ret = dict_get_str (rsp_dict, "scrub_log_file", &scrub_log); +        if (!ret) { +                ret = dict_set_str (aggr, "scrub_log_file", scrub_log); +                if (ret) { +                        gf_msg_debug (this->name, 0, "Failed to set " +                                      "scrubber log file location"); +                        goto out; +                } +        } + +        ret = dict_get_str (rsp_dict, "features.scrub-freq", &scrub_freq); +        if (!ret) { +                ret = dict_set_str (aggr, "features.scrub-freq", scrub_freq); +                if (ret) { +                        gf_msg_debug (this->name, 0, "Failed to set " +                                      
"scrub-frequency value to dictionary"); +                        goto out; +                } +        } + +        ret = dict_get_str (rsp_dict, "features.scrub-throttle", &scrub_impact); +        if (!ret) { +                ret = dict_set_str (aggr, "features.scrub-throttle", +                                    scrub_impact); +                if (ret) { +                        gf_msg_debug (this->name, 0, "Failed to set " +                                      "scrub-throttle value to dictionary"); +                        goto out; +                } +        } + +        ret = dict_get_str (rsp_dict, "features.scrub", &scrub_state); +        if (!ret) { +                ret = dict_set_str (aggr, "features.scrub", scrub_state); +                if (ret) { +                        gf_msg_debug (this->name, 0, "Failed to set " +                                      "scrub state value to dictionary"); +                        goto out; +                } +        } + +        ret = 0; +out: +        return ret; +} + +int +glusterd_bitrot_volume_node_rsp (dict_t *aggr, dict_t *rsp_dict) +{ +        int                      ret                = -1; +        uint64_t                 value              = 0; +        int32_t                  count              = 0; +        int32_t                  index              = 0; +        char                     key[256]           = {0,}; +        char                     buf[1024]          = {0,}; +        uint64_t                 error_count        = 0; +        int32_t                  i                  = 0; +        uint64_t                 scrubbed_files     = 0; +        uint64_t                 unsigned_files     = 0; +        uint64_t                 scrub_duration     = 0; +        uint64_t                 last_scrub_time    = 0; +        char                    *volname            = NULL; +        char                    *node_str           = NULL; +        char                    *scrub_freq         = NULL; +        char                    *scrub_state        = NULL; +        char                    *scrub_impact       = NULL; +        xlator_t                *this               = NULL; +        glusterd_conf_t         *priv               = NULL; +        glusterd_volinfo_t      *volinfo            = NULL; + +        this = THIS; +        GF_ASSERT (this); + +        priv = this->private; +        GF_ASSERT (priv); + +        ret = dict_set_str (aggr, "bitrot_log_file", +                           (priv->bitd_svc.proc.logfile)); +        if (ret) { +                gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, +                        "Failed to set bitrot log file location"); +                goto out; +        } + +        ret = dict_set_str (aggr, "scrub_log_file", +                           (priv->scrub_svc.proc.logfile)); +        if (ret) { +                gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, +                        "Failed to set scrubber log file location"); +                goto out; +        } + +        ret = dict_get_str (aggr, "volname", &volname); +        if (ret) { +                gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, +                        "Unable to get volume name"); +                goto out; +        } + +        ret = glusterd_volinfo_find (volname, &volinfo); +        if (ret) { +                gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, +                        "Unable to find volinfo for volume: %s", volname); +                
goto out; +        } + +        ret = dict_get_int32 (aggr, "count", &i); +        i++; + +        ret = dict_set_int32 (aggr, "count", i); +        if (ret) +                gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, +                        "Failed to set count"); + +        snprintf (buf, 1024, "%s", uuid_utoa (MY_UUID)); + +        snprintf (key, 256, "node-uuid-%d", i); +        ret = dict_set_dynstr_with_alloc (aggr, key, buf); +        if (ret) +                gf_msg (this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, +                        "failed to set node-uuid"); + +        ret = dict_get_str (volinfo->dict, "features.scrub-freq", &scrub_freq); +        if (!ret) { +                ret = dict_set_str (aggr, "features.scrub-freq", scrub_freq); +                if (ret) { +                        gf_msg_debug (this->name, 0, "Failed to set " +                                      "scrub-frequency value to dictionary"); +                } +        } else { +                /* By Default scrub-frequency is bi-weekly. So when user +                 * enable bitrot then scrub-frequency value will not be +                 * present in volinfo->dict. Setting by-default value of +                 * scrub-frequency explicitly for presenting it to scrub +                 * status. +                 */ +                 ret = dict_set_dynstr_with_alloc (aggr, "features.scrub-freq", +                                                   "biweekly"); +                 if (ret) { +                         gf_msg_debug (this->name, 0, "Failed to set " +                                       "scrub-frequency value to dictionary"); +                 } +        } + +        ret = dict_get_str (volinfo->dict, "features.scrub-throttle", +                            &scrub_impact); +        if (!ret) { +                ret = dict_set_str (aggr, "features.scrub-throttle", +                                    scrub_impact); +                if (ret) { +                        gf_msg_debug (this->name, 0, "Failed to set " +                                      "scrub-throttle value to dictionary"); +                } +        } else { +                /* By Default scrub-throttle is lazy. So when user +                 * enable bitrot then scrub-throttle value will not be +                 * present in volinfo->dict. Setting by-default value of +                 * scrub-throttle explicitly for presenting it to +                 * scrub status. 
+                 */ +                 ret = dict_set_dynstr_with_alloc (aggr, +                                                   "features.scrub-throttle", +                                                   "lazy"); +                 if (ret) { +                         gf_msg_debug (this->name, 0, "Failed to set " +                                       "scrub-throttle value to dictionary"); +                 } +        } + +        ret = dict_get_str (volinfo->dict, "features.scrub", &scrub_state); +        if (!ret) { +                ret = dict_set_str (aggr, "features.scrub", scrub_state); +                if (ret) { +                        gf_msg_debug (this->name, 0, "Failed to set " +                                      "scrub state value to dictionary"); +                } +        } + +        ret = dict_get_uint64 (rsp_dict, "scrubbed-files", &value); +        if (!ret) { +                memset (key, 0, 256); +                snprintf (key, 256, "scrubbed-files-%d", i); +                ret = dict_set_uint64 (aggr, key, value); +                if (ret) { +                        gf_msg_debug (this->name, 0, "Failed to set " +                                      "scrubbed-file value"); +                } +        } + +        ret = dict_get_uint64 (rsp_dict, "unsigned-files", &value); +        if (!ret) { +                memset (key, 0, 256); +                snprintf (key, 256, "unsigned-files-%d", i); +                ret = dict_set_uint64 (aggr, key, value); +                if (ret) { +                        gf_msg_debug (this->name, 0, "Failed to set " +                                      "unsigned-file value"); +                } +        } + +        ret = dict_get_uint64 (rsp_dict, "last-scrub-time", &value); +        if (!ret) { +                memset (key, 0, 256); +                snprintf (key, 256, "last-scrub-time-%d", i); +                ret = dict_set_uint64 (aggr, key, value); +                if (ret) { +                        gf_msg_debug (this->name, 0, "Failed to set " +                                      "last scrub time value"); +                } +        } + +        ret = dict_get_uint64 (rsp_dict, "scrub-duration", &value); +        if (!ret) { +                memset (key, 0, 256); +                snprintf (key, 256, "scrub-duration-%d", i); +                ret = dict_set_uint64 (aggr, key, value); +                if (ret) { +                        gf_msg_debug (this->name, 0, "Failed to set " +                                      "scrubbed-duration value"); +                } +        } + +        ret = dict_get_uint64 (rsp_dict, "error-count", &value); +        if (!ret) { +                memset (key, 0, 256); +                snprintf (key, 256, "error-count-%d", i); +                ret = dict_set_uint64 (aggr, key, value); +                if (ret) { +                        gf_msg_debug (this->name, 0, "Failed to set error " +                                      "count value"); +                } +        } + +        ret = 0; +out: +        return ret; +} + +int  glusterd_volume_rebalance_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict)  {          char                 key[256]      = {0,}; @@ -9060,6 +9448,10 @@ glusterd_handle_node_rsp (dict_t *req_dict, void *pending_entry,                  ret = glusterd_heal_volume_brick_rsp (req_dict, rsp_dict,                                                        op_ctx, op_errstr);                  break; +        case GD_OP_SCRUB_STATUS: +                ret = 
glusterd_bitrot_volume_node_rsp (op_ctx, rsp_dict); + +                break;          default:                  break;          } diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h index d20b4ec5688..8bc82f663e7 100644 --- a/xlators/mgmt/glusterd/src/glusterd-utils.h +++ b/xlators/mgmt/glusterd/src/glusterd-utils.h @@ -441,6 +441,11 @@ int32_t  glusterd_handle_node_rsp (dict_t *req_ctx, void *pending_entry,                            glusterd_op_t op, dict_t *rsp_dict, dict_t *op_ctx,                            char **op_errstr, gd_node_type type); +int +glusterd_volume_bitrot_scrub_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict); + +int +glusterd_volume_heal_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict);  int32_t  glusterd_check_if_quota_trans_enabled (glusterd_volinfo_t *volinfo); diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h index 1220c87cb6a..a51320ffaaf 100644 --- a/xlators/mgmt/glusterd/src/glusterd.h +++ b/xlators/mgmt/glusterd/src/glusterd.h @@ -121,6 +121,7 @@ typedef enum glusterd_op_ {          GD_OP_BITROT,          GD_OP_DETACH_TIER,          GD_OP_TIER_MIGRATE, +        GD_OP_SCRUB_STATUS,          GD_OP_MAX,  } glusterd_op_t; @@ -281,6 +282,20 @@ typedef struct _auth auth_t;  #define CAPS_OFFLOAD_SNAPSHOT 0x00000008  #define CAPS_OFFLOAD_ZERO     0x00000020 +struct glusterd_bitrot_scrub_ { +        char        *scrub_state; +        char        *scrub_impact; +        char        *scrub_freq; +        uint64_t    scrubbed_files; +        uint64_t    unsigned_files; +        uint64_t    last_scrub_time; +        uint64_t    scrub_duration; +        uint64_t    error_count; +}; + +typedef struct glusterd_bitrot_scrub_ glusterd_bitrot_scrub_t; + +  struct glusterd_rebalance_ {          gf_defrag_status_t       defrag_status;          uint64_t                 rebalance_files; @@ -387,6 +402,9 @@ struct glusterd_volinfo_ {          /* Replace brick status */          glusterd_replace_brick_t  rep_brick; +        /* Bitrot scrub status*/ +        glusterd_bitrot_scrub_t   bitrot_scrub; +          int                       version;          uint32_t                  quota_conf_version;          uint32_t                  cksum;  | 
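
On the management side, aggregation happens in two steps: glusterd_bitrot_volume_node_rsp() copies the scrubber's node-local reply (plus the volume's scrub-freq/throttle/state settings and the log file paths) into the op context under node-index-suffixed keys, and glusterd_volume_bitrot_scrub_use_rsp_dict() merges replies coming back from peer glusterds by bumping "count" and re-indexing those keys. The following is a condensed sketch of that merge step, using only the libglusterfs dict calls that already appear in the patch; error handling and most fields are trimmed, so treat it as an illustration of the pattern rather than the patch function, and it assumes libglusterfs's dict.h is on the include path.

/* Condensed illustration of the peer-merge step performed by
 * glusterd_volume_bitrot_scrub_use_rsp_dict(): one peer's reply (rsp)
 * is folded into the aggregate dict (aggr) by bumping "count" and
 * re-indexing the per-node keys. */
#include <stdio.h>
#include "dict.h"       /* libglusterfs dict_t API used throughout the patch */

static int
merge_one_scrub_rsp (dict_t *aggr, dict_t *rsp)
{
        int32_t   dst = 0;
        int32_t   src = 0;
        uint64_t  value = 0;
        char      key[256] = {0,};

        /* "count" may be absent in aggr on the first reply; default to 0. */
        (void) dict_get_int32 (aggr, "count", &dst);
        if (dict_get_int32 (rsp, "count", &src))
                return -1;
        if (dict_set_int32 (aggr, "count", src + dst))
                return -1;

        /* Re-index one counter from the reply into the aggregate. */
        snprintf (key, sizeof (key), "scrubbed-files-%d", src);
        if (!dict_get_uint64 (rsp, key, &value)) {
                snprintf (key, sizeof (key), "scrubbed-files-%d", src + dst);
                (void) dict_set_uint64 (aggr, key, value);
        }

        /* ...repeat for unsigned-files, last-scrub-time, scrub-duration,
         * error-count and node-uuid, as the patch does... */
        return 0;
}

Note that, as the comment in bit-rot.c states, the scrubber currently hard-codes last-scrub-time; the real values for that and the other counters are expected to be computed in a follow-up patch.
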
