author     Gaurav Kumar Garg <ggarg@redhat.com>        2015-04-09 15:36:20 +0530
committer  Kaushal M <kaushal@redhat.com>              2015-05-04 01:56:58 -0700
commit     da1416051d19d612d131acfde8589bc8658979b5 (patch)
tree       e4ffdeba3420fa794a5a4a2543955a25af2c2559 /xlators/mgmt/glusterd
parent     7759748915485d6c740a7fed831376f298eb90bb (diff)
glusterd: gluster volume status should show status of bitrot and scrubber daemon
The command "gluster volume status <VOLNAME>" should show the status of the bitrot and scrubber daemons along with their pid information. In addition to displaying this information in the overall "gluster volume status" output, there should be commands to show the status of each daemon separately:

    gluster volume status <VOLNAME> bitd      show only the bitd daemon information
    gluster volume status <VOLNAME> scrub     show only the scrubber daemon information

Change-Id: Id86aae1156c8c599347c98e2a538f294d37376e4
BUG: 1209752
Signed-off-by: Gaurav Kumar Garg <ggarg@redhat.com>
Reviewed-on: http://review.gluster.org/10175
Reviewed-by: Kaushal M <kaushal@redhat.com>
Tested-by: Kaushal M <kaushal@redhat.com>
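As a rough illustration of the new sub-commands, the sketch below assumes a hypothetical volume named "patchy"; the daemon labels ("Bitrot Daemon", "Scrubber Daemon") come from glusterd_add_node_to_dict() in this patch, while the column layout and pid values are illustrative only and may differ by release:

    # status of the bitrot daemon only
    $ gluster volume status patchy bitd
    Status of volume: patchy
    Gluster process                          TCP Port  RDMA Port  Online  Pid
    ---------------------------------------------------------------------------
    Bitrot Daemon on localhost               N/A       N/A        Y       12345

    # status of the scrubber daemon only
    $ gluster volume status patchy scrub
    Status of volume: patchy
    Gluster process                          TCP Port  RDMA Port  Online  Pid
    ---------------------------------------------------------------------------
    Scrubber Daemon on localhost             N/A       N/A        Y       12346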
Diffstat (limited to 'xlators/mgmt/glusterd')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c   20
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c     98
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c      8
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.h             2
4 files changed, 128 insertions, 0 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index ac69fc8712d..524ce35d841 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -3987,6 +3987,26 @@ __glusterd_handle_status_volume (rpcsvc_request_t *req)
goto out;
}
+ if ((cmd & GF_CLI_STATUS_BITD) &&
+ (conf->op_version < GD_OP_VERSION_3_7_0)) {
+ snprintf (err_str, sizeof (err_str), "The cluster is operating "
+ "at a lesser version than %d. Getting the status of "
+ "bitd is not allowed in this state",
+ GD_OP_VERSION_3_7_0);
+ ret = -1;
+ goto out;
+ }
+
+ if ((cmd & GF_CLI_STATUS_SCRUB) &&
+ (conf->op_version < GD_OP_VERSION_3_7_0)) {
+ snprintf (err_str, sizeof (err_str), "The cluster is operating "
+ "at a lesser version than %d. Getting the status of "
+ "scrub is not allowed in this state",
+ GD_OP_VERSION_3_7_0);
+ ret = -1;
+ goto out;
+ }
+
ret = glusterd_op_begin_synctask (req, GD_OP_STATUS_VOLUME, dict);
out:
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index aac393078b5..73f71196789 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -1399,6 +1399,22 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
"quota enabled", volname);
goto out;
}
+ } else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
+ if (!glusterd_is_bitrot_enabled (volinfo)) {
+ ret = -1;
+ snprintf (msg, sizeof (msg), "Volume %s does not have "
+ "bitrot enabled", volname);
+ goto out;
+ }
+ } else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
+ if (!glusterd_is_bitrot_enabled (volinfo)) {
+ ret = -1;
+ snprintf (msg, sizeof (msg), "Volume %s does not have "
+ "bitrot enabled. Scrubber will be enabled "
+ "automatically if bitrot is enabled",
+ volname);
+ goto out;
+ }
} else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
if (!glusterd_is_snapd_enabled (volinfo)) {
ret = -1;
@@ -2776,6 +2792,20 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
goto out;
other_count++;
node_count++;
+ } else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
+ ret = glusterd_add_node_to_dict (priv->bitd_svc.name,
+ rsp_dict, 0, vol_opts);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
+ } else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
+ ret = glusterd_add_node_to_dict (priv->scrub_svc.name,
+ rsp_dict, 0, vol_opts);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
} else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
ret = glusterd_add_node_to_dict ("snapd", rsp_dict, 0,
vol_opts);
@@ -2881,6 +2911,34 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
goto out;
other_count++;
node_count++;
+ other_index++;
+ }
+
+ if (glusterd_is_bitrot_enabled (volinfo)) {
+ ret = glusterd_add_node_to_dict
+ (priv->bitd_svc.name,
+ rsp_dict,
+ other_index,
+ vol_opts);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
+ other_index++;
+ }
+
+ /* For handling scrub status. Scrub daemon will be
+ * running automatically when bitrot is enabled. */
+ if (glusterd_is_bitrot_enabled (volinfo)) {
+ ret = glusterd_add_node_to_dict
+ (priv->scrub_svc.name,
+ rsp_dict,
+ other_index,
+ vol_opts);
+ if (ret)
+ goto out;
+ other_count++;
+ node_count++;
}
}
}
@@ -5998,6 +6056,8 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
case GF_CLI_STATUS_SHD:
case GF_CLI_STATUS_QUOTAD:
case GF_CLI_STATUS_SNAPD:
+ case GF_CLI_STATUS_BITD:
+ case GF_CLI_STATUS_SCRUB:
break;
default:
goto out;
@@ -6098,6 +6158,44 @@ glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr,
cds_list_add_tail (&pending_node->list, selected);
ret = 0;
+ } else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
+ if (!priv->bitd_svc.online) {
+ gf_log (this->name, GF_LOG_ERROR, "Bitrot is not "
+ "running");
+ ret = -1;
+ goto out;
+ }
+ pending_node = GF_CALLOC (1, sizeof (*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ }
+ pending_node->node = &(priv->bitd_svc);
+ pending_node->type = GD_NODE_BITD;
+ pending_node->index = 0;
+ cds_list_add_tail (&pending_node->list, selected);
+
+ ret = 0;
+ } else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
+ if (!priv->scrub_svc.online) {
+ gf_log (this->name, GF_LOG_ERROR, "Scrubber is not "
+ "running");
+ ret = -1;
+ goto out;
+ }
+ pending_node = GF_CALLOC (1, sizeof (*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ }
+ pending_node->node = &(priv->scrub_svc);
+ pending_node->type = GD_NODE_SCRUB;
+ pending_node->index = 0;
+ cds_list_add_tail (&pending_node->list, selected);
+
+ ret = 0;
} else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
if (!volinfo->snapd.svc.online) {
gf_log (this->name, GF_LOG_ERROR, "snapd is not "
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index ce55a9d3490..c72c7266f16 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -4020,6 +4020,10 @@ glusterd_add_node_to_dict (char *server, dict_t *dict, int count,
svc = &(priv->nfs_svc);
else if (strcmp(server, priv->quotad_svc.name) == 0)
svc = &(priv->quotad_svc);
+ else if (strcmp(server, priv->bitd_svc.name) == 0)
+ svc = &(priv->bitd_svc);
+ else if (strcmp(server, priv->scrub_svc.name) == 0)
+ svc = &(priv->scrub_svc);
//Consider service to be running only when glusterd sees it Online
if (svc->online)
@@ -4041,6 +4045,10 @@ glusterd_add_node_to_dict (char *server, dict_t *dict, int count,
ret = dict_set_str (dict, key, "Self-heal Daemon");
else if (!strcmp (server, priv->quotad_svc.name))
ret = dict_set_str (dict, key, "Quota Daemon");
+ else if (!strcmp (server, priv->bitd_svc.name))
+ ret = dict_set_str (dict, key, "Bitrot Daemon");
+ else if (!strcmp (server, priv->scrub_svc.name))
+ ret = dict_set_str (dict, key, "Scrubber Daemon");
if (ret)
goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index f2a9be15c9f..5ad92a47177 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -447,6 +447,8 @@ typedef enum gd_node_type_ {
GD_NODE_NFS,
GD_NODE_QUOTAD,
GD_NODE_SNAPD,
+ GD_NODE_BITD,
+ GD_NODE_SCRUB,
} gd_node_type;
typedef enum missed_snap_stat {