path: root/xlators/mgmt/glusterd
author    Samikshan Bairagya <samikshan@gmail.com>    2016-07-07 20:33:02 +0530
committer Atin Mukherjee <amukherj@redhat.com>        2016-08-26 08:23:37 -0700
commit    4a3454753f6e4ddc309c8d1cb11a6e4e432c1da6 (patch)
tree      1bfb4258035063b2b2678a9e2e898234b5199b1e /xlators/mgmt/glusterd
parent    7d3de1aed87ad9cd519cbc05e744f086760620c4 (diff)
glusterd/cli: cli to get local state representation from glusterd
Currently there is no CLI that can be used to get the local state representation of the cluster, as maintained in glusterd, in a readable as well as parseable format. The CLI added has the following usage:

    # gluster get-state [daemon] [odir <path/to/output/dir>] [file <filename>]

This dumps data points that reflect the local state representation of the cluster as maintained in glusterd (no other daemons are supported as of now) to a file inside the specified output directory. The default output directory and filename are /var/run/gluster and glusterd_state_<timestamp> respectively. The option for specifying the daemon name leaves room to add support for other daemons in the future.

The following data points are captured as of now to represent the state from the local glusterd point of view:

* Peers:
  - primary hostname
  - uuid
  - state
  - connection status
  - list of hostnames

* Volumes:
  - name, id, transport type, status
  - counts: bricks, snap, subvol, stripe, arbiter, disperse, redundancy
  - snapd status
  - quorum status
  - tiering related information
  - rebalance status
  - replace brick status
  - snapshots

* Bricks:
  - path, hostname (shown for all bricks)
  - port, rdma port, status, mount options, filesystem type and signed-in status (shown only for bricks running locally)

* Services:
  - name and online status for initialised services

* Others:
  - base port, last allocated port
  - op-version
  - MYUUID

Change-Id: I4a45cc5407ab92d8afdbbd2098ece851f7e3d618
BUG: 1353156
Signed-off-by: Samikshan Bairagya <samikshan@gmail.com>
Reviewed-on: http://review.gluster.org/14873
Reviewed-by: Avra Sengupta <asengupt@redhat.com>
Smoke: Gluster Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
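For illustration, a hypothetical invocation and an abridged, made-up excerpt of the resulting state file; the section headers and key names mirror the fprintf calls added to glusterd_get_state() in glusterd-handler.c below, while every hostname, UUID, port and value shown here is assumed, not taken from a real run:

    # gluster get-state glusterd odir /tmp file gdstate
    # cat /tmp/gdstate

    [Global]
    MYUUID: 5ab4f7a2-0000-0000-0000-000000000000
    op-version: 30800

    [Peers]
    Peer1.primary_hostname: peer2.example.com
    Peer1.uuid: 9b1b5e88-0000-0000-0000-000000000000
    Peer1.state: befriended
    Peer1.connected: 1
    Peer1.hostnames: peer2.example.com,

    [Volumes]
    Volume1.name: testvol
    Volume1.type: Replicate
    Volume1.transport_type: tcp
    Volume1.status: Started
    Volume1.brickcount: 2
    Volume1.Brick1.path: peer1.example.com:/bricks/b1
    Volume1.Brick1.hostname: peer1.example.com

    [Services]
    svc1.name: glustershd
    svc1.online_status: Online

    [Misc]
    Base port: 49152
    Last allocated port: 49155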
Diffstat (limited to 'xlators/mgmt/glusterd')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c        | 516
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-messages.h       |  18
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-peer-utils.c     |  51
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-peer-utils.h     |   3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c |  36
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h |   2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.c          | 194
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-utils.h          |  26
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.h                |   1
9 files changed, 846 insertions(+), 1 deletion(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 2c03c28168d..4a027f4ed7d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -4947,6 +4947,521 @@ glusterd_handle_get_vol_opt (rpcsvc_request_t *req)
{
return glusterd_big_locked_handler (req, __glusterd_handle_get_vol_opt);
}
+
+static int
+glusterd_print_global_options (dict_t *opts, char *key, data_t *val, void *data)
+{
+ FILE *fp = NULL;
+
+ GF_VALIDATE_OR_GOTO (THIS->name, key, out);
+ GF_VALIDATE_OR_GOTO (THIS->name, val, out);
+ GF_VALIDATE_OR_GOTO (THIS->name, data, out);
+
+ fp = (FILE *) data;
+ fprintf (fp, "%s: %s\n", key, val->data);
+out:
+ return 0;
+}
+
+static int
+glusterd_print_snapinfo_by_vol (FILE *fp, glusterd_volinfo_t *volinfo, int volcount)
+{
+ int ret = -1;
+ glusterd_volinfo_t *snap_vol = NULL;
+ glusterd_volinfo_t *tmp_vol = NULL;
+ glusterd_snap_t *snapinfo = NULL;
+ int snapcount = 0;
+ char timestr[64] = {0,};
+ char snap_status_str[STATUS_STRLEN] = {0,};
+
+ GF_VALIDATE_OR_GOTO (THIS->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO (THIS->name, fp, out);
+
+ cds_list_for_each_entry_safe (snap_vol, tmp_vol, &volinfo->snap_volumes,
+ snapvol_list) {
+ snapcount++;
+ snapinfo = snap_vol->snapshot;
+
+ ret = glusterd_get_snap_status_str (snapinfo, snap_status_str);
+ if (ret) {
+ gf_msg (THIS->name, GF_LOG_ERROR, 0,
+ GD_MSG_STATE_STR_GET_FAILED,
+ "Failed to get status for snapshot: %s",
+ snapinfo->snapname);
+
+ goto out;
+ }
+ gf_time_fmt (timestr, sizeof timestr, snapinfo->time_stamp,
+ gf_timefmt_FT);
+
+ fprintf (fp, "Volume%d.snapshot%d.name: %s\n",
+ volcount, snapcount, snapinfo->snapname);
+ fprintf (fp, "Volume%d.snapshot%d.id: %s\n", volcount, snapcount,
+ gf_strdup (uuid_utoa (snapinfo->snap_id)));
+ fprintf (fp, "Volume%d.snapshot%d.time: %s\n",
+ volcount, snapcount, timestr);
+
+ if (snapinfo->description)
+ fprintf (fp, "Volume%d.snapshot%d.description: %s\n",
+ volcount, snapcount, snapinfo->description);
+ fprintf (fp, "Volume%d.snapshot%d.status: %s\n",
+ volcount, snapcount, snap_status_str);
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+static int
+glusterd_get_state (rpcsvc_request_t *req, dict_t *dict)
+{
+ int32_t ret = -1;
+ gf_cli_rsp rsp = {0,};
+ int fd = -1;
+ FILE *fp = NULL;
+ DIR *dp = NULL;
+ char err_str[2048] = {0,};
+ glusterd_conf_t *priv = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_peer_hostname_t *peer_hostname_info = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_snap_t *snapinfo = NULL;
+ xlator_t *this = NULL;
+ char *odir = NULL;
+ char *filename = NULL;
+ char *ofilepath = NULL;
+ int count = 0;
+ int count_bkp = 0;
+ int odirlen = 0;
+ time_t now = 0;
+ char timestamp[16] = {0,};
+
+ char *vol_type_str = NULL;
+ char *hot_tier_type_str = NULL;
+ char *cold_tier_type_str = NULL;
+
+ char transport_type_str[STATUS_STRLEN] = {0,};
+ char quorum_status_str[STATUS_STRLEN] = {0,};
+ char rebal_status_str[STATUS_STRLEN] = {0,};
+ char peer_state_str[STATUS_STRLEN] = {0,};
+ char vol_status_str[STATUS_STRLEN] = {0,};
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO (THIS->name, this, out);
+
+ priv = THIS->private;
+ GF_VALIDATE_OR_GOTO (this->name, priv, out);
+
+ GF_VALIDATE_OR_GOTO (this->name, dict, out);
+
+ ret = dict_get_str (dict, "odir", &odir);
+ if (ret) {
+ gf_asprintf (&odir, "%s", "/var/run/gluster/");
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_DICT_GET_FAILED,
+ "Default output directory: %s", odir);
+ }
+
+ dp = sys_opendir (odir);
+ if (dp) {
+ sys_closedir (dp);
+ } else {
+ if (errno == ENOENT) {
+ snprintf (err_str, sizeof (err_str),
+ "Output directory %s does not exist.", odir);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_GET_FAILED, "%s", err_str);
+ } else if (errno == ENOTDIR) {
+ snprintf (err_str, sizeof (err_str), "Output directory "
+ "does not exist. %s points to a file.", odir);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_GET_FAILED, "%s", err_str);
+ }
+
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "filename", &filename);
+ if (ret) {
+ now = time (NULL);
+ strftime (timestamp, sizeof (timestamp),
+ "%Y%m%d_%H%M%S", localtime (&now));
+ gf_asprintf (&filename, "%s_%s", "glusterd_state", timestamp);
+
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ GD_MSG_DICT_GET_FAILED,
+ "Default filename: %s", filename);
+ }
+
+ odirlen = strlen (odir);
+ if (odir[odirlen-1] != '/')
+ strcat (odir, "/");
+
+ gf_asprintf (&ofilepath, "%s%s", odir, filename);
+
+ ret = dict_set_str (dict, "ofilepath", ofilepath);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_SET_FAILED, "Unable to set output path");
+ goto out;
+ }
+
+ fp = fopen (ofilepath, "w");
+ if (!fp) {
+ snprintf (err_str, sizeof (err_str),
+ "Failed to open file at %s", ofilepath);
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_GET_FAILED, "%s", err_str);
+ ret = -1;
+ goto out;
+ }
+
+ fprintf (fp, "[Global]\n");
+
+ fprintf (fp, "MYUUID: %s\n", gf_strdup (uuid_utoa (priv->uuid)));
+ fprintf (fp, "op-version: %d\n", priv->op_version);
+
+ fprintf (fp, "\n[Global options]\n");
+
+ if (priv->opts)
+ dict_foreach (priv->opts, glusterd_print_global_options, fp);
+
+ rcu_read_lock ();
+ fprintf (fp, "\n[Peers]\n");
+
+ cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
+ ret = gd_peer_state_str (peerinfo, peer_state_str);
+ if (ret) {
+ rcu_read_unlock ();
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_STATE_STR_GET_FAILED,
+ "Failed to get peer state");
+ goto out;
+ }
+
+ fprintf (fp, "Peer%d.primary_hostname: %s\n", ++count,
+ peerinfo->hostname);
+ fprintf (fp, "Peer%d.uuid: %s\n", count, gd_peer_uuid_str (peerinfo));
+ fprintf (fp, "Peer%d.state: %s\n", count, peer_state_str);
+ fprintf (fp, "Peer%d.connected: %d\n", count, peerinfo->connected);
+
+ fprintf (fp, "Peer%d.hostnames: ", count);
+ cds_list_for_each_entry (peer_hostname_info,
+ &peerinfo->hostnames, hostname_list)
+ fprintf (fp, "%s, ", peer_hostname_info->hostname);
+ fprintf (fp, "\n");
+ }
+ rcu_read_unlock ();
+
+ count = 0;
+ fprintf (fp, "\n[Volumes]\n");
+
+ cds_list_for_each_entry (volinfo, &priv->volumes, vol_list) {
+ ret = glusterd_volume_get_type_str (volinfo, &vol_type_str);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_STATE_STR_GET_FAILED,
+ "Failed to get type for volume: %s",
+ volinfo->volname);
+ goto out;
+ }
+
+ ret = glusterd_volume_get_status_str (volinfo, vol_status_str);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_STATE_STR_GET_FAILED,
+ "Failed to get status for volume: %s",
+ volinfo->volname);
+ goto out;
+ }
+
+ ret = glusterd_volume_get_transport_type_str (volinfo,
+ transport_type_str);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_STATE_STR_GET_FAILED,
+ "Failed to get transport type for volume: %s",
+ volinfo->volname);
+ goto out;
+ }
+
+ ret = glusterd_volume_get_quorum_status_str (volinfo,
+ quorum_status_str);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_STATE_STR_GET_FAILED,
+ "Failed to get quorum status for volume: %s",
+ volinfo->volname);
+ goto out;
+ }
+
+ ret = glusterd_volume_get_rebalance_status_str (volinfo,
+ rebal_status_str);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_STATE_STR_GET_FAILED,
+ "Failed to get rebalance status for volume: %s",
+ volinfo->volname);
+ goto out;
+ }
+
+ fprintf (fp, "Volume%d.name: %s\n", ++count, volinfo->volname);
+ fprintf (fp, "Volume%d.id: %s\n", count,
+ gf_strdup (uuid_utoa (volinfo->volume_id)));
+ fprintf (fp, "Volume%d.type: %s\n", count, vol_type_str);
+ fprintf (fp, "Volume%d.transport_type: %s\n", count,
+ transport_type_str);
+ fprintf (fp, "Volume%d.status: %s\n", count, vol_status_str);
+ fprintf (fp, "Volume%d.brickcount: %d\n", count,
+ volinfo->brick_count);
+
+ count_bkp = count;
+ count = 0;
+ cds_list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+ fprintf (fp, "Volume%d.Brick%d.path: %s:%s\n",
+ count_bkp, ++count, brickinfo->hostname,
+ brickinfo->path);
+ fprintf (fp, "Volume%d.Brick%d.hostname: %s\n",
+ count_bkp, count, brickinfo->hostname);
+
+ /* Add following information only for bricks
+ * local to current node */
+ if (gf_uuid_compare (brickinfo->uuid, MY_UUID))
+ continue;
+ fprintf (fp, "Volume%d.Brick%d.port: %d\n", count_bkp,
+ count, brickinfo->port);
+ fprintf (fp, "Volume%d.Brick%d.rdma_port: %d\n", count_bkp,
+ count, brickinfo->rdma_port);
+ fprintf (fp, "Volume%d.Brick%d.status: %s\n", count_bkp,
+ count, brickinfo->status ? "Started" : "Stopped");
+ fprintf (fp, "Volume%d.Brick%d.filesystem_type: %s\n",
+ count_bkp, count, brickinfo->fstype);
+ fprintf (fp, "Volume%d.Brick%d.mount_options: %s\n",
+ count_bkp, count, brickinfo->mnt_opts);
+ fprintf (fp, "Volume%d.Brick%d.signedin: %s\n", count_bkp,
+ count, brickinfo->signed_in ? "True" : "False");
+ }
+
+ count = count_bkp;
+
+ ret = glusterd_print_snapinfo_by_vol (fp, volinfo, count);
+ if (ret)
+ goto out;
+
+ fprintf (fp, "Volume%d.snap_count: %"PRIu64"\n", count,
+ volinfo->snap_count);
+ fprintf (fp, "Volume%d.stripe_count: %d\n", count,
+ volinfo->stripe_count);
+ fprintf (fp, "Volume%d.subvol_count: %d\n", count,
+ volinfo->subvol_count);
+ fprintf (fp, "Volume%d.arbiter_count: %d\n", count,
+ volinfo->arbiter_count);
+ fprintf (fp, "Volume%d.disperse_count: %d\n", count,
+ volinfo->disperse_count);
+ fprintf (fp, "Volume%d.redundancy_count: %d\n", count,
+ volinfo->redundancy_count);
+ fprintf (fp, "Volume%d.quorum_status: %s\n", count,
+ quorum_status_str);
+
+ fprintf (fp, "Volume%d.snapd_svc.online_status: %s\n", count,
+ volinfo->snapd.svc.online ? "Online" : "Offline");
+ fprintf (fp, "Volume%d.snapd_svc.inited: %s\n", count,
+ volinfo->snapd.svc.inited ? "True" : "False");
+
+ fprintf (fp, "Volume%d.rebalance.id: %s\n", count,
+ gf_strdup (uuid_utoa (volinfo->rebal.rebalance_id)));
+ fprintf (fp, "Volume%d.rebalance.status: %s\n", count,
+ rebal_status_str);
+ fprintf (fp, "Volume%d.rebalance.failures: %"PRIu64"\n", count,
+ volinfo->rebal.rebalance_failures);
+ fprintf (fp, "Volume%d.rebalance.skipped: %"PRIu64"\n", count,
+ volinfo->rebal.skipped_files);
+ fprintf (fp, "Volume%d.rebalance.lookedup: %"PRIu64"\n", count,
+ volinfo->rebal.lookedup_files);
+ fprintf (fp, "Volume%d.rebalance.files: %"PRIu64"\n", count,
+ volinfo->rebal.rebalance_files);
+ fprintf (fp, "Volume%d.rebalance.data: %"PRIu64"\n", count,
+ volinfo->rebal.rebalance_data);
+ fprintf (fp, "Volume%d.rebalance.data: %"PRIu64"\n", count,
+ volinfo->rebal.rebalance_data);
+
+ if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
+ ret = glusterd_volume_get_hot_tier_type_str (
+ volinfo, &hot_tier_type_str);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_STATE_STR_GET_FAILED,
+ "Failed to get hot tier type for "
+ "volume: %s", volinfo->volname);
+ goto out;
+ }
+
+ ret = glusterd_volume_get_cold_tier_type_str (
+ volinfo, &cold_tier_type_str);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_STATE_STR_GET_FAILED,
+ "Failed to get cold tier type for "
+ "volume: %s", volinfo->volname);
+ goto out;
+ }
+
+ fprintf (fp, "Volume%d.tier_info.cold_tier_type: %s\n",
+ count, cold_tier_type_str);
+ fprintf (fp, "Volume%d.tier_info.cold_brick_count: %d\n",
+ count, volinfo->tier_info.cold_brick_count);
+ fprintf (fp, "Volume%d.tier_info.cold_replica_count: %d\n",
+ count, volinfo->tier_info.cold_replica_count);
+ fprintf (fp, "Volume%d.tier_info.cold_disperse_count: %d\n",
+ count, volinfo->tier_info.cold_disperse_count);
+ fprintf (fp, "Volume%d.tier_info.cold_dist_leaf_count: %d\n",
+ count, volinfo->tier_info.cold_dist_leaf_count);
+ fprintf (fp, "Volume%d.tier_info.cold_redundancy_count: %d\n",
+ count, volinfo->tier_info.cold_redundancy_count);
+ fprintf (fp, "Volume%d.tier_info.hot_tier_type: %s\n",
+ count, hot_tier_type_str);
+ fprintf (fp, "Volume%d.tier_info.hot_brick_count: %d\n",
+ count, volinfo->tier_info.hot_brick_count);
+ fprintf (fp, "Volume%d.tier_info.hot_replica_count: %d\n",
+ count, volinfo->tier_info.hot_replica_count);
+ fprintf (fp, "Volume%d.tier_info.promoted: %d\n",
+ count, volinfo->tier_info.promoted);
+ fprintf (fp, "Volume%d.tier_info.demoted: %d\n",
+ count, volinfo->tier_info.demoted);
+ }
+
+ if (volinfo->rep_brick.src_brick && volinfo->rep_brick.dst_brick) {
+ fprintf (fp, "Volume%d.replace_brick.src: %s:%s\n", count,
+ volinfo->rep_brick.src_brick->hostname,
+ volinfo->rep_brick.src_brick->path);
+ fprintf (fp, "Volume%d.replace_brick.dest: %s:%s\n", count,
+ volinfo->rep_brick.dst_brick->hostname,
+ volinfo->rep_brick.dst_brick->path);
+ }
+
+ fprintf (fp, "\n");
+ }
+
+ count = 0;
+
+ fprintf (fp, "\n[Services]\n");
+
+ if (priv->shd_svc.inited) {
+ fprintf (fp, "svc%d.name: %s\n", ++count, priv->shd_svc.name);
+ fprintf (fp, "svc%d.online_status: %s\n\n", count,
+ priv->shd_svc.online ? "Online" : "Offline");
+ }
+
+ if (priv->nfs_svc.inited) {
+ fprintf (fp, "svc%d.name: %s\n", ++count, priv->nfs_svc.name);
+ fprintf (fp, "svc%d.online_status: %s\n\n", count,
+ priv->nfs_svc.online ? "Online" : "Offline");
+ }
+
+ if (priv->bitd_svc.inited) {
+ fprintf (fp, "svc%d.name: %s\n", ++count, priv->bitd_svc.name);
+ fprintf (fp, "svc%d.online_status: %s\n\n", count,
+ priv->bitd_svc.online ? "Online" : "Offline");
+ }
+
+ if (priv->scrub_svc.inited) {
+ fprintf (fp, "svc%d.name: %s\n", ++count, priv->scrub_svc.name);
+ fprintf (fp, "svc%d.online_status: %s\n\n", count,
+ priv->scrub_svc.online ? "Online" : "Offline");
+ }
+
+ if (priv->quotad_svc.inited) {
+ fprintf (fp, "svc%d.name: %s\n", ++count, priv->quotad_svc.name);
+ fprintf (fp, "svc%d.online_status: %s\n\n", count,
+ priv->quotad_svc.online ? "Online" : "Offline");
+ }
+
+ fprintf (fp, "\n[Misc]\n");
+ if (priv->pmap) {
+ fprintf (fp, "Base port: %d\n", priv->pmap->base_port);
+ fprintf (fp, "Last allocated port: %d\n",
+ priv->pmap->last_alloc);
+ }
+out:
+
+ if (fp)
+ fclose(fp);
+
+ rsp.op_ret = ret;
+ rsp.op_errstr = err_str;
+
+ ret = dict_allocate_and_serialize (dict, &rsp.dict.dict_val,
+ &rsp.dict.dict_len);
+ glusterd_to_cli (req, &rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_gf_cli_rsp, dict);
+
+ return ret;
+}
+
+static int
+__glusterd_handle_get_state (rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{0,},};
+ dict_t *dict = NULL;
+ char err_str[2048] = {0,};
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO (THIS->name, this, out);
+ GF_VALIDATE_OR_GOTO (this->name, req, out);
+
+ ret = xdr_to_generic (req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ snprintf (err_str, sizeof (err_str), "Failed to decode "
+ "request received from cli");
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_REQ_DECODE_FAIL, "%s", err_str);
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new ();
+
+ ret = dict_unserialize (cli_req.dict.dict_val,
+ cli_req.dict.dict_len,
+ &dict);
+ if (ret < 0) {
+ gf_msg (this->name, GF_LOG_ERROR, 0,
+ GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf (err_str, sizeof (err_str), "Unable to decode"
+ " the command");
+ goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
+ }
+ }
+
+ gf_msg (this->name, GF_LOG_INFO, 0, GD_MSG_DAEMON_STATE_REQ_RCVD,
+ "Received request to get state for glusterd");
+
+ ret = glusterd_get_state (req, dict);
+
+out:
+ if (dict)
+ dict_unref (dict);
+ return ret;
+}
+
+int
+glusterd_handle_get_state (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_get_state);
+}
+
static int
get_brickinfo_from_brickid (char *brickid, glusterd_brickinfo_t **brickinfo)
{
@@ -5410,6 +5925,7 @@ rpcsvc_actor_t gd_svc_cli_actors[GLUSTER_CLI_MAXVALUE] = {
[GLUSTER_CLI_GANESHA] = { "GANESHA" , GLUSTER_CLI_GANESHA, glusterd_handle_ganesha_cmd, NULL, 0, DRC_NA},
[GLUSTER_CLI_GET_VOL_OPT] = {"GET_VOL_OPT", GLUSTER_CLI_GET_VOL_OPT, glusterd_handle_get_vol_opt, NULL, 0, DRC_NA},
[GLUSTER_CLI_BITROT] = {"BITROT", GLUSTER_CLI_BITROT, glusterd_handle_bitrot, NULL, 0, DRC_NA},
+ [GLUSTER_CLI_GET_STATE] = {"GET_STATE", GLUSTER_CLI_GET_STATE, glusterd_handle_get_state, NULL, 0, DRC_NA},
};
struct rpcsvc_program gd_svc_cli_prog = {
diff --git a/xlators/mgmt/glusterd/src/glusterd-messages.h b/xlators/mgmt/glusterd/src/glusterd-messages.h
index 2c76dbfe073..f0c9ee25ed6 100644
--- a/xlators/mgmt/glusterd/src/glusterd-messages.h
+++ b/xlators/mgmt/glusterd/src/glusterd-messages.h
@@ -41,7 +41,7 @@
#define GLUSTERD_COMP_BASE GLFS_MSGID_GLUSTERD
-#define GLFS_NUM_MESSAGES 583
+#define GLFS_NUM_MESSAGES 585
#define GLFS_MSGID_END (GLUSTERD_COMP_BASE + GLFS_NUM_MESSAGES + 1)
/* Messaged with message IDs */
@@ -4713,6 +4713,22 @@
*/
#define GD_MSG_SYSCALL_FAIL (GLUSTERD_COMP_BASE + 583)
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendation
+ *
+ */
+#define GD_MSG_DAEMON_STATE_REQ_RCVD (GLUSTERD_COMP_BASE + 584)
+
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendation
+ *
+ */
+#define GD_MSG_STATE_STR_GET_FAILED (GLUSTERD_COMP_BASE + 585)
+
/*------------*/
#define glfs_msg_end_x GLFS_MSGID_END, "Invalid: End of messages"
#endif /* !_GLUSTERD_MESSAGES_H_ */
diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
index 1a97111d0f5..a8e99189dbd 100644
--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
@@ -393,6 +393,57 @@ gd_peer_uuid_str (glusterd_peerinfo_t *peerinfo)
return peerinfo->uuid_str;
}
+int
+gd_peer_state_str (glusterd_peerinfo_t *peerinfo, char *state_str)
+{
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO (THIS->name, peerinfo, out);
+ GF_VALIDATE_OR_GOTO (THIS->name, state_str, out);
+
+ switch (peerinfo->state.state) {
+ case GD_FRIEND_STATE_DEFAULT:
+ gf_asprintf (&state_str, "%s", "default");
+ break;
+ case GD_FRIEND_STATE_REQ_SENT:
+ gf_asprintf (&state_str, "%s", "request sent");
+ break;
+ case GD_FRIEND_STATE_REQ_RCVD:
+ gf_asprintf (&state_str, "%s", "request received");
+ break;
+ case GD_FRIEND_STATE_BEFRIENDED:
+ gf_asprintf (&state_str, "%s", "befriended");
+ break;
+ case GD_FRIEND_STATE_REQ_ACCEPTED:
+ gf_asprintf (&state_str, "%s", "request accepted");
+ break;
+ case GD_FRIEND_STATE_REQ_SENT_RCVD:
+ gf_asprintf (&state_str, "%s", "request sent received");
+ break;
+ case GD_FRIEND_STATE_REJECTED:
+ gf_asprintf (&state_str, "%s", "rejected");
+ break;
+ case GD_FRIEND_STATE_UNFRIEND_SENT:
+ gf_asprintf (&state_str, "%s", "unfriend sent");
+ break;
+ case GD_FRIEND_STATE_PROBE_RCVD:
+ gf_asprintf (&state_str, "%s", "probe received");
+ break;
+ case GD_FRIEND_STATE_CONNECTED_RCVD:
+ gf_asprintf (&state_str, "%s", "connected received");
+ break;
+ case GD_FRIEND_STATE_CONNECTED_ACCEPTED:
+ gf_asprintf (&state_str, "%s", "connected accepted");
+ break;
+ case GD_FRIEND_STATE_MAX:
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
gf_boolean_t
glusterd_are_all_peers_up ()
{
diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.h b/xlators/mgmt/glusterd/src/glusterd-peer-utils.h
index e74d1ed9536..e500ee1dd91 100644
--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.h
@@ -42,6 +42,9 @@ glusterd_uuid_to_hostname (uuid_t uuid);
char*
gd_peer_uuid_str (glusterd_peerinfo_t *peerinfo);
+int
+gd_peer_state_str (glusterd_peerinfo_t *peerinfo, char *state_str);
+
gf_boolean_t
glusterd_are_all_peers_up ();
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
index 5a09254511b..ef094c16f51 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c
@@ -4093,3 +4093,39 @@ gd_get_snap_conf_values_if_present (dict_t *dict, uint64_t *sys_hard_limit,
GLUSTERD_STORE_KEY_SNAP_MAX_SOFT_LIMIT);
}
}
+
+int
+glusterd_get_snap_status_str (glusterd_snap_t *snapinfo, char *snap_status_str)
+{
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO (THIS->name, snapinfo, out);
+ GF_VALIDATE_OR_GOTO (THIS->name, snap_status_str, out);
+
+ switch (snapinfo->snap_status) {
+ case GD_SNAP_STATUS_NONE:
+ sprintf (snap_status_str, "%s", "none");
+ break;
+ case GD_SNAP_STATUS_INIT:
+ sprintf (snap_status_str, "%s", "init");
+ break;
+ case GD_SNAP_STATUS_IN_USE:
+ sprintf (snap_status_str, "%s", "in_use");
+ break;
+ case GD_SNAP_STATUS_DECOMMISSION:
+ sprintf (snap_status_str, "%s", "decommissioned");
+ break;
+ case GD_SNAP_STATUS_UNDER_RESTORE:
+ sprintf (snap_status_str, "%s", "under_restore");
+ break;
+ case GD_SNAP_STATUS_RESTORED:
+ sprintf (snap_status_str, "%s", "restored");
+ break;
+ default:
+ goto out;
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h
index c0e7e8e218d..b964a438623 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h
@@ -161,6 +161,8 @@ glusterd_is_snap_soft_limit_reached (glusterd_volinfo_t *volinfo,
void
gd_get_snap_conf_values_if_present (dict_t *opts, uint64_t *sys_hard_limit,
uint64_t *sys_soft_limit);
+int
+glusterd_get_snap_status_str (glusterd_snap_t *snapinfo, char *snap_status_str);
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 872475e9ab0..ec371d80815 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -59,6 +59,7 @@
#include "glusterd-bitd-svc.h"
#include "glusterd-server-quorum.h"
#include "quota-common-utils.h"
+#include "common-utils.h"
#include "xdr-generic.h"
#include <sys/resource.h>
@@ -11225,6 +11226,199 @@ glusterd_is_volume_started (glusterd_volinfo_t *volinfo)
return (volinfo->status == GLUSTERD_STATUS_STARTED);
}
+int
+glusterd_volume_get_type_str (glusterd_volinfo_t *volinfo, char **voltype_str)
+{
+ int ret = -1;
+ int type = 0;
+ int brick_count = 0;
+ int dist_count = 0;
+
+ GF_VALIDATE_OR_GOTO (THIS->name, volinfo, out);
+
+ type = get_vol_type (volinfo->type, volinfo->brick_count,
+ volinfo->dist_leaf_count);
+
+ *voltype_str = vol_type_str[type];
+
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+glusterd_volume_get_status_str (glusterd_volinfo_t *volinfo, char *status_str)
+{
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO (THIS->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO (THIS->name, status_str, out);
+
+ switch (volinfo->status) {
+ case GLUSTERD_STATUS_NONE:
+ sprintf (status_str, "%s", "Created");
+ break;
+ case GLUSTERD_STATUS_STARTED:
+ sprintf (status_str, "%s", "Started");
+ break;
+ case GLUSTERD_STATUS_STOPPED:
+ sprintf (status_str, "%s", "Stopped");
+ break;
+ default:
+ goto out;
+
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+glusterd_volume_get_transport_type_str (glusterd_volinfo_t *volinfo,
+ char *transport_type_str)
+{
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO (THIS->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO (THIS->name, transport_type_str, out);
+
+ switch (volinfo->transport_type) {
+ case GF_TRANSPORT_TCP:
+ sprintf (transport_type_str, "%s", "tcp");
+ break;
+ case GF_TRANSPORT_RDMA:
+ sprintf (transport_type_str, "%s", "rdma");
+ break;
+ case GF_TRANSPORT_BOTH_TCP_RDMA:
+ sprintf (transport_type_str, "%s", "tcp_rdma_both");
+ break;
+ default:
+ goto out;
+
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+glusterd_volume_get_quorum_status_str (glusterd_volinfo_t *volinfo,
+ char *quorum_status_str)
+{
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO (THIS->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO (THIS->name, quorum_status_str, out);
+
+ switch (volinfo->quorum_status) {
+ case NOT_APPLICABLE_QUORUM:
+ sprintf (quorum_status_str, "%s", "not_applicable");
+ break;
+ case MEETS_QUORUM:
+ sprintf (quorum_status_str, "%s", "meets");
+ break;
+ case DOESNT_MEET_QUORUM:
+ sprintf (quorum_status_str, "%s", "does_not_meet");
+ break;
+ default:
+ goto out;
+
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+glusterd_volume_get_rebalance_status_str (glusterd_volinfo_t *volinfo,
+ char *rebal_status_str)
+{
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO (THIS->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO (THIS->name, rebal_status_str, out);
+
+ switch (volinfo->rebal.defrag_status) {
+ case GF_DEFRAG_STATUS_NOT_STARTED:
+ sprintf (rebal_status_str, "%s", "not_started");
+ break;
+ case GF_DEFRAG_STATUS_STARTED:
+ sprintf (rebal_status_str, "%s", "started");
+ break;
+ case GF_DEFRAG_STATUS_STOPPED:
+ sprintf (rebal_status_str, "%s", "stopped");
+ break;
+ case GF_DEFRAG_STATUS_COMPLETE:
+ sprintf (rebal_status_str, "%s", "completed");
+ break;
+ case GF_DEFRAG_STATUS_FAILED:
+ sprintf (rebal_status_str, "%s", "failed");
+ break;
+ case GF_DEFRAG_STATUS_LAYOUT_FIX_STARTED:
+ sprintf (rebal_status_str, "%s", "layout_fix_started");
+ break;
+ case GF_DEFRAG_STATUS_LAYOUT_FIX_STOPPED:
+ sprintf (rebal_status_str, "%s", "layout_fix_stopped");
+ break;
+ case GF_DEFRAG_STATUS_LAYOUT_FIX_COMPLETE:
+ sprintf (rebal_status_str, "%s", "layout_fix_complete");
+ break;
+ case GF_DEFRAG_STATUS_LAYOUT_FIX_FAILED:
+ sprintf (rebal_status_str, "%s", "layout_fix_failed");
+ break;
+ default:
+ goto out;
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+glusterd_volume_get_hot_tier_type_str (glusterd_volinfo_t *volinfo,
+ char **hot_tier_type_str)
+{
+ int ret = -1;
+ int hot_tier_type = 0;
+ int hot_dist_count = 0;
+
+ GF_VALIDATE_OR_GOTO (THIS->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO (THIS->name, hot_tier_type_str, out);
+
+ hot_dist_count = volinfo->tier_info.hot_replica_count ?
+ volinfo->tier_info.hot_replica_count : 1;
+
+ hot_tier_type = get_vol_type (volinfo->tier_info.hot_type, hot_dist_count,
+ volinfo->tier_info.hot_brick_count);
+
+ *hot_tier_type_str = vol_type_str[hot_tier_type];
+
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+glusterd_volume_get_cold_tier_type_str (glusterd_volinfo_t *volinfo,
+ char **cold_tier_type_str)
+{
+ int ret = -1;
+ int cold_tier_type = 0;
+
+ GF_VALIDATE_OR_GOTO (THIS->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO (THIS->name, cold_tier_type_str, out);
+
+ cold_tier_type = get_vol_type (volinfo->tier_info.cold_type,
+ volinfo->tier_info.cold_dist_leaf_count,
+ volinfo->tier_info.cold_brick_count);
+
+ *cold_tier_type_str = vol_type_str[cold_tier_type];
+
+ ret = 0;
+out:
+ return ret;
+}
+
/* This function will insert the element to the list in a order.
Order will be based on the compare function provided as a input.
If element to be inserted in ascending order compare should return:
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index f4c4138829f..ca07efd54ba 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -678,6 +678,32 @@ glusterd_nfs_pmap_deregister ();
gf_boolean_t
glusterd_is_volume_started (glusterd_volinfo_t *volinfo);
+int
+glusterd_volume_get_type_str (glusterd_volinfo_t *volinfo, char **vol_type_str);
+
+int
+glusterd_volume_get_status_str (glusterd_volinfo_t *volinfo, char *status_str);
+
+int
+glusterd_volume_get_transport_type_str (glusterd_volinfo_t *volinfo,
+ char *transport_type_str);
+
+int
+glusterd_volume_get_quorum_status_str (glusterd_volinfo_t *volinfo,
+ char *quorum_status_str);
+
+int
+glusterd_volume_get_rebalance_status_str (glusterd_volinfo_t *volinfo,
+ char *rebal_status_str);
+
+int
+glusterd_volume_get_hot_tier_type_str (glusterd_volinfo_t *volinfo,
+ char **hot_tier_type_str);
+
+int
+glusterd_volume_get_cold_tier_type_str (glusterd_volinfo_t *volinfo,
+ char **cold_tier_type_str);
+
void
glusterd_list_add_order (struct cds_list_head *new, struct cds_list_head *head,
int (*compare)(struct cds_list_head *,
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index 090092000ac..bf427fdf660 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -56,6 +56,7 @@
#define GLUSTERD_SNAPS_DEF_SOFT_LIMIT_PERCENT 90
#define GLUSTERD_SNAPS_MAX_SOFT_LIMIT_PERCENT 100
#define GLUSTERD_SERVER_QUORUM "server"
+#define STATUS_STRLEN 128
#define FMTSTR_CHECK_VOL_EXISTS "Volume %s does not exist"
#define FMTSTR_RESOLVE_BRICK "Could not find peer on which brick %s:%s resides"