author    Kaushal M <kaushal@redhat.com>    2012-01-01 15:59:28 +0530
committer Vijay Bellur <vijay@gluster.com>  2012-01-27 04:20:04 -0800
commit    623919a78a7faac30d1f0df5793681da2c449e32
tree      ee213fa96ebf5feb938babf36c34cb7c8d5f6a24 /glusterfsd/src/glusterfsd-mgmt.c
parent    a078235dbede380ca695251e86a1502ca131d816
cli: Extend "volume status" with statedump info
This patch enhances and extends the "volume status" command with information
obtained from the statedump of the bricks of volumes.

Adds new status types: clients, inode, fd, mem, callpool

The new syntax of "volume status" is:
#gluster volume status [all|{<volname> [<brickname>] [misc-details|clients|inode|fd|mem|callpool]}]

Change-Id: I8d019718465bbc3de727653a839de7238f45da5c
BUG: 765495
Signed-off-by: Kaushal M <kaushal@redhat.com>
Reviewed-on: http://review.gluster.com/2637
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Krishnan Parthasarathi <kp@gluster.com>
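For illustration only, a few invocations of the extended command; the volume name "testvol" and the brick "server1:/export/brick1" below are hypothetical, while the status types come from the list above:

# gluster volume status testvol clients
# gluster volume status testvol mem
# gluster volume status testvol server1:/export/brick1 callpool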
Diffstat (limited to 'glusterfsd/src/glusterfsd-mgmt.c')
-rw-r--r--  glusterfsd/src/glusterfsd-mgmt.c  139
1 file changed, 138 insertions(+), 1 deletion(-)
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index 9e98190ba1f..558ab165161 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -45,6 +45,7 @@
#include "glusterfsd-mem-types.h"
#include "rpcsvc.h"
#include "cli1-xdr.h"
+#include "statedump.h"
static char is_mgmt_rpc_reconnect;
@@ -736,6 +737,138 @@ out:
}
int
+glusterfs_handle_brick_status (rpcsvc_request_t *req)
+{
+        int                     ret = -1;
+        gd1_mgmt_brick_op_req   brick_req = {0,};
+        gd1_mgmt_brick_op_rsp   rsp = {0,};
+        glusterfs_ctx_t         *ctx = NULL;
+        glusterfs_graph_t       *active = NULL;
+        xlator_t                *this = NULL;
+        xlator_t                *any = NULL;
+        xlator_t                *xlator = NULL;
+        dict_t                  *dict = NULL;
+        dict_t                  *output = NULL;
+        char                    *volname = NULL;
+        char                    *xname = NULL;
+        int32_t                 cmd = 0;
+        char                    *msg = NULL;
+
+        GF_ASSERT (req);
+        this = THIS;
+        GF_ASSERT (this);
+
+        if (!xdr_to_generic (req->msg[0], &brick_req,
+                             (xdrproc_t)xdr_gd1_mgmt_brick_op_req)) {
+                req->rpc_err = GARBAGE_ARGS;
+                goto out;
+        }
+
+        dict = dict_new ();
+        ret = dict_unserialize (brick_req.input.input_val,
+                                brick_req.input.input_len, &dict);
+        if (ret < 0) {
+                gf_log (this->name, GF_LOG_ERROR, "Failed to unserialize "
+                        "req-buffer to dictionary");
+                goto out;
+        }
+
+        ret = dict_get_int32 (dict, "cmd", &cmd);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR, "Couldn't get status op");
+                goto out;
+        }
+
+        ret = dict_get_str (dict, "volname", &volname);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR, "Couldn't get volname");
+                goto out;
+        }
+
+        ctx = glusterfs_ctx_get ();
+        GF_ASSERT (ctx);
+        active = ctx->active;
+        any = active->first;
+
+        ret = gf_asprintf (&xname, "%s-server", volname);
+        if (-1 == ret) {
+                gf_log (this->name, GF_LOG_ERROR, "Out of memory");
+                goto out;
+        }
+
+        xlator = xlator_search_by_name (any, xname);
+        if (!xlator) {
+                gf_log (this->name, GF_LOG_ERROR, "xlator %s is not loaded",
+                        xname);
+                ret = -1;
+                goto out;
+        }
+
+
+        output = dict_new ();
+        switch (cmd & GF_CLI_STATUS_MASK) {
+        case GF_CLI_STATUS_MEM:
+                ret = 0;
+                gf_proc_dump_mem_info_to_dict (output);
+                gf_proc_dump_mempool_info_to_dict (ctx, output);
+                break;
+
+        case GF_CLI_STATUS_CLIENTS:
+                ret = xlator->dumpops->priv_to_dict (xlator, output);
+                break;
+
+        case GF_CLI_STATUS_INODE:
+                ret = xlator->dumpops->inode_to_dict (xlator, output);
+                break;
+
+        case GF_CLI_STATUS_FD:
+                ret = xlator->dumpops->fd_to_dict (xlator, output);
+                break;
+
+        case GF_CLI_STATUS_CALLPOOL:
+                ret = 0;
+                gf_proc_dump_pending_frames_to_dict (ctx->pool, output);
+                break;
+
+        default:
+                ret = -1;
+                msg = gf_strdup ("Unknown status op");
+                break;
+        }
+        rsp.op_ret = ret;
+        rsp.op_errno = 0;
+        if (ret && msg)
+                rsp.op_errstr = msg;
+        else
+                rsp.op_errstr = "";
+
+        ret = dict_allocate_and_serialize (output, &rsp.output.output_val,
+                                           (size_t *)&rsp.output.output_len);
+        if (ret) {
+                gf_log (this->name, GF_LOG_ERROR,
+                        "Failed to serialize output dict to rsp");
+                goto out;
+        }
+
+        ret = glusterfs_submit_reply (req, &rsp, NULL, 0, NULL,
+                                      (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
+
+out:
+        if (dict)
+                dict_unref (dict);
+        if (brick_req.input.input_val)
+                free (brick_req.input.input_val);
+        if (xname)
+                GF_FREE (xname);
+        if (msg)
+                GF_FREE (msg);
+        if (rsp.output.output_val)
+                GF_FREE (rsp.output.output_val);
+
+        return ret;
+}
+
+int
glusterfs_handle_rpc_msg (rpcsvc_request_t *req)
{
        int ret = -1;
@@ -751,6 +884,9 @@ glusterfs_handle_rpc_msg (rpcsvc_request_t *req)
        case GLUSTERD_BRICK_XLATOR_HEAL:
                ret = glusterfs_handle_translator_heal (req);
                break;
+        case GLUSTERD_BRICK_STATUS:
+                ret = glusterfs_handle_brick_status (req);
+                break;
        default:
                break;
        }
@@ -806,7 +942,8 @@ rpcsvc_actor_t glusterfs_actors[] = {
        [GLUSTERD_BRICK_NULL]        = { "NULL",            GLUSTERD_BRICK_NULL,        glusterfs_handle_rpc_msg, NULL, NULL, 0},
        [GLUSTERD_BRICK_TERMINATE]   = { "TERMINATE",       GLUSTERD_BRICK_TERMINATE,   glusterfs_handle_rpc_msg, NULL, NULL, 0},
        [GLUSTERD_BRICK_XLATOR_INFO] = { "TRANSLATOR INFO", GLUSTERD_BRICK_XLATOR_INFO, glusterfs_handle_rpc_msg, NULL, NULL, 0},
-        [GLUSTERD_BRICK_XLATOR_HEAL] = { "TRANSLATOR HEAL", GLUSTERD_BRICK_XLATOR_HEAL, glusterfs_handle_rpc_msg, NULL, NULL, 0}
+        [GLUSTERD_BRICK_XLATOR_HEAL] = { "TRANSLATOR HEAL", GLUSTERD_BRICK_XLATOR_HEAL, glusterfs_handle_rpc_msg, NULL, NULL, 0},
+        [GLUSTERD_BRICK_STATUS]      = { "STATUS",          GLUSTERD_BRICK_STATUS,      glusterfs_handle_rpc_msg, NULL, NULL, 0}
};
struct rpcsvc_program glusterfs_mop_prog = {