author     Kaushal M <kaushal@redhat.com>      2012-01-01 15:59:28 +0530
committer  Vijay Bellur <vijay@gluster.com>    2012-01-27 04:20:04 -0800
commit     623919a78a7faac30d1f0df5793681da2c449e32 (patch)
tree       ee213fa96ebf5feb938babf36c34cb7c8d5f6a24 /xlators
parent     a078235dbede380ca695251e86a1502ca131d816 (diff)
cli: Extend "volume status" with statedump info
This patch extends the "volume status" command with information obtained
from the statedumps of the bricks of a volume.

Adds new status types: clients, inode, fd, mem, callpool.

The new syntax of "volume status" is,
 # gluster volume status [all|{<volname> [<brickname>] [misc-details|clients|inode|fd|mem|callpool]}]

Change-Id: I8d019718465bbc3de727653a839de7238f45da5c
BUG: 765495
Signed-off-by: Kaushal M <kaushal@redhat.com>
Reviewed-on: http://review.gluster.com/2637
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Krishnan Parthasarathi <kp@gluster.com>
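For illustration, with a hypothetical volume "test-vol" and brick
"server1:/exports/brick1", the extended command covers invocations such as:

 # gluster volume status test-vol clients
 # gluster volume status test-vol server1:/exports/brick1 mem
 # gluster volume status all

As the server.c changes below show, the new status types are answered from a
statedump-style dump of the brick process that is returned to glusterd as a
dict over the brick-op RPC, rather than being written to the usual statedump
file.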
Diffstat (limited to 'xlators')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c    233
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-rpc-ops.c   22
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.h            1
-rw-r--r--  xlators/protocol/server/src/server.c          139
4 files changed, 367 insertions, 28 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index d72581910..38d73b1f7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -192,6 +192,18 @@ glusterd_brick_op_build_payload (glusterd_op_t op, glusterd_brickinfo_t *brickin
brick_req->name = "";
}
break;
+ case GD_OP_STATUS_VOLUME:
+ {
+ brick_req = GF_CALLOC (1, sizeof (*brick_req),
+ gf_gld_mt_mop_brick_req_t);
+ if (!brick_req) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Out of memory");
+ goto out;
+ }
+ brick_req->op = GLUSTERD_BRICK_STATUS;
+ brick_req->name = "";
+ }
+ break;
default:
goto out;
break;
@@ -562,7 +574,6 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
if (ret) {
snprintf (msg, sizeof(msg), "Volume %s does not exist", volname);
gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
- *op_errstr = gf_strdup(msg);
ret = -1;
goto out;
}
@@ -573,8 +584,12 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
goto out;
ret = glusterd_brickinfo_from_brick (brick, &brickinfo);
- if (ret)
+ if (ret) {
+ snprintf (msg, sizeof (msg), "%s is not a brick",
+ brick);
+ gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
goto out;
+ }
ret = glusterd_volume_brickinfo_get (NULL,
brickinfo->hostname,
@@ -588,7 +603,6 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
" volume %s", brick, volname);
gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
- *op_errstr = gf_strdup(msg);
ret = -1;
goto out;
}
@@ -597,8 +611,12 @@ glusterd_op_stage_status_volume (dict_t *dict, char **op_errstr)
ret = 0;
out:
- if (ret && !(*op_errstr))
- *op_errstr = gf_strdup ("Validation Failed for Status");
+ if (ret) {
+ if (msg[0] != '\0')
+ *op_errstr = gf_strdup (msg);
+ else
+ *op_errstr = gf_strdup ("Validation Failed for Status");
+ }
gf_log (THIS->name, GF_LOG_DEBUG, "Returning: %d", ret);
return ret;
@@ -1242,14 +1260,13 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
{
int ret = -1;
int brick_count = 0;
- int32_t brick_index = 0;
+ int brick_index = -1;
uint32_t cmd = 0;
char *volname = NULL;
char *brick = NULL;
xlator_t *this = NULL;
glusterd_volinfo_t *volinfo = NULL;
glusterd_brickinfo_t *brickinfo = NULL;
- glusterd_brickinfo_t *tmpbrickinfo = NULL;
glusterd_conf_t *priv = NULL;
this = THIS;
@@ -1301,36 +1318,41 @@ glusterd_op_status_volume (dict_t *dict, char **op_errstr,
ret = dict_get_str (dict, "brick", &brick);
if (ret)
goto out;
- ret = glusterd_brickinfo_from_brick (brick, &tmpbrickinfo);
+
+ ret = glusterd_volume_brickinfo_get_by_brick (brick,
+ volinfo,
+ &brickinfo,
+ GF_PATH_COMPLETE);
if (ret)
goto out;
- if (uuid_is_null (tmpbrickinfo->uuid) &&
- glusterd_resolve_brick (tmpbrickinfo))
+
+ if (uuid_compare (brickinfo->uuid, priv->uuid))
goto out;
- }
- list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- if (!uuid_compare (brickinfo->uuid, priv->uuid)) {
+ glusterd_add_brick_to_dict (volinfo, brickinfo, rsp_dict, 0);
+ if (cmd & GF_CLI_STATUS_DETAIL)
+ glusterd_add_brick_detail_to_dict (volinfo, brickinfo,
+ rsp_dict, 0);
+ ret = dict_set_int32 (rsp_dict, "count", 1);
- if ((cmd & GF_CLI_STATUS_BRICK) != 0 &&
- (strcmp (tmpbrickinfo->path,
- brickinfo->path) ||
- uuid_compare (tmpbrickinfo->uuid,
- brickinfo->uuid)))
+ goto out;
+ } else {
+ list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+ brick_index++;
+ if (uuid_compare (brickinfo->uuid, priv->uuid))
continue;
glusterd_add_brick_to_dict (volinfo, brickinfo,
rsp_dict, brick_index);
- if (cmd & GF_CLI_STATUS_DETAIL)
+ if (cmd & GF_CLI_STATUS_DETAIL) {
glusterd_add_brick_detail_to_dict (volinfo,
brickinfo,
rsp_dict,
brick_index);
+ }
brick_count++;
}
- if (!(cmd & GF_CLI_STATUS_BRICK))
- brick_index++;
}
ret = dict_set_int32 (rsp_dict, "count", brick_count);
@@ -2149,6 +2171,7 @@ glusterd_need_brick_op (glusterd_op_t op)
switch (op) {
case GD_OP_PROFILE_VOLUME:
+ case GD_OP_STATUS_VOLUME:
ret = _gf_true;
break;
default:
@@ -2503,6 +2526,57 @@ glusterd_profile_volume_brick_rsp (glusterd_brickinfo_t *brickinfo,
return ret;
}
+void
+_status_volume_add_brick_rsp (dict_t *this, char *key, data_t *value,
+ void *data)
+{
+ char new_key[256] = {0,};
+ data_t *new_value = 0;
+ glusterd_pr_brick_rsp_conv_t *rsp_ctx = NULL;
+
+ rsp_ctx = data;
+ new_value = data_copy (value);
+ snprintf (new_key, sizeof (new_key), "brick%d.%s", rsp_ctx->count, key);
+ dict_set (rsp_ctx->dict, new_key, new_value);
+
+ return;
+}
+
+int
+glusterd_status_volume_brick_rsp (glusterd_brickinfo_t *brickinfo,
+ dict_t *rsp_dict, dict_t *op_ctx,
+ char **op_errstr)
+{
+ int ret = 0;
+ glusterd_pr_brick_rsp_conv_t rsp_ctx = {0};
+ int32_t count = 0;
+ int index = 0;
+
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (brickinfo);
+
+ ret = dict_get_int32 (op_ctx, "count", &count);
+ if (ret) {
+ count = 0;
+ } else {
+ count++;
+ }
+ ret = dict_get_int32 (rsp_dict, "index", &index);
+ if (ret)
+ goto out;
+ dict_del (rsp_dict, "index");
+
+ rsp_ctx.count = index;
+ rsp_ctx.dict = op_ctx;
+ dict_foreach (rsp_dict, _status_volume_add_brick_rsp, &rsp_ctx);
+ ret = dict_set_int32 (op_ctx, "count", count);
+
+out:
+ return ret;
+}
+
int32_t
glusterd_handle_brick_rsp (glusterd_brickinfo_t *brickinfo,
glusterd_op_t op, dict_t *rsp_dict, dict_t *op_ctx,
@@ -2516,7 +2590,11 @@ glusterd_handle_brick_rsp (glusterd_brickinfo_t *brickinfo,
case GD_OP_PROFILE_VOLUME:
ret = glusterd_profile_volume_brick_rsp (brickinfo, rsp_dict,
op_ctx, op_errstr);
- break;
+ break;
+ case GD_OP_STATUS_VOLUME:
+ ret = glusterd_status_volume_brick_rsp (brickinfo, rsp_dict,
+ op_ctx, op_errstr);
+ break;
default:
break;
@@ -2869,6 +2947,113 @@ out:
}
static int
+glusterd_bricks_select_status_volume (dict_t *dict, char **op_errstr)
+{
+ int ret = -1;
+ int cmd = 0;
+ int brick_index = -1;
+ char *volname = NULL;
+ char *brickname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_pending_node_t *pending_node = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+
+ GF_ASSERT (dict);
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ ret = dict_get_int32 (dict, "cmd", &cmd);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Unable to get status type");
+ goto out;
+ }
+
+ if (cmd & GF_CLI_STATUS_ALL)
+ goto out;
+
+ switch (cmd & GF_CLI_STATUS_MASK) {
+ case GF_CLI_STATUS_MEM:
+ case GF_CLI_STATUS_CLIENTS:
+ case GF_CLI_STATUS_INODE:
+ case GF_CLI_STATUS_FD:
+ case GF_CLI_STATUS_CALLPOOL:
+ break;
+ default:
+ goto out;
+ }
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Unable to get volname");
+ goto out;
+ }
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ goto out;
+ }
+
+ if ( (cmd & GF_CLI_STATUS_BRICK) != 0) {
+ ret = dict_get_str (dict, "brick", &brickname);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Unable to get brick");
+ goto out;
+ }
+ ret = glusterd_volume_brickinfo_get_by_brick (brickname,
+ volinfo,
+ &brickinfo,
+ GF_PATH_COMPLETE);
+ if (ret)
+ goto out;
+
+ if (uuid_compare (brickinfo->uuid, priv->uuid)||
+ !glusterd_is_brick_started (brickinfo))
+ goto out;
+
+ pending_node = GF_CALLOC (1, sizeof (*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ goto out;
+ }
+ pending_node->node = brickinfo;
+ pending_node->type = GD_NODE_BRICK;
+ pending_node->index = 0;
+ list_add_tail (&pending_node->list, &opinfo.pending_bricks);
+
+ ret = 0;
+ } else {
+ list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+ brick_index++;
+ if (uuid_compare (brickinfo->uuid, priv->uuid) ||
+ !glusterd_is_brick_started (brickinfo)) {
+ continue;
+ }
+ pending_node = GF_CALLOC (1, sizeof (*pending_node),
+ gf_gld_mt_pending_node_t);
+ if (!pending_node) {
+ ret = -1;
+ gf_log (THIS->name ,GF_LOG_ERROR,
+ "Unable to allocate memory");
+ goto out;
+ }
+ pending_node->node = brickinfo;
+ pending_node->type = GD_NODE_BRICK;
+ pending_node->index = brick_index;
+ list_add_tail (&pending_node->list,
+ &opinfo.pending_bricks);
+ pending_node = NULL;
+ }
+ }
+out:
+ return ret;
+}
+
+static int
glusterd_op_ac_send_brick_op (glusterd_op_sm_event_t *event, void *ctx)
{
int ret = 0;
@@ -2990,6 +3175,10 @@ glusterd_op_bricks_select (glusterd_op_t op, dict_t *dict, char **op_errstr)
ret = glusterd_bricks_select_heal_volume (dict, op_errstr);
break;
+ case GD_OP_STATUS_VOLUME:
+ ret = glusterd_bricks_select_status_volume (dict, op_errstr);
+ break;
+
default:
break;
}
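
The new glusterd helpers above aggregate each brick's response into the shared
op_ctx: _status_volume_add_brick_rsp copies every key of a brick's response
dict into op_ctx, prefixed with the brick's index, so per-brick results from
several bricks can coexist in one flat dict. Below is a minimal, self-contained
sketch of that re-keying; a plain key/value array stands in for the real
dict_t/dict_foreach API so it compiles without the glusterfs headers, and all
names in it are illustrative.

    /* Minimal standalone sketch of the "brick<N>.<key>" re-keying done
     * by _status_volume_add_brick_rsp. A plain key/value array stands
     * in for the real dict_t/dict_foreach API. */
    #include <stdio.h>

    struct kv {
            const char *key;
            const char *value;
    };

    /* Copy every entry of 'src' into 'dst', prefixing each key with
     * the brick index, e.g. "pid" becomes "brick2.pid". */
    static void
    add_brick_rsp (const struct kv *src, size_t n, int brick_index,
                   struct kv *dst, char keybuf[][256])
    {
            size_t i = 0;

            for (i = 0; i < n; i++) {
                    snprintf (keybuf[i], 256, "brick%d.%s",
                              brick_index, src[i].key);
                    dst[i].key = keybuf[i];
                    dst[i].value = src[i].value;
            }
    }

    int
    main (void)
    {
            struct kv rsp[2] = { {"pid", "1234"}, {"conncount", "2"} };
            struct kv out[2];
            char keys[2][256];
            size_t i = 0;

            add_brick_rsp (rsp, 2, 0, out, keys);
            for (i = 0; i < 2; i++)
                    printf ("%s = %s\n", out[i].key, out[i].value);
            return 0;
    }

Run, this prints brick0.pid = 1234 and brick0.conncount = 2; in the real code
the prefix index comes from the "index" value the RPC callback stores in the
response dict.
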
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index a4dd7e25c..21d8ab0ac 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -1038,8 +1038,7 @@ glusterd_volume_status_use_rsp_dict (dict_t *rsp_dict)
rsp_ctx.count = count;
rsp_ctx.dict = ctx_dict;
dict_foreach (rsp_dict, glusterd_volume_status_add_peer_rsp, &rsp_ctx);
- dict_del (ctx_dict, "count");
- ret = dict_get_int32 (ctx_dict, "count", &brick_count);
+
ret = dict_set_int32 (ctx_dict, "count", count + brick_count);
out:
return ret;
@@ -1626,9 +1625,13 @@ glusterd3_1_brick_op_cbk (struct rpc_req *req, struct iovec *iov,
call_frame_t *frame = NULL;
glusterd_op_brick_rsp_ctx_t *ev_ctx = NULL;
dict_t *dict = NULL;
+ int index = 0;
+ glusterd_req_ctx_t *req_ctx = NULL;
+ glusterd_pending_node_t *node = NULL;
GF_ASSERT (req);
frame = myframe;
+ req_ctx = frame->local;
if (-1 == req->rpc_status) {
rsp.op_ret = -1;
@@ -1643,7 +1646,7 @@ glusterd3_1_brick_op_cbk (struct rpc_req *req, struct iovec *iov,
gf_log ("", GF_LOG_ERROR, "error");
rsp.op_ret = -1;
rsp.op_errno = EINVAL;
- rsp.op_errstr = strdup ("Unable to decode response");
+ rsp.op_errstr = strdup ("Unable to decode brick op response");
event_type = GD_OP_EVENT_RCVD_RJT;
goto out;
}
@@ -1668,6 +1671,19 @@ glusterd3_1_brick_op_cbk (struct rpc_req *req, struct iovec *iov,
op_ret = rsp.op_ret;
+ /* Add index to rsp_dict for GD_OP_STATUS_VOLUME */
+ if (GD_OP_STATUS_VOLUME == req_ctx->op) {
+ node = frame->cookie;
+ index = node->index;
+ ret = dict_set_int32 (dict, "index", index);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Error setting index on brick status rsp dict");
+ rsp.op_ret = -1;
+ event_type = GD_OP_EVENT_RCVD_RJT;
+ goto out;
+ }
+ }
out:
ev_ctx = GF_CALLOC (1, sizeof (*ev_ctx), gf_gld_mt_brick_rsp_ctx_t);
GF_ASSERT (ev_ctx);
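
The callback change relies on the brick's position travelling with the
request: glusterd_bricks_select_status_volume stores it in pending_node->index,
the pending node arrives at the callback as the frame cookie, and the callback
stamps it into the response dict under "index" for
glusterd_status_volume_brick_rsp to consume. A simplified, self-contained
sketch of that cookie hand-off (toy stand-in types, not the real glusterd
structures):

    /* Sketch of the index hand-off; the structures are stand-ins for
     * glusterd_pending_node_t and call_frame_t. */
    #include <stdio.h>

    struct pending_node {
            int index;      /* brick's position in the volume */
    };

    struct frame {
            void *cookie;   /* the op sm passes the pending node here */
    };

    /* The brick-op callback recovers the brick index from the cookie;
     * the real code then does dict_set_int32 (dict, "index", index). */
    static int
    brick_op_cbk (struct frame *frame, int *rsp_index)
    {
            struct pending_node *node = frame->cookie;

            *rsp_index = node->index;
            return 0;
    }

    int
    main (void)
    {
            struct pending_node node = { 2 };
            struct frame frame = { &node };
            int index = -1;

            brick_op_cbk (&frame, &index);
            printf ("response tagged with brick index %d\n", index);
            return 0;
    }

Carrying the index in the cookie keeps the brick itself position-agnostic: the
brick only dumps its state, and glusterd does the numbering.
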
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index 6e9619250..44f84d78d 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -240,6 +240,7 @@ typedef struct glusterd_pending_node_ {
struct list_head list;
void *node;
gd_node_type type;
+ int32_t index;
} glusterd_pending_node_t;
enum glusterd_op_ret {
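
glusterd_pending_node_t gains an index field so a status brick-op response can
be matched back to the right "brick<N>." prefix even when some bricks are
skipped (not local, not started). A self-contained sketch of the selection
loop's indexing, with a toy singly linked list in place of the kernel-style
list_head and a 'started' array in place of the real per-brick checks:

    /* Sketch of indexed pending-node selection (illustrative only). */
    #include <stdio.h>
    #include <stdlib.h>

    struct pending_node {
            int                  index;  /* ordinal in the brick list */
            struct pending_node *next;
    };

    int
    main (void)
    {
            int                   started[4] = { 1, 0, 1, 1 };
            int                   i = 0;
            struct pending_node  *head = NULL, *n = NULL;
            struct pending_node **tail = &head;

            /* Indices keep counting across skipped bricks, mirroring
             * the brick_index++ placement in
             * glusterd_bricks_select_status_volume. */
            for (i = 0; i < 4; i++) {
                    if (!started[i])
                            continue;
                    n = calloc (1, sizeof (*n));
                    if (!n)
                            return 1;
                    n->index = i;
                    *tail = n;
                    tail = &n->next;
            }

            while (head) {
                    n = head;
                    head = head->next;
                    printf ("pending brick index %d\n", n->index);
                    free (n);
            }
            return 0;
    }
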
diff --git a/xlators/protocol/server/src/server.c b/xlators/protocol/server/src/server.c
index d767199d0..60bc517dd 100644
--- a/xlators/protocol/server/src/server.c
+++ b/xlators/protocol/server/src/server.c
@@ -164,6 +164,38 @@ ret:
/* */
int
+server_fd_to_dict (xlator_t *this, dict_t *dict)
+{
+ server_conf_t *conf = NULL;
+ server_connection_t *trav = NULL;
+ char key[GF_DUMP_MAX_BUF_LEN] = {0,};
+ int count = 0;
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO (THIS->name, this, out);
+ GF_VALIDATE_OR_GOTO (this->name, dict, out);
+
+ conf = this->private;
+ if (!conf)
+ return -1;
+
+ ret = pthread_mutex_trylock (&conf->mutex);
+ if (ret)
+ return -1;
+
+ list_for_each_entry (trav, &conf->conns, list) {
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "conn%d", count++);
+ fdtable_dump_to_dict (trav->fdtable, key, dict);
+ }
+ pthread_mutex_unlock (&conf->mutex);
+
+ ret = dict_set_int32 (dict, "conncount", count);
+out:
+ return ret;
+}
+
+int
server_fd (xlator_t *this)
{
server_conf_t *conf = NULL;
@@ -218,6 +250,53 @@ out:
}
int
+server_priv_to_dict (xlator_t *this, dict_t *dict)
+{
+ server_conf_t *conf = NULL;
+ rpc_transport_t *xprt = NULL;
+ peer_info_t *peerinfo = NULL;
+ char key[32] = {0,};
+ int count = 0;
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO (THIS->name, this, out);
+ GF_VALIDATE_OR_GOTO (THIS->name, dict, out);
+
+ conf = this->private;
+ if (!conf)
+ return 0;
+ //TODO: Dump only specific info to dict
+
+ list_for_each_entry (xprt, &conf->xprt_list, list) {
+ peerinfo = &xprt->peerinfo;
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "client%d.hostname", count);
+ ret = dict_set_str (dict, key, peerinfo->identifier);
+ if (ret)
+ goto out;
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "client%d.bytesread", count);
+ ret = dict_set_uint64 (dict, key, xprt->total_bytes_read);
+ if (ret)
+ goto out;
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "client%d.byteswrite", count);
+ ret = dict_set_uint64 (dict, key, xprt->total_bytes_write);
+ if (ret)
+ goto out;
+
+ count++;
+ }
+
+ ret = dict_set_int32 (dict, "clientcount", count);
+
+out:
+ return ret;
+}
+
+int
server_priv (xlator_t *this)
{
server_conf_t *conf = NULL;
@@ -250,6 +329,57 @@ out:
}
int
+server_inode_to_dict (xlator_t *this, dict_t *dict)
+{
+ server_conf_t *conf = NULL;
+ server_connection_t *trav = NULL;
+ char key[32] = {0,};
+ int count = 0;
+ int ret = -1;
+ xlator_t *prev_bound_xl = NULL;
+
+ GF_VALIDATE_OR_GOTO (THIS->name, this, out);
+ GF_VALIDATE_OR_GOTO (this->name, dict, out);
+
+ conf = this->private;
+ if (!conf)
+ return -1;
+
+ ret = pthread_mutex_trylock (&conf->mutex);
+ if (ret)
+ return -1;
+
+ list_for_each_entry (trav, &conf->conns, list) {
+ if (trav->bound_xl && trav->bound_xl->itable) {
+ /* Presently every brick contains only one
+ * bound_xl for all connections. This will lead
+ * to duplicating of the inode lists, if listing
+ * is done for every connection. This simple check
+ * prevents duplication in the present case. If
+ * need arises the check can be improved.
+ */
+ if (trav->bound_xl == prev_bound_xl)
+ continue;
+ prev_bound_xl = trav->bound_xl;
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "conn%d", count);
+ inode_table_dump_to_dict (trav->bound_xl->itable,
+ key, dict);
+ count++;
+ }
+ }
+ pthread_mutex_unlock (&conf->mutex);
+
+ ret = dict_set_int32 (dict, "conncount", count);
+
+out:
+ if (prev_bound_xl)
+ prev_bound_xl = NULL;
+ return ret;
+}
+
+int
server_inode (xlator_t *this)
{
server_conf_t *conf = NULL;
@@ -811,9 +941,12 @@ struct xlator_cbks cbks = {
};
struct xlator_dumpops dumpops = {
- .priv = server_priv,
- .fd = server_fd,
- .inode = server_inode,
+ .priv = server_priv,
+ .fd = server_fd,
+ .inode = server_inode,
+ .priv_to_dict = server_priv_to_dict,
+ .fd_to_dict = server_fd_to_dict,
+ .inode_to_dict = server_inode_to_dict,
};
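
server.c now exports its statedump information through the new *_to_dict
dumpops alongside the existing file-based ones. Note that server_fd_to_dict
and server_inode_to_dict take conf->mutex with pthread_mutex_trylock and
simply fail if it is contended, so a status query cannot wedge the brick's
request path on a busy lock. A minimal standalone sketch of that
bail-out-on-contention pattern (hypothetical names; compile with
cc sketch.c -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t conns_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int
    dump_conns_to_dict (void)
    {
            int ret = -1;

            /* Non-blocking attempt; returns non-zero if already held. */
            ret = pthread_mutex_trylock (&conns_mutex);
            if (ret)
                    return -1;

            /* ... walk the connection list and fill the dict ... */
            printf ("lock acquired, dumping connections\n");

            pthread_mutex_unlock (&conns_mutex);
            return 0;
    }

    int
    main (void)
    {
            return dump_conns_to_dict () ? 1 : 0;
    }

Failing fast trades completeness for safety: a contended brick reports an
error for that one status query instead of stalling client I/O.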