author:    hari gowtham <hgowtham@redhat.com>    2018-04-11 17:38:26 +0530
committer: Hari Gowtham <hgowtham@redhat.com>    2018-09-18 12:24:52 +0530
commit:    ca5adfb65b08841714431e97751a0c0c63a4bbdf (patch)
tree:      2df2190b06e02f38e17a549f10495bd4e938540c /glusterfsd/src
parent:    fe5b6bc8522b3539a97765b243ad37ef227c05b6 (diff)
glusterd: volume inode/fd status broken with brick mux
backport of: https://review.gluster.org/#/c/19846/6

Problem:
The values for inode/fd were populated from the ctx received from the
server xlator. Without brick mux, every brick of a volume ran in its own
process, so searching for that volume's server xlator and populating
from it worked. With brick mux, a number of bricks can be confined to a
single process, and these bricks can be from different volumes too (if
the max-bricks-per-process option is used). If they are from different
volumes, using the server xlator to populate the status causes problems.

Fix:
Use the brick to validate and populate the inode/fd status.

>Signed-off-by: hari gowtham <hgowtham@redhat.com>
>Change-Id: I2543fa5397ea095f8338b518460037bba3dfdbfd
>fixes: bz#1566067

Change-Id: I2543fa5397ea095f8338b518460037bba3dfdbfd
BUG: 1569336
fixes: bz#1569336
Signed-off-by: hari gowtham <hgowtham@redhat.com>
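The heart of the fix is resolving the brick's own xlator inside the multiplexed
process graph (looking it up by the brick name) instead of deriving a per-volume
"<volname>-server" xlator name. As a rough illustration of that lookup idea, here
is a minimal, self-contained C sketch; the xl_t struct, the find_xl_by_name helper
and the node names are simplified stand-ins invented for this example, not the
real GlusterFS xlator_t graph or get_xlator_by_name implementation:

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for an xlator graph node: each node has a name,
 * a first child, and a next sibling, mirroring how several brick
 * subtrees hang off one shared server node under brick multiplexing. */
typedef struct xl {
        const char *name;
        struct xl  *children;   /* first child subtree */
        struct xl  *next;       /* next sibling */
} xl_t;

/* Depth-first search for a node by exact name, analogous in spirit to
 * looking up the brick xlator by its brick name rather than formatting
 * "<volname>-server" and searching for a per-volume server xlator. */
static xl_t *
find_xl_by_name (xl_t *root, const char *name)
{
        if (!root)
                return NULL;
        if (strcmp (root->name, name) == 0)
                return root;

        for (xl_t *child = root->children; child; child = child->next) {
                xl_t *found = find_xl_by_name (child, name);
                if (found)
                        return found;
        }
        return NULL;
}

int
main (void)
{
        /* One multiplexed process: a single server node with two brick
         * subtrees that belong to two different volumes. */
        xl_t brick_b = { "volB-brick0", NULL, NULL };
        xl_t brick_a = { "volA-brick0", NULL, &brick_b };
        xl_t server  = { "glusterfsd-server", &brick_a, NULL };

        xl_t *hit = find_xl_by_name (&server, "volB-brick0");
        printf ("found: %s\n", hit ? hit->name : "(none)");
        return 0;
}

Because both volA-brick0 and volB-brick0 hang off the same server node in a
multiplexed process, selecting the status target by brick name is what keeps
the inode/fd dump pointed at the brick (and volume) the CLI actually asked about.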
Diffstat (limited to 'glusterfsd/src')
-rw-r--r--  glusterfsd/src/glusterfsd-mgmt.c  34
1 file changed, 15 insertions(+), 19 deletions(-)
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index 10ac6bfa6cb..610eba80fa9 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -1053,14 +1053,14 @@ glusterfs_handle_brick_status (rpcsvc_request_t *req)
glusterfs_ctx_t *ctx = NULL;
glusterfs_graph_t *active = NULL;
xlator_t *this = NULL;
- xlator_t *any = NULL;
- xlator_t *xlator = NULL;
+ xlator_t *server_xl = NULL;
+ xlator_t *brick_xl = NULL;
dict_t *dict = NULL;
dict_t *output = NULL;
- char *volname = NULL;
char *xname = NULL;
uint32_t cmd = 0;
char *msg = NULL;
+ char *brickname = NULL;
GF_ASSERT (req);
this = THIS;
@@ -1088,32 +1088,26 @@ glusterfs_handle_brick_status (rpcsvc_request_t *req)
goto out;
}
- ret = dict_get_str (dict, "volname", &volname);
+ ret = dict_get_str (dict, "brick-name", &brickname);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Couldn't get volname");
+ gf_log (this->name, GF_LOG_ERROR, "Couldn't get brickname from"
+ " dict");
goto out;
}
ctx = glusterfsd_ctx;
GF_ASSERT (ctx);
active = ctx->active;
- any = active->first;
+ server_xl = active->first;
- ret = gf_asprintf (&xname, "%s-server", volname);
- if (-1 == ret) {
- gf_log (this->name, GF_LOG_ERROR, "Out of memory");
- goto out;
- }
-
- xlator = xlator_search_by_name (any, xname);
- if (!xlator) {
+ brick_xl = get_xlator_by_name (server_xl, brickname);
+ if (!brick_xl) {
gf_log (this->name, GF_LOG_ERROR, "xlator %s is not loaded",
xname);
ret = -1;
goto out;
}
-
output = dict_new ();
switch (cmd & GF_CLI_STATUS_MASK) {
case GF_CLI_STATUS_MEM:
@@ -1123,15 +1117,17 @@ glusterfs_handle_brick_status (rpcsvc_request_t *req)
break;
case GF_CLI_STATUS_CLIENTS:
- ret = xlator->dumpops->priv_to_dict (xlator, output);
+ ret = server_xl->dumpops->priv_to_dict (server_xl,
+ output, brickname);
break;
case GF_CLI_STATUS_INODE:
- ret = xlator->dumpops->inode_to_dict (xlator, output);
+ ret = server_xl->dumpops->inode_to_dict (brick_xl,
+ output);
break;
case GF_CLI_STATUS_FD:
- ret = xlator->dumpops->fd_to_dict (xlator, output);
+ ret = server_xl->dumpops->fd_to_dict (brick_xl, output);
break;
case GF_CLI_STATUS_CALLPOOL:
@@ -1307,7 +1303,7 @@ glusterfs_handle_node_status (rpcsvc_request_t *req)
"Error setting volname to dict");
goto out;
}
- ret = node->dumpops->priv_to_dict (node, output);
+ ret = node->dumpops->priv_to_dict (node, output, NULL);
break;
case GF_CLI_STATUS_INODE: