author    hari gowtham <hgowtham@redhat.com>      2018-04-11 17:38:26 +0530
committer Atin Mukherjee <amukherj@redhat.com>    2018-04-19 02:54:50 +0000
commit    be26b0da2f1a7fe336400de6a1c016716983bd38 (patch)
tree      573d0289d2556cbf99085e7888197bea2b07ee23
parent    054cecc30676017f83a18847734d9fe0fcb8ea72 (diff)
glusterd: volume inode/fd status broken with brick mux
Problem: The values for inode/fd status were populated from the ctx received from the server xlator. Without brick mux, each brick process contained a single brick from a single volume, so searching for the server xlator and populating from it worked.

With brick mux, a number of bricks can be confined to a single process, and these bricks can even belong to different volumes (when the max-bricks-per-process option is used). If they are from different volumes, populating from the server xlator causes problems.

Fix: Use the brick to validate and populate the inode/fd status.

Signed-off-by: hari gowtham <hgowtham@redhat.com>
Change-Id: I2543fa5397ea095f8338b518460037bba3dfdbfd
fixes: bz#1566067
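Not part of the patch: a minimal standalone sketch of the filtering idea the fix relies on, using toy types rather than the real GlusterFS structures. With brick multiplexing, one process hosts several bricks that share a single client table, so a per-brick status dump has to skip clients bound to a different brick (the real code compares client->bound_xl->name against the requested brick name).

/* Toy model, not GlusterFS code: several bricks multiplexed into one
 * process, each client bound to exactly one brick.  A per-brick status
 * dump must filter the shared client table by the client's bound brick. */
#include <stdio.h>
#include <string.h>

struct client {
        const char *id;
        const char *bound_brick;   /* stands in for client->bound_xl->name */
        int         open_fds;
};

/* Shared table: with brick mux, every client of the process lives here,
 * regardless of which brick (or volume) it is connected to. */
static struct client clients[] = {
        { "client-1", "/bricks/v0-b0", 3 },
        { "client-2", "/bricks/v1-b0", 1 },
        { "client-3", "/bricks/v0-b0", 2 },
};

/* Dump fd status only for clients bound to the requested brick. */
static void
dump_fd_status (const char *brickname)
{
        size_t i;
        int    count = 0;

        for (i = 0; i < sizeof (clients) / sizeof (clients[0]); i++) {
                if (strcmp (clients[i].bound_brick, brickname) != 0)
                        continue;   /* skip clients of other bricks */
                printf ("conn%d: %s fds=%d\n", count++,
                        clients[i].id, clients[i].open_fds);
        }
}

int
main (void)
{
        dump_fd_status ("/bricks/v0-b0");   /* prints client-1 and client-3 */
        return 0;
}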
-rw-r--r--  glusterfsd/src/glusterfsd-mgmt.c              | 34
-rw-r--r--  libglusterfs/src/client_t.c                   | 54
-rw-r--r--  libglusterfs/src/xlator.h                     |  3
-rw-r--r--  tests/basic/volume-status.t                   | 12
-rw-r--r--  xlators/features/cloudsync/src/cloudsync.c    |  2
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c  |  4
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-op-sm.c    |  3
-rw-r--r--  xlators/nfs/server/src/nfs.c                  |  2
-rw-r--r--  xlators/protocol/server/src/server.c          | 92
9 files changed, 119 insertions(+), 87 deletions(-)
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index 43f7f6cf043..bce8d5cc276 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -1119,14 +1119,14 @@ glusterfs_handle_brick_status (rpcsvc_request_t *req)
glusterfs_ctx_t *ctx = NULL;
glusterfs_graph_t *active = NULL;
xlator_t *this = NULL;
- xlator_t *any = NULL;
- xlator_t *xlator = NULL;
+ xlator_t *server_xl = NULL;
+ xlator_t *brick_xl = NULL;
dict_t *dict = NULL;
dict_t *output = NULL;
- char *volname = NULL;
char *xname = NULL;
uint32_t cmd = 0;
char *msg = NULL;
+ char *brickname = NULL;
GF_ASSERT (req);
this = THIS;
@@ -1154,32 +1154,26 @@ glusterfs_handle_brick_status (rpcsvc_request_t *req)
goto out;
}
- ret = dict_get_str (dict, "volname", &volname);
+ ret = dict_get_str (dict, "brick-name", &brickname);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Couldn't get volname");
+ gf_log (this->name, GF_LOG_ERROR, "Couldn't get brickname from"
+ " dict");
goto out;
}
ctx = glusterfsd_ctx;
GF_ASSERT (ctx);
active = ctx->active;
- any = active->first;
+ server_xl = active->first;
- ret = gf_asprintf (&xname, "%s-server", volname);
- if (-1 == ret) {
- gf_log (this->name, GF_LOG_ERROR, "Out of memory");
- goto out;
- }
-
- xlator = xlator_search_by_name (any, xname);
- if (!xlator) {
+ brick_xl = get_xlator_by_name (server_xl, brickname);
+ if (!brick_xl) {
gf_log (this->name, GF_LOG_ERROR, "xlator %s is not loaded",
xname);
ret = -1;
goto out;
}
-
output = dict_new ();
switch (cmd & GF_CLI_STATUS_MASK) {
case GF_CLI_STATUS_MEM:
@@ -1190,15 +1184,17 @@ glusterfs_handle_brick_status (rpcsvc_request_t *req)
case GF_CLI_STATUS_CLIENTS:
case GF_CLI_STATUS_CLIENT_LIST:
- ret = xlator->dumpops->priv_to_dict (xlator, output);
+ ret = server_xl->dumpops->priv_to_dict (server_xl,
+ output, brickname);
break;
case GF_CLI_STATUS_INODE:
- ret = xlator->dumpops->inode_to_dict (xlator, output);
+ ret = server_xl->dumpops->inode_to_dict (brick_xl,
+ output);
break;
case GF_CLI_STATUS_FD:
- ret = xlator->dumpops->fd_to_dict (xlator, output);
+ ret = server_xl->dumpops->fd_to_dict (brick_xl, output);
break;
case GF_CLI_STATUS_CALLPOOL:
@@ -1374,7 +1370,7 @@ glusterfs_handle_node_status (rpcsvc_request_t *req)
"Error setting volname to dict");
goto out;
}
- ret = node->dumpops->priv_to_dict (node, output);
+ ret = node->dumpops->priv_to_dict (node, output, NULL);
break;
case GF_CLI_STATUS_INODE:
diff --git a/libglusterfs/src/client_t.c b/libglusterfs/src/client_t.c
index a51fb7a88c0..63f4bbb4b06 100644
--- a/libglusterfs/src/client_t.c
+++ b/libglusterfs/src/client_t.c
@@ -744,10 +744,13 @@ gf_client_dump_fdtables_to_dict (xlator_t *this, dict_t *dict)
clienttable->cliententries[count].next_free)
continue;
client = clienttable->cliententries[count].client;
- memset(key, 0, sizeof key);
- snprintf (key, sizeof key, "conn%d", count++);
- fdtable_dump_to_dict (client->server_ctx.fdtable,
- key, dict);
+ if (!strcmp (client->bound_xl->name, this->name)) {
+ memset(key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "conn%d", count++);
+ fdtable_dump_to_dict (client->server_ctx.
+ fdtable,
+ key, dict);
+ }
}
}
UNLOCK(&clienttable->lock);
@@ -860,25 +863,30 @@ gf_client_dump_inodes_to_dict (xlator_t *this, dict_t *dict)
clienttable->cliententries[count].next_free)
continue;
client = clienttable->cliententries[count].client;
- memset(key, 0, sizeof key);
- if (client->bound_xl && client->bound_xl->itable) {
- /* Presently every brick contains only
- * one bound_xl for all connections.
- * This will lead to duplicating of
- * the inode lists, if listing is
- * done for every connection. This
- * simple check prevents duplication
- * in the present case. If need arises
- * the check can be improved.
- */
- if (client->bound_xl == prev_bound_xl)
- continue;
- prev_bound_xl = client->bound_xl;
-
- memset (key, 0, sizeof (key));
- snprintf (key, sizeof (key), "conn%d", count);
- inode_table_dump_to_dict (client->bound_xl->itable,
- key, dict);
+ if (!strcmp (client->bound_xl->name, this->name)) {
+ memset(key, 0, sizeof (key));
+ if (client->bound_xl && client->bound_xl->
+ itable) {
+ /* Presently every brick contains only
+ * one bound_xl for all connections.
+ * This will lead to duplicating of
+ * the inode lists, if listing is
+ * done for every connection. This
+ * simple check prevents duplication
+ * in the present case. If need arises
+ * the check can be improved.
+ */
+ if (client->bound_xl == prev_bound_xl)
+ continue;
+ prev_bound_xl = client->bound_xl;
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "conn%d",
+ count);
+ inode_table_dump_to_dict (client->
+ bound_xl->itable,
+ key, dict);
+ }
}
}
}
diff --git a/libglusterfs/src/xlator.h b/libglusterfs/src/xlator.h
index f41ebddd9a9..2f8fed6bb64 100644
--- a/libglusterfs/src/xlator.h
+++ b/libglusterfs/src/xlator.h
@@ -922,7 +922,8 @@ typedef int32_t (*dumpop_inodectx_t) (xlator_t *this, inode_t *ino);
typedef int32_t (*dumpop_fdctx_t) (xlator_t *this, fd_t *fd);
-typedef int32_t (*dumpop_priv_to_dict_t) (xlator_t *this, dict_t *dict);
+typedef int32_t (*dumpop_priv_to_dict_t) (xlator_t *this, dict_t *dict,
+ char *brickname);
typedef int32_t (*dumpop_inode_to_dict_t) (xlator_t *this, dict_t *dict);
diff --git a/tests/basic/volume-status.t b/tests/basic/volume-status.t
index 14ea1c64827..8cea5c7530a 100644
--- a/tests/basic/volume-status.t
+++ b/tests/basic/volume-status.t
@@ -10,6 +10,14 @@ function gluster_client_list_status () {
gluster volume status $V0 client-list | sed -n '/Name/','/total/'p | wc -l
}
+function gluster_fd_status () {
+ gluster volume status $V0 fd | sed -n '/Brick :/ p' | wc -l
+}
+
+function gluster_inode_status () {
+ gluster volume status $V0 inode | sed -n '/Connection / p' | wc -l
+}
+
TEST glusterd
TEST pidof glusterd
TEST $CLI volume info;
@@ -25,6 +33,10 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" nfs_up_status
## Mount FUSE
TEST $GFS -s $H0 --volfile-id $V0 $M0;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "8" gluster_fd_status
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1024" gluster_inode_status
+
##Disabling this test until the client-list command works for brick-multiplexing
#EXPECT_WITHIN $PROCESS_UP_TIMEOUT "7" gluster_client_list_status
diff --git a/xlators/features/cloudsync/src/cloudsync.c b/xlators/features/cloudsync/src/cloudsync.c
index 8d74202706e..48e27c372b6 100644
--- a/xlators/features/cloudsync/src/cloudsync.c
+++ b/xlators/features/cloudsync/src/cloudsync.c
@@ -1596,7 +1596,7 @@ cs_inodectx_to_dict (xlator_t *this,
int32_t
cs_priv_to_dict (xlator_t *this,
- dict_t *dict)
+ dict_t *dict, char *brickname)
{
return 0;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 049c90e9cd3..ac976c2195b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -5243,6 +5243,10 @@ glusterd_print_client_details (FILE *fp, dict_t *dict,
brick_req->op = GLUSTERD_BRICK_STATUS;
brick_req->name = "";
+ ret = dict_set_str (dict, "brick-name", brickinfo->path);
+ if (ret)
+ goto out;
+
ret = dict_set_int32 (dict, "cmd", GF_CLI_STATUS_CLIENTS);
if (ret)
goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index ffb6ae9c209..64855cd60fa 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -615,6 +615,9 @@ glusterd_brick_op_build_payload (glusterd_op_t op, glusterd_brickinfo_t *brickin
goto out;
brick_req->op = GLUSTERD_BRICK_STATUS;
brick_req->name = "";
+ ret = dict_set_str (dict, "brick-name", brickinfo->path);
+ if (ret)
+ goto out;
}
break;
case GD_OP_REBALANCE:
diff --git a/xlators/nfs/server/src/nfs.c b/xlators/nfs/server/src/nfs.c
index 577c67833ce..1ac5a9213ac 100644
--- a/xlators/nfs/server/src/nfs.c
+++ b/xlators/nfs/server/src/nfs.c
@@ -1635,7 +1635,7 @@ _nfs_export_is_for_vol (char *exname, char *volname)
}
int
-nfs_priv_to_dict (xlator_t *this, dict_t *dict)
+nfs_priv_to_dict (xlator_t *this, dict_t *dict, char *brickname)
{
int ret = -1;
struct nfs_state *priv = NULL;
diff --git a/xlators/protocol/server/src/server.c b/xlators/protocol/server/src/server.c
index 8fd2d7c384a..fe1fb71a7ef 100644
--- a/xlators/protocol/server/src/server.c
+++ b/xlators/protocol/server/src/server.c
@@ -167,7 +167,7 @@ ret:
int
-server_priv_to_dict (xlator_t *this, dict_t *dict)
+server_priv_to_dict (xlator_t *this, dict_t *dict, char *brickname)
{
server_conf_t *conf = NULL;
rpc_transport_t *xprt = NULL;
@@ -187,47 +187,55 @@ server_priv_to_dict (xlator_t *this, dict_t *dict)
pthread_mutex_lock (&conf->mutex);
{
list_for_each_entry (xprt, &conf->xprt_list, list) {
- peerinfo = &xprt->peerinfo;
- memset (key, 0, sizeof (key));
- snprintf (key, sizeof (key), "client%d.hostname",
- count);
- ret = dict_set_str (dict, key, peerinfo->identifier);
- if (ret)
- goto unlock;
-
- memset (key, 0, sizeof (key));
- snprintf (key, sizeof (key), "client%d.bytesread",
- count);
- ret = dict_set_uint64 (dict, key,
- xprt->total_bytes_read);
- if (ret)
- goto unlock;
-
- memset (key, 0, sizeof (key));
- snprintf (key, sizeof (key), "client%d.byteswrite",
- count);
- ret = dict_set_uint64 (dict, key,
- xprt->total_bytes_write);
- if (ret)
- goto unlock;
-
- memset (key, 0, sizeof (key));
- snprintf (key, sizeof (key), "client%d.opversion",
- count);
- ret = dict_set_uint32 (dict, key,
- peerinfo->max_op_version);
- if (ret)
- goto unlock;
-
- memset (key, 0, sizeof (key));
- snprintf (key, sizeof (key), "client%d.name",
- count);
- ret = dict_set_str (dict, key,
- xprt->xl_private->client_name);
- if (ret)
- goto unlock;
-
- count++;
+ if (!strcmp (brickname,
+ xprt->xl_private->bound_xl->name)) {
+ peerinfo = &xprt->peerinfo;
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key),
+ "client%d.hostname",
+ count);
+ ret = dict_set_str (dict, key,
+ peerinfo->identifier);
+ if (ret)
+ goto unlock;
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key),
+ "client%d.bytesread",
+ count);
+ ret = dict_set_uint64 (dict, key,
+ xprt->total_bytes_read);
+ if (ret)
+ goto unlock;
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key),
+ "client%d.byteswrite",
+ count);
+ ret = dict_set_uint64 (dict, key,
+ xprt->total_bytes_write);
+ if (ret)
+ goto unlock;
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key),
+ "client%d.opversion",
+ count);
+ ret = dict_set_uint32 (dict, key,
+ peerinfo->max_op_version);
+ if (ret)
+ goto unlock;
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "client%d.name",
+ count);
+ ret = dict_set_str (dict, key,
+ xprt->xl_private->client_name);
+ if (ret)
+ goto unlock;
+
+ count++;
+ }
}
}
unlock: