summaryrefslogtreecommitdiffstats
path: root/xlators/protocol
diff options
context:
space:
mode:
authorhari gowtham <hgowtham@redhat.com>2018-04-11 17:38:26 +0530
committerAtin Mukherjee <amukherj@redhat.com>2018-04-19 02:54:50 +0000
commitbe26b0da2f1a7fe336400de6a1c016716983bd38 (patch)
tree573d0289d2556cbf99085e7888197bea2b07ee23 /xlators/protocol
parent054cecc30676017f83a18847734d9fe0fcb8ea72 (diff)
glusterd: volume inode/fd status broken with brick mux
Problem: The values for inode/fd were populated from the ctx received from the server xlator. Without brickmux, every brick from a volume belonged to a single process. So searching the server and populating it worked. With brickmux, a number of bricks can be confined to a single process. These bricks can be from different volumes too (if we use the max-bricks-per-process option). If they are from different volumes, using the server xlator to populate causes problems. Fix: Use the brick to validate and populate the inode/fd status. Signed-off-by: hari gowtham <hgowtham@redhat.com> Change-Id: I2543fa5397ea095f8338b518460037bba3dfdbfd fixes: bz#1566067
Diffstat (limited to 'xlators/protocol')
-rw-r--r--xlators/protocol/server/src/server.c92
1 files changed, 50 insertions, 42 deletions
diff --git a/xlators/protocol/server/src/server.c b/xlators/protocol/server/src/server.c
index 8fd2d7c384a..fe1fb71a7ef 100644
--- a/xlators/protocol/server/src/server.c
+++ b/xlators/protocol/server/src/server.c
@@ -167,7 +167,7 @@ ret:
int
-server_priv_to_dict (xlator_t *this, dict_t *dict)
+server_priv_to_dict (xlator_t *this, dict_t *dict, char *brickname)
{
server_conf_t *conf = NULL;
rpc_transport_t *xprt = NULL;
@@ -187,47 +187,55 @@ server_priv_to_dict (xlator_t *this, dict_t *dict)
pthread_mutex_lock (&conf->mutex);
{
list_for_each_entry (xprt, &conf->xprt_list, list) {
- peerinfo = &xprt->peerinfo;
- memset (key, 0, sizeof (key));
- snprintf (key, sizeof (key), "client%d.hostname",
- count);
- ret = dict_set_str (dict, key, peerinfo->identifier);
- if (ret)
- goto unlock;
-
- memset (key, 0, sizeof (key));
- snprintf (key, sizeof (key), "client%d.bytesread",
- count);
- ret = dict_set_uint64 (dict, key,
- xprt->total_bytes_read);
- if (ret)
- goto unlock;
-
- memset (key, 0, sizeof (key));
- snprintf (key, sizeof (key), "client%d.byteswrite",
- count);
- ret = dict_set_uint64 (dict, key,
- xprt->total_bytes_write);
- if (ret)
- goto unlock;
-
- memset (key, 0, sizeof (key));
- snprintf (key, sizeof (key), "client%d.opversion",
- count);
- ret = dict_set_uint32 (dict, key,
- peerinfo->max_op_version);
- if (ret)
- goto unlock;
-
- memset (key, 0, sizeof (key));
- snprintf (key, sizeof (key), "client%d.name",
- count);
- ret = dict_set_str (dict, key,
- xprt->xl_private->client_name);
- if (ret)
- goto unlock;
-
- count++;
+ if (!strcmp (brickname,
+ xprt->xl_private->bound_xl->name)) {
+ peerinfo = &xprt->peerinfo;
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key),
+ "client%d.hostname",
+ count);
+ ret = dict_set_str (dict, key,
+ peerinfo->identifier);
+ if (ret)
+ goto unlock;
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key),
+ "client%d.bytesread",
+ count);
+ ret = dict_set_uint64 (dict, key,
+ xprt->total_bytes_read);
+ if (ret)
+ goto unlock;
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key),
+ "client%d.byteswrite",
+ count);
+ ret = dict_set_uint64 (dict, key,
+ xprt->total_bytes_write);
+ if (ret)
+ goto unlock;
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key),
+ "client%d.opversion",
+ count);
+ ret = dict_set_uint32 (dict, key,
+ peerinfo->max_op_version);
+ if (ret)
+ goto unlock;
+
+ memset (key, 0, sizeof (key));
+ snprintf (key, sizeof (key), "client%d.name",
+ count);
+ ret = dict_set_str (dict, key,
+ xprt->xl_private->client_name);
+ if (ret)
+ goto unlock;
+
+ count++;
+ }
}
}
unlock: