path: root/libglusterfs/src/client_t.c
author    Jeff Darcy <jdarcy@redhat.com>        2016-12-08 16:24:15 -0500
committer Vijay Bellur <vbellur@redhat.com>     2017-01-30 19:13:58 -0500
commit    1a95fc3036db51b82b6a80952f0908bc2019d24a (patch)
tree      b983ac196a8165d5cb5e860a5ef97d3e9a41b5c9 /libglusterfs/src/client_t.c
parent    7f7d7a939e46b330a084d974451eee4757ba61b4 (diff)
core: run many bricks within one glusterfsd process
This patch adds support for multiple brick translator stacks running in a single brick server process. This reduces our per-brick memory usage by approximately 3x, and our appetite for TCP ports even more. It also creates potential to avoid process/thread thrashing, and to improve QoS by scheduling more carefully across the bricks, but realizing that potential will require further work.

Multiplexing is controlled by the "cluster.brick-multiplex" global option. By default it's off, and bricks are started in separate processes as before. If multiplexing is enabled, then *compatible* bricks (mostly those with the same transport options) will be started in the same process.

Change-Id: I45059454e51d6f4cbb29a4953359c09a408695cb
BUG: 1385758
Signed-off-by: Jeff Darcy <jdarcy@redhat.com>
Reviewed-on: https://review.gluster.org/14763
Smoke: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
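A note on usage: cluster.brick-multiplex is a cluster-wide option rather than a per-volume one, so on releases that include this change it is typically enabled with a command along the lines of "gluster volume set all cluster.brick-multiplex on" and disabled by setting it back to "off" (exact syntax may differ between releases).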
Diffstat (limited to 'libglusterfs/src/client_t.c')
-rw-r--r--  libglusterfs/src/client_t.c | 49
1 file changed, 34 insertions(+), 15 deletions(-)
diff --git a/libglusterfs/src/client_t.c b/libglusterfs/src/client_t.c
index b3eb4e4df8c..c20c4089ec3 100644
--- a/libglusterfs/src/client_t.c
+++ b/libglusterfs/src/client_t.c
@@ -331,11 +331,25 @@ gf_client_ref (client_t *client)
 static void
+gf_client_destroy_recursive (xlator_t *xl, client_t *client)
+{
+        xlator_list_t   *trav;
+
+        if (xl->cbks->client_destroy) {
+                xl->cbks->client_destroy (xl, client);
+        }
+
+        for (trav = xl->children; trav; trav = trav->next) {
+                gf_client_destroy_recursive (trav->xlator, client);
+        }
+}
+
+
+static void
 client_destroy (client_t *client)
 {
         clienttable_t     *clienttable = NULL;
         glusterfs_graph_t *gtrav = NULL;
-        xlator_t          *xtrav = NULL;
 
         if (client == NULL){
                 gf_msg_callingfn ("xlator", GF_LOG_ERROR, EINVAL,
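The helper added above replaces a flat walk over xtrav->next (the linear list of translators in a graph) with a top-down recursion over each translator's children list, so every translator's client_destroy callback is reached even when several brick stacks sit under a shared parent, as happens once bricks are multiplexed into one process. The stand-alone sketch below models only that traversal shape; the xnode/xnode_list types are hypothetical stand-ins mirroring xlator_t and its xlator_list_t children, not code from the patch:

/* Sketch only: stand-in types that mirror xlator_t's singly linked
 * "children" list (xlator_list_t); not code from the patch. */
#include <stdio.h>

struct xnode;

struct xnode_list {
        struct xnode      *node;
        struct xnode_list *next;
};

struct xnode {
        const char        *name;
        struct xnode_list *children;
};

static void
visit_recursive (struct xnode *n)
{
        struct xnode_list *trav;

        printf ("visiting %s\n", n->name);        /* stands in for the client_destroy callback */

        for (trav = n->children; trav; trav = trav->next)
                visit_recursive (trav->node);     /* descend into every attached child stack */
}

int
main (void)
{
        struct xnode      leaf_a = { "brick-stack-a", NULL };
        struct xnode      leaf_b = { "brick-stack-b", NULL };
        struct xnode_list lb     = { &leaf_b, NULL };
        struct xnode_list la     = { &leaf_a, &lb };
        struct xnode      top    = { "shared-parent", &la };

        visit_recursive (&top);   /* visits shared-parent, brick-stack-a, brick-stack-b */
        return 0;
}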
@@ -358,12 +372,7 @@ client_destroy (client_t *client)
         UNLOCK (&clienttable->lock);
 
         list_for_each_entry (gtrav, &client->this->ctx->graphs, list) {
-                xtrav = gtrav->top;
-                while (xtrav != NULL) {
-                        if (xtrav->cbks->client_destroy != NULL)
-                                xtrav->cbks->client_destroy (xtrav, client);
-                        xtrav = xtrav->next;
-                }
+                gf_client_destroy_recursive (gtrav->top, client);
         }
 
         GF_FREE (client->auth.data);
         GF_FREE (client->auth.username);
@@ -375,22 +384,32 @@ out:
         return;
 }
 
+static int
+gf_client_disconnect_recursive (xlator_t *xl, client_t *client)
+{
+        int              ret = 0;
+        xlator_list_t   *trav;
+
+        if (xl->cbks->client_disconnect) {
+                ret = xl->cbks->client_disconnect (xl, client);
+        }
+
+        for (trav = xl->children; trav; trav = trav->next) {
+                ret |= gf_client_disconnect_recursive (trav->xlator, client);
+        }
+
+        return ret;
+}
+
 int
 gf_client_disconnect (client_t *client)
 {
         int                ret = 0;
         glusterfs_graph_t *gtrav = NULL;
-        xlator_t          *xtrav = NULL;
 
         list_for_each_entry (gtrav, &client->this->ctx->graphs, list) {
-                xtrav = gtrav->top;
-                while (xtrav != NULL) {
-                        if (xtrav->cbks->client_disconnect != NULL)
-                                if (xtrav->cbks->client_disconnect (xtrav, client) != 0)
-                                        ret = -1;
-                        xtrav = xtrav->next;
-                }
+                ret |= gf_client_disconnect_recursive (gtrav->top, client);
         }
 
         return ret;
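One behavioural nuance of the disconnect path: the old loop normalised any callback failure to exactly -1, while the new recursion ORs the per-subtree results together, so gf_client_disconnect still returns nonzero when any client_disconnect callback fails, but the value is no longer guaranteed to be -1. The stand-alone sketch below (hypothetical types and values, not code from the patch) shows how the OR keeps a child failure visible in the final result:

/* Sketch only: stand-in types, not the patch's xlator_t machinery. */
#include <stdio.h>

struct node {
        int          result;   /* what this node's disconnect callback returns */
        struct node *child;    /* one child is enough to show the pattern      */
};

static int
collect (struct node *n)
{
        int ret = n->result;

        if (n->child)
                ret |= collect (n->child);   /* any nonzero child keeps ret nonzero */

        return ret;
}

int
main (void)
{
        struct node failing_leaf = { -1, NULL };          /* a callback that fails    */
        struct node healthy_root = { 0, &failing_leaf };  /* a callback that succeeds */

        printf ("%d\n", collect (&healthy_root));         /* prints -1: failure is not lost */
        return 0;
}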