summaryrefslogtreecommitdiffstats
path: root/libglusterfs
diff options
context:
space:
mode:
authorShyam <srangana@redhat.com>2015-01-26 14:20:31 -0500
committerVijay Bellur <vbellur@redhat.com>2015-02-07 13:23:03 -0800
commita7f5893c9243c8c563db215352fa7e47f6968e8b (patch)
tree27feeeb5888accae0763593b489373cff1436d6a /libglusterfs
parentc61074400a45e69c6edbf82b8ed02568726d37ae (diff)
epoll: Adding the ability to configure epoll threads
Add the ability to configure the number of event threads for various gluster services. Currently with the multi thread epoll patch, it is possible to have more than one thread waiting on socket activity and processing the same. This thread count is currently static, which this commit makes dynamic. The current services which use IO path, i.e brick processes, any client process (nfs, FUSE, gfapi, heal, rebalance, etc.), gain 2 set parameters to control the number of threads that are processing events. These settings are, - client.event-threads <n> - server.event-threads <n> The client setting affects the client graph consumers, and the server setting affects the brick processes. These are processed and inited/reconfigured using the client/server protocol xlators. Other services (say glusterd) would need to extend similar configuration settings to take advantage of multi threaded event processing. At present glusterd is not enabled with this commit, as it does not stand to gain from this multi-threading (as I understand it). Change-Id: Id8422fc57a9f95a135158eb6477ccf9d3c9ea4d9 BUG: 1104462 Signed-off-by: Shyam <srangana@redhat.com> Reviewed-on: http://review.gluster.org/9488 Tested-by: Gluster Build System <jenkins@build.gluster.com> Reviewed-by: Vijay Bellur <vbellur@redhat.com>
Diffstat (limited to 'libglusterfs')
-rw-r--r--libglusterfs/src/event-epoll.c210
-rw-r--r--libglusterfs/src/event-poll.c17
-rw-r--r--libglusterfs/src/event.c21
-rw-r--r--libglusterfs/src/event.h15
-rw-r--r--libglusterfs/src/glusterfs.h2
5 files changed, 235 insertions, 30 deletions
diff --git a/libglusterfs/src/event-epoll.c b/libglusterfs/src/event-epoll.c
index 9082954e4e4..8d42fa71fb6 100644
--- a/libglusterfs/src/event-epoll.c
+++ b/libglusterfs/src/event-epoll.c
@@ -43,6 +43,10 @@ struct event_slot_epoll {
gf_lock_t lock;
};
+struct event_thread_data {
+ struct event_pool *event_pool;
+ int event_index;
+};
static struct event_slot_epoll *
__event_newtable (struct event_pool *event_pool, int table_idx)
@@ -232,7 +236,7 @@ done:
static struct event_pool *
-event_pool_new_epoll (int count)
+event_pool_new_epoll (int count, int eventthreadcount)
{
struct event_pool *event_pool = NULL;
int epfd = -1;
@@ -258,6 +262,8 @@ event_pool_new_epoll (int count)
event_pool->count = count;
+ event_pool->eventthreadcount = eventthreadcount;
+
pthread_mutex_init (&event_pool->mutex, NULL);
out:
@@ -585,11 +591,45 @@ event_dispatch_epoll_worker (void *data)
{
struct epoll_event event;
int ret = -1;
- struct event_pool *event_pool = data;
+ struct event_thread_data *ev_data = data;
+ struct event_pool *event_pool;
+ int myindex = -1;
+ int timetodie = 0;
+
+ GF_VALIDATE_OR_GOTO ("event", ev_data, out);
+
+ event_pool = ev_data->event_pool;
+ myindex = ev_data->event_index;
GF_VALIDATE_OR_GOTO ("event", event_pool, out);
+ gf_log ("epoll", GF_LOG_INFO, "Started thread with index %d", myindex);
+
for (;;) {
+ if (event_pool->eventthreadcount < myindex) {
+ /* ...time to die, thread count was decreased below
+ * this thread's index */
+ /* Start with extra safety at this point, reducing
+ * lock contention in normal case when threads are not
+ * reconfigured always */
+ pthread_mutex_lock (&event_pool->mutex);
+ {
+ if (event_pool->eventthreadcount <
+ myindex) {
+ /* if found true in critical section,
+ * die */
+ event_pool->pollers[myindex - 1] = 0;
+ timetodie = 1;
+ }
+ }
+ pthread_mutex_unlock (&event_pool->mutex);
+ if (timetodie) {
+ gf_log ("epoll", GF_LOG_INFO,
+ "Exited thread with index %d", myindex);
+ goto out;
+ }
+ }
+
ret = epoll_wait (event_pool->fd, &event, 1, -1);
if (ret == 0)
@@ -603,40 +643,164 @@ event_dispatch_epoll_worker (void *data)
ret = event_dispatch_epoll_handler (event_pool, &event);
}
out:
+ if (ev_data)
+ GF_FREE (ev_data);
return NULL;
}
-
-#define GLUSTERFS_EPOLL_MAXTHREADS 2
-
-
+/* Attempts to start the # of configured pollers, ensuring at least the first
+ * is started in a joinable state */
static int
event_dispatch_epoll (struct event_pool *event_pool)
{
- int i = 0;
- pthread_t pollers[GLUSTERFS_EPOLL_MAXTHREADS];
- int ret = -1;
-
- for (i = 0; i < GLUSTERFS_EPOLL_MAXTHREADS; i++) {
- ret = pthread_create (&pollers[i], NULL,
- event_dispatch_epoll_worker,
- event_pool);
- }
+ int i = 0;
+ pthread_t t_id;
+ int pollercount = 0;
+ int ret = -1;
+ struct event_thread_data *ev_data = NULL;
+
+ /* Start the configured number of pollers */
+ pthread_mutex_lock (&event_pool->mutex);
+ {
+ pollercount = event_pool->eventthreadcount;
+
+ /* Set to MAX if greater */
+ if (pollercount > EVENT_MAX_THREADS)
+ pollercount = EVENT_MAX_THREADS;
+
+ /* Default pollers to 1 in case this is incorrectly set */
+ if (pollercount <= 0)
+ pollercount = 1;
+
+ for (i = 0; i < pollercount; i++) {
+ ev_data = GF_CALLOC (1, sizeof (*ev_data),
+ gf_common_mt_event_pool);
+ if (!ev_data) {
+ gf_log ("epoll", GF_LOG_WARNING,
+ "Allocation failure for index %d", i);
+ if (i == 0) {
+ /* Need to succeed creating 0'th
+ * thread, to be joinable and wait */
+ break;
+ } else {
+ /* Inability to create other threads
+ * are a lesser evil, and ignored */
+ continue;
+ }
+ }
+
+ ev_data->event_pool = event_pool;
+ ev_data->event_index = i + 1;
+
+ ret = pthread_create (&t_id, NULL,
+ event_dispatch_epoll_worker,
+ ev_data);
+ if (!ret) {
+ event_pool->pollers[i] = t_id;
+
+ /* mark all threads other than one in index 0
+ * as detachable. Errors can be ignored, they
+ * spend their time as zombies if not detached
+ * and the thread counts are decreased */
+ if (i != 0)
+ pthread_detach (event_pool->pollers[i]);
+ } else {
+ gf_log ("epoll", GF_LOG_WARNING,
+ "Failed to start thread for index %d",
+ i);
+ if (i == 0) {
+ GF_FREE (ev_data);
+ break;
+ } else {
+ GF_FREE (ev_data);
+ continue;
+ }
+ }
+ }
+ }
+ pthread_mutex_unlock (&event_pool->mutex);
- for (i = 0; i < GLUSTERFS_EPOLL_MAXTHREADS; i++)
- pthread_join (pollers[i], NULL);
+ /* Just wait for the first thread, that is created in a joinable state
+ * and will never die, ensuring this function never returns */
+ if (event_pool->pollers[0] != 0)
+ pthread_join (event_pool->pollers[0], NULL);
return ret;
}
+int
+event_reconfigure_threads_epoll (struct event_pool *event_pool, int value)
+{
+ int i;
+ int ret;
+ pthread_t t_id;
+ int oldthreadcount;
+ struct event_thread_data *ev_data = NULL;
+
+ /* Set to MAX if greater */
+ if (value > EVENT_MAX_THREADS)
+ value = EVENT_MAX_THREADS;
+
+ /* Default pollers to 1 in case this is set incorrectly */
+ if (value <= 0)
+ value = 1;
+
+ pthread_mutex_lock (&event_pool->mutex);
+ {
+ oldthreadcount = event_pool->eventthreadcount;
+
+ if (oldthreadcount < value) {
+ /* create more poll threads */
+ for (i = oldthreadcount; i < value; i++) {
+ /* Start a thread if the index at this location
+ * is a 0, so that the older thread is confirmed
+ * as dead */
+ if (event_pool->pollers[i] == 0) {
+ ev_data = GF_CALLOC (1,
+ sizeof (*ev_data),
+ gf_common_mt_event_pool);
+ if (!ev_data) {
+ gf_log ("epoll", GF_LOG_WARNING,
+ "Allocation failure for"
+ " index %d", i);
+ continue;
+ }
+
+ ev_data->event_pool = event_pool;
+ ev_data->event_index = i + 1;
+
+ ret = pthread_create (&t_id, NULL,
+ event_dispatch_epoll_worker,
+ ev_data);
+ if (ret) {
+ gf_log ("epoll", GF_LOG_WARNING,
+ "Failed to start thread for"
+ " index %d", i);
+ GF_FREE (ev_data);
+ } else {
+ pthread_detach (t_id);
+ event_pool->pollers[i] = t_id;
+ }
+ }
+ }
+ }
+
+ /* if value decreases, threads will terminate themselves */
+ event_pool->eventthreadcount = value;
+ }
+ pthread_mutex_unlock (&event_pool->mutex);
+
+ return 0;
+}
struct event_ops event_ops_epoll = {
- .new = event_pool_new_epoll,
- .event_register = event_register_epoll,
- .event_select_on = event_select_on_epoll,
- .event_unregister = event_unregister_epoll,
- .event_unregister_close = event_unregister_close_epoll,
- .event_dispatch = event_dispatch_epoll
+ .new = event_pool_new_epoll,
+ .event_register = event_register_epoll,
+ .event_select_on = event_select_on_epoll,
+ .event_unregister = event_unregister_epoll,
+ .event_unregister_close = event_unregister_close_epoll,
+ .event_dispatch = event_dispatch_epoll,
+ .event_reconfigure_threads = event_reconfigure_threads_epoll
};
#endif
diff --git a/libglusterfs/src/event-poll.c b/libglusterfs/src/event-poll.c
index a7e2e663103..c91fa8487b5 100644
--- a/libglusterfs/src/event-poll.c
+++ b/libglusterfs/src/event-poll.c
@@ -95,7 +95,7 @@ out:
static struct event_pool *
-event_pool_new_poll (int count)
+event_pool_new_poll (int count, int eventthreadcount)
{
struct event_pool *event_pool = NULL;
int ret = -1;
@@ -171,6 +171,12 @@ event_pool_new_poll (int count)
return NULL;
}
+ if (eventthreadcount > 1) {
+ gf_log ("poll", GF_LOG_INFO,
+ "Currently poll does not use multiple event processing"
+ " threads, thread count (%d) ignored", eventthreadcount);
+ }
+
return event_pool;
}
@@ -469,6 +475,12 @@ out:
return -1;
}
+int
+event_reconfigure_threads_poll (struct event_pool *event_pool, int value)
+{
+ /* No-op for poll */
+ return 0;
+}
struct event_ops event_ops_poll = {
.new = event_pool_new_poll,
@@ -476,5 +488,6 @@ struct event_ops event_ops_poll = {
.event_select_on = event_select_on_poll,
.event_unregister = event_unregister_poll,
.event_unregister_close = event_unregister_close_poll,
- .event_dispatch = event_dispatch_poll
+ .event_dispatch = event_dispatch_poll,
+ .event_reconfigure_threads = event_reconfigure_threads_poll
};
diff --git a/libglusterfs/src/event.c b/libglusterfs/src/event.c
index 6c253df3c1a..4dd0f991700 100644
--- a/libglusterfs/src/event.c
+++ b/libglusterfs/src/event.c
@@ -29,7 +29,7 @@
struct event_pool *
-event_pool_new (int count)
+event_pool_new (int count, int eventthreadcount)
{
struct event_pool *event_pool = NULL;
extern struct event_ops event_ops_poll;
@@ -37,7 +37,7 @@ event_pool_new (int count)
#ifdef HAVE_SYS_EPOLL_H
extern struct event_ops event_ops_epoll;
- event_pool = event_ops_epoll.new (count);
+ event_pool = event_ops_epoll.new (count, eventthreadcount);
if (event_pool) {
event_pool->ops = &event_ops_epoll;
@@ -48,7 +48,7 @@ event_pool_new (int count)
#endif
if (!event_pool) {
- event_pool = event_ops_poll.new (count);
+ event_pool = event_ops_poll.new (count, eventthreadcount);
if (event_pool)
event_pool->ops = &event_ops_poll;
@@ -129,3 +129,18 @@ event_dispatch (struct event_pool *event_pool)
out:
return ret;
}
+
+int
+event_reconfigure_threads (struct event_pool *event_pool, int value)
+{
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO ("event", event_pool, out);
+
+ /* call event refresh function */
+ ret = event_pool->ops->event_reconfigure_threads (event_pool,
+ value);
+
+out:
+ return ret;
+}
diff --git a/libglusterfs/src/event.h b/libglusterfs/src/event.h
index 3b3ab0e4b2f..930a7d1e28b 100644
--- a/libglusterfs/src/event.h
+++ b/libglusterfs/src/event.h
@@ -33,6 +33,7 @@ typedef int (*event_handler_t) (int fd, int idx, void *data,
#define EVENT_EPOLL_TABLES 1024
#define EVENT_EPOLL_SLOTS 1024
+#define EVENT_MAX_THREADS 32
struct event_pool {
struct event_ops *ops;
@@ -53,10 +54,16 @@ struct event_pool {
void *evcache;
int evcache_size;
+
+ /* NOTE: Currently used only when event processing is done using
+ * epoll. */
+ int eventthreadcount; /* number of event threads to execute. */
+ pthread_t pollers[EVENT_MAX_THREADS]; /* poller thread_id store,
+ * and live status */
};
struct event_ops {
- struct event_pool * (*new) (int count);
+ struct event_pool * (*new) (int count, int eventthreadcount);
int (*event_register) (struct event_pool *event_pool, int fd,
event_handler_t handler,
@@ -71,9 +78,12 @@ struct event_ops {
int idx);
int (*event_dispatch) (struct event_pool *event_pool);
+
+ int (*event_reconfigure_threads) (struct event_pool *event_pool,
+ int newcount);
};
-struct event_pool * event_pool_new (int count);
+struct event_pool *event_pool_new (int count, int eventthreadcount);
int event_select_on (struct event_pool *event_pool, int fd, int idx,
int poll_in, int poll_out);
int event_register (struct event_pool *event_pool, int fd,
@@ -82,5 +92,6 @@ int event_register (struct event_pool *event_pool, int fd,
int event_unregister (struct event_pool *event_pool, int fd, int idx);
int event_unregister_close (struct event_pool *event_pool, int fd, int idx);
int event_dispatch (struct event_pool *event_pool);
+int event_reconfigure_threads (struct event_pool *event_pool, int value);
#endif /* _EVENT_H_ */
diff --git a/libglusterfs/src/glusterfs.h b/libglusterfs/src/glusterfs.h
index 8059c976368..9c078e1d5f9 100644
--- a/libglusterfs/src/glusterfs.h
+++ b/libglusterfs/src/glusterfs.h
@@ -157,6 +157,8 @@
#define GLUSTERFS_RPC_REPLY_SIZE 24
+#define STARTING_EVENT_THREADS 1
+
#define ZR_FILE_CONTENT_REQUEST(key) (!strncmp(key, ZR_FILE_CONTENT_STR, \
ZR_FILE_CONTENT_STRLEN))