diff options
author | Raghavendra Gowdappa <rgowdapp@redhat.com> | 2018-10-31 16:10:58 +0530 |
---|---|---|
committer | Raghavendra G <rgowdapp@redhat.com> | 2018-11-29 01:19:12 +0000 |
commit | 95e380eca19b9f0d03a53429535f15556e5724ad (patch) | |
tree | be32fca4fbfa7d29e8571545af26e784d34e294c /libglusterfs/src/event-epoll.c | |
parent | f0232d07f7e6543b56830be28f6e80f9085e6241 (diff) |
rpcsvc: provide each request handler thread its own queue
A single global per program queue is contended by all request handler
threads and event threads. This can lead to high contention. So,
reduce the contention by providing each request handler thread its own
private queue.
Thanks to "Manoj Pillai" <mpillai@redhat.com> for the idea of pairing a
single queue with a fixed request-handler-thread and event-thread,
which brought down the performance regression due to overhead of
queuing significantly.
Thanks to "Xavi Hernandez" <xhernandez@redhat.com> for discussion on
how to communicate the event-thread death to request-handler-thread.
Thanks to "Karan Sandha" <ksandha@redhat.com> for voluntarily running
the perf benchmarks to qualify that performance regression introduced
by ping-timer-fixes is fixed with this patch and patiently running
many iterations of regression tests while RCAing the issue.
Thanks to "Milind Changire" <mchangir@redhat.com> for patiently running
the many iterations of perf benchmarking tests while RCAing the
regression caused by ping-timer-expiry fixes.
Change-Id: I578c3fc67713f4234bd3abbec5d3fbba19059ea5
Fixes: bz#1644629
Signed-off-by: Raghavendra Gowdappa <rgowdapp@redhat.com>
Diffstat (limited to 'libglusterfs/src/event-epoll.c')
-rw-r--r-- | libglusterfs/src/event-epoll.c | 114 |
1 file changed, 101 insertions, 13 deletions
diff --git a/libglusterfs/src/event-epoll.c b/libglusterfs/src/event-epoll.c index 9826cc9e275..041a7e6c583 100644 --- a/libglusterfs/src/event-epoll.c +++ b/libglusterfs/src/event-epoll.c @@ -30,6 +30,7 @@ struct event_slot_epoll { int fd; int events; int gen; + int idx; gf_atomic_t ref; int do_close; int in_handler; @@ -37,6 +38,7 @@ struct event_slot_epoll { void *data; event_handler_t handler; gf_lock_t lock; + struct list_head poller_death; }; struct event_thread_data { @@ -57,6 +59,7 @@ __event_newtable(struct event_pool *event_pool, int table_idx) for (i = 0; i < EVENT_EPOLL_SLOTS; i++) { table[i].fd = -1; LOCK_INIT(&table[i].lock); + INIT_LIST_HEAD(&table[i].poller_death); } event_pool->ereg[table_idx] = table; @@ -66,7 +69,8 @@ __event_newtable(struct event_pool *event_pool, int table_idx) } static int -__event_slot_alloc(struct event_pool *event_pool, int fd) +__event_slot_alloc(struct event_pool *event_pool, int fd, + char notify_poller_death) { int i = 0; int table_idx = -1; @@ -109,8 +113,15 @@ __event_slot_alloc(struct event_pool *event_pool, int fd) table[i].gen = gen + 1; LOCK_INIT(&table[i].lock); + INIT_LIST_HEAD(&table[i].poller_death); table[i].fd = fd; + if (notify_poller_death) { + table[i].idx = table_idx * EVENT_EPOLL_SLOTS + i; + list_add_tail(&table[i].poller_death, + &event_pool->poller_death); + } + event_pool->slots_used[table_idx]++; break; @@ -121,13 +132,14 @@ __event_slot_alloc(struct event_pool *event_pool, int fd) } static int -event_slot_alloc(struct event_pool *event_pool, int fd) +event_slot_alloc(struct event_pool *event_pool, int fd, + char notify_poller_death) { int idx = -1; pthread_mutex_lock(&event_pool->mutex); { - idx = __event_slot_alloc(event_pool, fd); + idx = __event_slot_alloc(event_pool, fd, notify_poller_death); } pthread_mutex_unlock(&event_pool->mutex); @@ -155,6 +167,7 @@ __event_slot_dealloc(struct event_pool *event_pool, int idx) slot->fd = -1; slot->handled_error = 0; slot->in_handler = 0; + 
list_del_init(&slot->poller_death); event_pool->slots_used[table_idx]--; return; @@ -172,6 +185,15 @@ event_slot_dealloc(struct event_pool *event_pool, int idx) return; } +static int +event_slot_ref(struct event_slot_epoll *slot) +{ + if (!slot) + return -1; + + return GF_ATOMIC_INC(slot->ref); +} + static struct event_slot_epoll * event_slot_get(struct event_pool *event_pool, int idx) { @@ -188,12 +210,41 @@ event_slot_get(struct event_pool *event_pool, int idx) return NULL; slot = &table[offset]; - GF_ATOMIC_INC(slot->ref); + event_slot_ref(slot); return slot; } static void +__event_slot_unref(struct event_pool *event_pool, struct event_slot_epoll *slot, + int idx) +{ + int ref = -1; + int fd = -1; + int do_close = 0; + + ref = GF_ATOMIC_DEC(slot->ref); + if (ref) + /* slot still alive */ + goto done; + + LOCK(&slot->lock); + { + fd = slot->fd; + do_close = slot->do_close; + slot->do_close = 0; + } + UNLOCK(&slot->lock); + + __event_slot_dealloc(event_pool, idx); + + if (do_close) + sys_close(fd); +done: + return; +} + +static void event_slot_unref(struct event_pool *event_pool, struct event_slot_epoll *slot, int idx) { @@ -248,7 +299,7 @@ event_pool_new_epoll(int count, int eventthreadcount) event_pool->fd = epfd; event_pool->count = count; - + INIT_LIST_HEAD(&event_pool->poller_death); event_pool->eventthreadcount = eventthreadcount; event_pool->auto_thread_count = 0; @@ -297,7 +348,7 @@ __slot_update_events(struct event_slot_epoll *slot, int poll_in, int poll_out) int event_register_epoll(struct event_pool *event_pool, int fd, event_handler_t handler, void *data, int poll_in, - int poll_out) + int poll_out, char notify_poller_death) { int idx = -1; int ret = -1; @@ -328,7 +379,7 @@ event_register_epoll(struct event_pool *event_pool, int fd, if (destroy == 1) goto out; - idx = event_slot_alloc(event_pool, fd); + idx = event_slot_alloc(event_pool, fd, notify_poller_death); if (idx == -1) { gf_msg("epoll", GF_LOG_ERROR, 0, LG_MSG_SLOT_NOT_FOUND, "could not find 
slot for fd=%d", fd); @@ -591,7 +642,7 @@ pre_unlock: ret = handler(fd, idx, gen, data, (event->events & (EPOLLIN | EPOLLPRI)), (event->events & (EPOLLOUT)), - (event->events & (EPOLLERR | EPOLLHUP))); + (event->events & (EPOLLERR | EPOLLHUP)), 0); } out: event_slot_unref(event_pool, slot, idx); @@ -607,7 +658,9 @@ event_dispatch_epoll_worker(void *data) struct event_thread_data *ev_data = data; struct event_pool *event_pool; int myindex = -1; - int timetodie = 0; + int timetodie = 0, gen = 0; + struct list_head poller_death_notify; + struct event_slot_epoll *slot = NULL, *tmp = NULL; GF_VALIDATE_OR_GOTO("event", ev_data, out); @@ -619,7 +672,7 @@ event_dispatch_epoll_worker(void *data) gf_msg("epoll", GF_LOG_INFO, 0, LG_MSG_STARTED_EPOLL_THREAD, "Started" " thread with index %d", - myindex); + myindex - 1); pthread_mutex_lock(&event_pool->mutex); { @@ -637,20 +690,55 @@ event_dispatch_epoll_worker(void *data) pthread_mutex_lock(&event_pool->mutex); { if (event_pool->eventthreadcount < myindex) { + while (event_pool->poller_death_sliced) { + pthread_cond_wait(&event_pool->cond, + &event_pool->mutex); + } + + INIT_LIST_HEAD(&poller_death_notify); /* if found true in critical section, * die */ event_pool->pollers[myindex - 1] = 0; event_pool->activethreadcount--; timetodie = 1; + gen = ++event_pool->poller_gen; + list_for_each_entry(slot, &event_pool->poller_death, + poller_death) + { + event_slot_ref(slot); + } + + list_splice_init(&event_pool->poller_death, + &poller_death_notify); + event_pool->poller_death_sliced = 1; pthread_cond_broadcast(&event_pool->cond); } } pthread_mutex_unlock(&event_pool->mutex); if (timetodie) { + list_for_each_entry(slot, &poller_death_notify, poller_death) + { + slot->handler(slot->fd, 0, gen, slot->data, 0, 0, 0, 1); + } + + pthread_mutex_lock(&event_pool->mutex); + { + list_for_each_entry_safe(slot, tmp, &poller_death_notify, + poller_death) + { + __event_slot_unref(event_pool, slot, slot->idx); + } + + 
list_splice(&poller_death_notify, + &event_pool->poller_death); + event_pool->poller_death_sliced = 0; + pthread_cond_broadcast(&event_pool->cond); + } + pthread_mutex_unlock(&event_pool->mutex); + gf_msg("epoll", GF_LOG_INFO, 0, LG_MSG_EXITED_EPOLL_THREAD, - "Exited " - "thread with index %d", - myindex); + "Exited thread with index %d", myindex); + goto out; } } |