summaryrefslogtreecommitdiffstats
path: root/libglusterfs/src/event.h
diff options
context:
space:
mode:
authorRaghavendra G <rgowdapp@redhat.com>2017-05-05 15:21:30 +0530
committerRaghavendra G <rgowdapp@redhat.com>2017-05-12 05:26:42 +0000
commitcea8b702506ff914deadd056f4b7dd20a3ca7670 (patch)
tree954ca7e37696d57725d06343168bf7c6ed8bf22d /libglusterfs/src/event.h
parent333474e0d6efe1a2b3a9ecffc9bdff3e49325910 (diff)
event/epoll: Add back socket for polling of events immediately after
reading the entire rpc message from the wire Currently socket is added back for future events after higher layers (rpc, xlators etc) have processed the message. If message processing involves significant delay (as in writev replies processed by Erasure Coding), performance takes a hit. Hence this patch modifies transport/socket to add back the socket for polling of events immediately after reading the entire rpc message, but before notification to higher layers. credits: Thanks to "Kotresh Hiremath Ravishankar" <khiremat@redhat.com> for assistance in fixing a regression in bitrot caused by this patch. Change-Id: I04b6b9d0b51a1cfb86ecac3c3d87a5f388cf5800 BUG: 1448364 Signed-off-by: Raghavendra G <rgowdapp@redhat.com> Reviewed-on: https://review.gluster.org/15036 CentOS-regression: Gluster Build System <jenkins@build.gluster.org> NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org> Smoke: Gluster Build System <jenkins@build.gluster.org> Reviewed-by: Amar Tumballi <amarts@redhat.com>
Diffstat (limited to 'libglusterfs/src/event.h')
-rw-r--r--libglusterfs/src/event.h11
1 file changed, 10 insertions, 1 deletion
diff --git a/libglusterfs/src/event.h b/libglusterfs/src/event.h
index 1348f5d05c0..c60b14ad04b 100644
--- a/libglusterfs/src/event.h
+++ b/libglusterfs/src/event.h
@@ -23,7 +23,7 @@ struct event_data {
} __attribute__ ((__packed__, __may_alias__));
-typedef int (*event_handler_t) (int fd, int idx, void *data,
+typedef int (*event_handler_t) (int fd, int idx, int gen, void *data,
int poll_in, int poll_out, int poll_err);
#define EVENT_EPOLL_TABLES 1024
@@ -73,6 +73,11 @@ struct event_pool {
};
+struct event_destroy_data {
+ int readfd;
+ struct event_pool *pool;
+};
+
struct event_ops {
struct event_pool * (*new) (int count, int eventthreadcount);
@@ -93,6 +98,8 @@ struct event_ops {
int (*event_reconfigure_threads) (struct event_pool *event_pool,
int newcount);
int (*event_pool_destroy) (struct event_pool *event_pool);
+ int (*event_handled) (struct event_pool *event_pool, int fd, int idx,
+ int gen);
};
struct event_pool *event_pool_new (int count, int eventthreadcount);
@@ -107,4 +114,6 @@ int event_dispatch (struct event_pool *event_pool);
int event_reconfigure_threads (struct event_pool *event_pool, int value);
int event_pool_destroy (struct event_pool *event_pool);
int event_dispatch_destroy (struct event_pool *event_pool);
+int event_handled (struct event_pool *event_pool, int fd, int idx, int gen);
+
#endif /* _EVENT_H_ */