| author | Shreyas Siravara &lt;sshreyas@fb.com&gt; | 2016-03-28 14:17:53 -0700 |
|---|---|---|
| committer | Jeff Darcy &lt;jeff@pl.atyp.us&gt; | 2017-09-12 15:12:21 +0000 |
| commit | 14e24da1eb59a85fe99c22bafd8641ca2b75a923 | |
| tree | e027476d10acffbd7d8415884901883f47fae2b7 /rpc/rpc-transport/socket/src/socket.c | |
| parent | 60b35dbfa42a65d81a18efda2776c0e733c4e769 | |
event: Idle connection management
Summary:
- This diff adds support for detecting and tracking idle client connections.
- It allows *service translators* (server, nfs) to opt in to detecting and closing idle client connections.
- For now, the behavior is explicitly restricted to NFS as a safety measure.
Here are the debug logs when a client connection gets closed:
[2016-03-29 17:27:06.154232] W [socket.c:2426:socket_timeout_handler] 0-socket: Shutting down idle client connection (idle=20s,fd=20,conn=[2401:db00:11:d0af:face:0:3:0:957]->[2401:db00:11:d0af:face:0:3:0:2049])!
[2016-03-29 17:27:06.154292] D [event-epoll.c:655:__event_epoll_timeout_slot] 0-epoll: Connection on slot->fd=9 was idle for 20 seconds!
[2016-03-29 17:27:06.163282] D [socket.c:629:__socket_rwv] 0-socket.nfs-server: EOF on socket
[2016-03-29 17:27:06.163298] D [socket.c:2474:socket_event_handler] 0-transport: disconnecting now
[2016-03-29 17:27:06.163316] D [event-epoll.c:614:event_dispatch_epoll_handler] 0-epoll: generation bumped on idx=9 from gen=4 to slot->gen=5, fd=20, slot->fd=20
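Aside: the `__event_epoll_timeout_slot` message above comes from the epoll event loop in event-epoll.c, which is outside this diffstat. The sketch below is a minimal illustration of that mechanism, assuming each registered fd ("slot") records when it last saw traffic and a periodic sweep invokes the registered timeout handler. The names `idle_slot_t` and `sweep_idle_slots` and the hard-coded 20-second threshold are hypothetical, not the actual GlusterFS structures.

```c
/* Illustrative sketch only, not the actual event-epoll.c code.
 * Each registered fd ("slot") remembers when it last saw traffic;
 * a periodic sweep calls the slot's timeout handler once the idle
 * time crosses a threshold. */
#include <stdio.h>
#include <time.h>

#define IDLE_TIMEOUT_SECS 20   /* matches the idle=20s in the logs above */
#define MAX_SLOTS 1024

typedef int (*timeout_handler_t) (int fd, void *data, time_t idle_time);

typedef struct {
        int                fd;          /* <= 0 when the slot is unused    */
        time_t             last_active; /* updated on every pollin/pollout */
        void              *data;        /* e.g. an rpc_transport_t *       */
        timeout_handler_t  on_timeout;  /* NULL disables idle tracking     */
} idle_slot_t;

static idle_slot_t slots[MAX_SLOTS];

/* Called from the dispatch loop whenever a slot sees an event. */
static void
slot_mark_active (idle_slot_t *slot)
{
        slot->last_active = time (NULL);
}

/* Periodic sweep: fire the handler for every slot idle for too long. */
static void
sweep_idle_slots (void)
{
        time_t now = time (NULL);

        for (int i = 0; i < MAX_SLOTS; i++) {
                idle_slot_t *s = &slots[i];

                if (s->fd <= 0 || !s->on_timeout)
                        continue;

                time_t idle = now - s->last_active;
                if (idle >= IDLE_TIMEOUT_SECS) {
                        printf ("slot->fd=%d was idle for %ld seconds\n",
                                s->fd, (long) idle);
                        s->on_timeout (s->fd, s->data, idle);
                        slot_mark_active (s); /* avoid re-firing each sweep */
                }
        }
}
```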
Test Plan: Used stuck NFS mounts to create idle client connections, then unstuck them.
Reviewers: kvigor, rwareing
Reviewed By: rwareing
Subscribers: dld, moox, dph
Differential Revision: https://phabricator.fb.com/D3112099
Change-Id: Ic06c89e03f87daabab7f07f892390edd1a1fcc20
Signed-off-by: Jeff Darcy <jdarcy@fb.com>
Reviewed-on: https://review.gluster.org/18265
Reviewed-by: Jeff Darcy <jeff@pl.atyp.us>
Tested-by: Jeff Darcy <jeff@pl.atyp.us>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Smoke: Gluster Build System <jenkins@build.gluster.org>
Diffstat (limited to 'rpc/rpc-transport/socket/src/socket.c')
| -rw-r--r-- | rpc/rpc-transport/socket/src/socket.c | 117 |
1 file changed, 91 insertions, 26 deletions
diff --git a/rpc/rpc-transport/socket/src/socket.c b/rpc/rpc-transport/socket/src/socket.c
index 40a25bdba83..d63f56ed385 100644
--- a/rpc/rpc-transport/socket/src/socket.c
+++ b/rpc/rpc-transport/socket/src/socket.c
@@ -2353,6 +2353,76 @@ out:
 static int socket_disconnect (rpc_transport_t *this);
 
+/**
+ * Special event handler for sockets that are idle-ing.
+ *
+ * @fd: Socket file descriptor.
+ * @data: Usually an rpc_transport_t *
+ * @idle_time: How long a fd (connection) has been idle
+ * @event_pool: For almost anything else you'll need to stash
+ */
+static int
+socket_timeout_handler (int fd, void *data, time_t idle_time,
+                        struct event_pool *event_pool)
+{
+        int              ret           = 0;
+        char            *colon         = NULL;
+        char            *peer_addr     = NULL;
+        size_t           host_len      = 0;
+        short            port          = 0;
+        rpc_transport_t *transport     = NULL;
+        int              do_idle_close = 0;
+
+        transport = data;
+        do_idle_close = event_pool->close_idle_conns;
+
+        /**
+         * Are we a listener? (aka NFS?) if not, we shouldn't do anything.
+         */
+        if (!transport->listener) {
+                goto out;
+        }
+
+        peer_addr = transport->myinfo.identifier;
+        colon = strrchr (peer_addr, ':');
+        if (!colon) {
+                ret = -EINVAL;
+                goto out;
+        }
+
+        port = atoi (colon + 1);
+
+        /*
+         * Restrict this behavior to NFS only!
+         */
+        if (port != GF_NFS3_PORT) {
+                ret = -EPROTONOSUPPORT;
+                goto out;
+        }
+
+        /*
+         * We should only close the client connection if the slot was marked
+         * with 'do_idle_close'. This is usually set through a vol option that
+         * propagates to the event pool.
+         */
+        if (do_idle_close == 1) {
+                gf_log ("socket", GF_LOG_WARNING,
+                        "Shutting down idle client connection "
+                        "(idle=%lus,fd=%d,conn=[%s]->[%s])!",
+                        idle_time, fd, transport->peerinfo.identifier,
+                        transport->myinfo.identifier);
+                ret = shutdown (fd, SHUT_RDWR);
+        } else {
+                gf_log ("socket", GF_LOG_WARNING,
+                        "Found idle client connection "
+                        "(idle=%lus,fd=%d,conn=[%s]->[%s])!",
+                        idle_time, fd, transport->peerinfo.identifier,
+                        transport->myinfo.identifier);
+        }
+
+out:
+        return ret;
+}
+
 /* reads rpc_requests during pollin */
 static int
 socket_event_handler (int fd, int idx, void *data,
@@ -2803,31 +2873,24 @@ socket_server_event_handler (int fd, int idx, void *data,
                         new_priv->is_server = _gf_true;
                         rpc_transport_ref (new_trans);
 
-                        if (new_priv->own_thread) {
-                                if (pipe(new_priv->pipe) < 0) {
-                                        gf_log(this->name, GF_LOG_ERROR,
-                                               "could not create pipe");
-                                }
-                                ret = socket_spawn(new_trans);
-                                if (ret) {
-                                        gf_log(this->name, GF_LOG_ERROR,
-                                               "could not spawn thread");
-                                        sys_close (new_priv->pipe[0]);
-                                        sys_close (new_priv->pipe[1]);
-                                }
-                        } else {
-                                new_priv->idx =
-                                        event_register (ctx->event_pool,
-                                                        new_sock,
-                                                        socket_event_handler,
+                        if (new_priv->own_thread) {
+                                if (pipe(new_priv->pipe) < 0) {
+                                        gf_log(this->name, GF_LOG_ERROR,
+                                               "could not create pipe");
+                                }
+                                socket_spawn(new_trans);
+                        }
+                        else {
+                                new_priv->idx =
+                                        event_register (ctx->event_pool,
+                                                        new_sock,
+                                                        socket_event_handler,
+                                                        socket_timeout_handler,
                                                         new_trans,
-                                                        1, 0);
-                                if (new_priv->idx == -1) {
-                                        ret = -1;
-                                        gf_log(this->name, GF_LOG_ERROR,
-                                                "failed to register the socket with event");
-                                }
-                        }
+                                                        1, 0);
+                                if (new_priv->idx == -1)
+                                        ret = -1;
+                        }
                 }
                 pthread_mutex_unlock (&new_priv->lock);
@@ -3200,6 +3263,7 @@ handler:
         else {
                 priv->idx = event_register (ctx->event_pool, priv->sock,
                                             socket_event_handler,
+                                            socket_timeout_handler,
                                             this, 1, 1);
                 if (priv->idx == -1) {
                         gf_log ("", GF_LOG_WARNING,
@@ -3375,6 +3439,7 @@ socket_listen (rpc_transport_t *this)
                 priv->idx = event_register (ctx->event_pool, priv->sock,
                                             socket_server_event_handler,
+                                            NULL,
                                             this, 1, 0);
                 if (priv->idx == -1) {
@@ -3491,8 +3556,8 @@ socket_submit_reply (rpc_transport_t *this, rpc_transport_reply_t *reply)
         if (priv->connected != 1) {
                 if (!priv->submit_log && !priv->connect_finish_log) {
                         gf_log (this->name, GF_LOG_INFO,
-                                "not connected (priv->connected = %d)",
-                                priv->connected);
+                                "sock %d not connected (priv->connected = %d)",
+                                priv->sock, priv->connected);
                         priv->submit_log = 1;
                 }
                 goto unlock;
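The `close_idle_conns` flag consulted by `socket_timeout_handler` is only described here as "usually set through a vol option that propagates to the event pool"; that plumbing is not part of this patch. Below is a minimal sketch of what the opt-in could look like, with a hypothetical option name and a trimmed-down `event_pool` (both are assumptions, not the real GlusterFS definitions):

```c
/* Hedged sketch: how a service translator (e.g. NFS) might opt in to
 * idle-connection closing by flipping the flag on the shared event
 * pool at init time. The "idle-client-close" option name and this
 * trimmed event_pool are hypothetical; the patch does not show the
 * real vol-option plumbing. */
#include <string.h>

struct event_pool {
        int close_idle_conns;  /* consulted by socket_timeout_handler */
        /* ... many other fields elided ... */
};

static void
nfs_apply_idle_close_option (struct event_pool *pool, const char *value)
{
        /* Opt-in: only the NFS translator would set this, which is why
         * socket_timeout_handler merely logs (rather than shutting the
         * socket down) when the flag is left unset. */
        pool->close_idle_conns = (value && strcmp (value, "on") == 0) ? 1 : 0;
}
```

Keeping the flag on the shared event pool, rather than on each transport, lets the generic timeout handler stay service-agnostic, while the `GF_NFS3_PORT` check in socket.c provides the explicit NFS-only safety restriction described in the summary.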
