Diffstat (limited to 'rpc/rpc-transport')
-rw-r--r--   rpc/rpc-transport/rdma/src/Makefile.am     |   10
-rw-r--r--   rpc/rpc-transport/rdma/src/name.c          |   80
-rw-r--r--   rpc/rpc-transport/rdma/src/name.h          |    9
-rw-r--r--   rpc/rpc-transport/rdma/src/rdma.c          | 3145
-rw-r--r--   rpc/rpc-transport/rdma/src/rdma.h          |  105
-rw-r--r--   rpc/rpc-transport/socket/src/Makefile.am   |   10
-rw-r--r--   rpc/rpc-transport/socket/src/name.c        |   56
-rw-r--r--   rpc/rpc-transport/socket/src/socket.c      | 1879
-rw-r--r--   rpc/rpc-transport/socket/src/socket.h      |  109
9 files changed, 3005 insertions, 2398 deletions
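
The diffstat above is a filtered view: this change reworks the rdma transport to establish connections through the librdmacm connection manager instead of hand-rolled ibverbs QP state transitions (hence the Makefile.am switch from -libverbs to -libverbs -lrdmacm, and the name.c/name.h signatures that now take a struct rdma_cm_id * rather than a socket fd). As a rough guide to the flow the patch adopts, below is a minimal, synchronous client-side sketch of the rdma_cm connect sequence. It is not code from this patch (the patch drives the same calls from its asynchronous gf_rdma_cm_event_handler thread); the cm_wait() helper, the function name and the hard-coded timeout/retry values are assumptions made purely for illustration.

/* Minimal sketch, not part of this patch: the librdmacm client connect
 * sequence that rdma.c now follows (asynchronously, via its CM event
 * thread).  The pd, CQs and SRQ must belong to id->verbs, which is why
 * the patch looks up the gf_rdma_device_t only after address resolution.
 * Error paths and resource cleanup are trimmed for brevity.
 */
#include <rdma/rdma_cma.h>
#include <stdio.h>

static int
cm_wait (struct rdma_event_channel *ec, enum rdma_cm_event_type expected)
{
        struct rdma_cm_event *ev = NULL;

        if (rdma_get_cm_event (ec, &ev) != 0)
                return -1;

        if (ev->event != expected) {
                fprintf (stderr, "got %s, expected %s\n",
                         rdma_event_str (ev->event),
                         rdma_event_str (expected));
                rdma_ack_cm_event (ev);
                return -1;
        }

        return rdma_ack_cm_event (ev);
}

int
rdma_cm_connect_sketch (struct sockaddr *dst, struct ibv_pd *pd,
                        struct ibv_cq *send_cq, struct ibv_cq *recv_cq,
                        struct ibv_srq *srq)
{
        struct rdma_event_channel *ec         = NULL;
        struct rdma_cm_id         *id         = NULL;
        struct ibv_qp_init_attr    qp_attr    = {0, };
        struct rdma_conn_param     conn_param = {0, };

        ec = rdma_create_event_channel ();
        if (ec == NULL)
                return -1;

        if (rdma_create_id (ec, &id, NULL, RDMA_PS_TCP) != 0)
                return -1;

        /* address and route resolution each complete via a CM event */
        if (rdma_resolve_addr (id, NULL, dst, 2000) != 0 ||
            cm_wait (ec, RDMA_CM_EVENT_ADDR_RESOLVED) != 0)
                return -1;

        if (rdma_resolve_route (id, 2000) != 0 ||
            cm_wait (ec, RDMA_CM_EVENT_ROUTE_RESOLVED) != 0)
                return -1;

        /* rdma_create_qp() replaces ibv_create_qp() plus the manual
         * INIT -> RTR -> RTS ibv_modify_qp() transitions that this
         * patch removes. */
        qp_attr.send_cq          = send_cq;
        qp_attr.recv_cq          = recv_cq;
        qp_attr.srq              = srq;
        qp_attr.cap.max_send_wr  = 64;
        qp_attr.cap.max_recv_wr  = 64;
        qp_attr.cap.max_send_sge = 2;
        qp_attr.cap.max_recv_sge = 1;
        qp_attr.qp_type          = IBV_QPT_RC;

        if (rdma_create_qp (id, pd, &qp_attr) != 0)
                return -1;

        conn_param.responder_resources = 1;
        conn_param.initiator_depth     = 1;
        conn_param.retry_count         = 7;
        conn_param.rnr_retry_count     = 7;

        if (rdma_connect (id, &conn_param) != 0 ||
            cm_wait (ec, RDMA_CM_EVENT_ESTABLISHED) != 0)
                return -1;

        /* id->qp is now usable for ibv_post_send()/ibv_post_srq_recv() */
        return 0;
}
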
diff --git a/rpc/rpc-transport/rdma/src/Makefile.am b/rpc/rpc-transport/rdma/src/Makefile.am index b4b940bca..2bf7cf238 100644 --- a/rpc/rpc-transport/rdma/src/Makefile.am +++ b/rpc/rpc-transport/rdma/src/Makefile.am @@ -3,18 +3,20 @@ transport_LTLIBRARIES = rdma.la transportdir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/rpc-transport -rdma_la_LDFLAGS = -module -avoidversion +rdma_la_LDFLAGS = -module -avoid-version rdma_la_SOURCES = rdma.c name.c rdma_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \ - -libverbs + -libverbs -lrdmacm noinst_HEADERS = rdma.h name.h -I$(top_srcdir)/libglusterfs/src -I$(top_srcdir)/rpc/rpc-lib/src/ \ -I$(top_srcdir)/xlators/protocol/lib/src/ -shared -nostartfiles $(GF_CFLAGS) -AM_CFLAGS = -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -D$(GF_HOST_OS) \ +AM_CPPFLAGS = $(GF_CPPFLAGS) \ -I$(top_srcdir)/libglusterfs/src -I$(top_srcdir)/rpc/rpc-lib/src/ \ - -I$(top_srcdir)/rpc/xdr/src -shared -nostartfiles $(GF_CFLAGS) + -I$(top_srcdir)/rpc/xdr/src + +AM_CFLAGS = -Wall $(GF_CFLAGS) CLEANFILES = *~ diff --git a/rpc/rpc-transport/rdma/src/name.c b/rpc/rpc-transport/rdma/src/name.c index e5f7ba2ca..c57428ad6 100644 --- a/rpc/rpc-transport/rdma/src/name.c +++ b/rpc/rpc-transport/rdma/src/name.c @@ -13,12 +13,7 @@ #include <errno.h> #include <netdb.h> #include <string.h> - -#ifdef CLIENT_PORT_CEILING -#undef CLIENT_PORT_CEILING -#endif - -#define CLIENT_PORT_CEILING 1024 +#include <rdma/rdma_cma.h> #ifndef AF_INET_SDP #define AF_INET_SDP 27 @@ -26,6 +21,8 @@ #include "rpc-transport.h" #include "rdma.h" +#include "common-utils.h" + int32_t gf_resolve_ip6 (const char *hostname, @@ -35,28 +32,43 @@ gf_resolve_ip6 (const char *hostname, struct addrinfo **addr_info); static int32_t -af_inet_bind_to_port_lt_ceiling (int fd, struct sockaddr *sockaddr, +af_inet_bind_to_port_lt_ceiling (struct rdma_cm_id *cm_id, + struct sockaddr *sockaddr, socklen_t sockaddr_len, int ceiling) { - int32_t ret = -1; - /* struct sockaddr_in sin = {0, }; */ - uint16_t port = ceiling - 1; + int32_t ret = -1; + uint16_t port = ceiling - 1; + // by default assume none of the ports are blocked and all are available + gf_boolean_t ports[1024] = {_gf_false,}; + int i = 0; + + ret = gf_process_reserved_ports (ports); + if (ret != 0) { + for (i = 0; i < 1024; i++) + ports[i] = _gf_false; + } while (port) { switch (sockaddr->sa_family) { case AF_INET6: - ((struct sockaddr_in6 *)sockaddr)->sin6_port = htons (port); + ((struct sockaddr_in6 *)sockaddr)->sin6_port + = htons (port); break; case AF_INET_SDP: case AF_INET: - ((struct sockaddr_in *)sockaddr)->sin_port = htons (port); + ((struct sockaddr_in *)sockaddr)->sin_port + = htons (port); break; } - - ret = bind (fd, sockaddr, sockaddr_len); + // ignore the reserved ports + if (ports[port] == _gf_true) { + port--; + continue; + } + ret = rdma_bind_addr (cm_id, sockaddr); if (ret == 0) break; @@ -70,11 +82,10 @@ af_inet_bind_to_port_lt_ceiling (int fd, struct sockaddr *sockaddr, return ret; } +#if 0 static int32_t -af_unix_client_bind (rpc_transport_t *this, - struct sockaddr *sockaddr, - socklen_t sockaddr_len, - int sock) +af_unix_client_bind (rpc_transport_t *this, struct sockaddr *sockaddr, + socklen_t sockaddr_len, struct rdma_cm_id *cm_id) { data_t *path_data = NULL; struct sockaddr_un *addr = NULL; @@ -106,6 +117,7 @@ af_unix_client_bind (rpc_transport_t *this, err: return ret; } +#endif static int32_t client_fill_address_family (rpc_transport_t *this, struct sockaddr *sockaddr) @@ -153,13 +165,11 @@ client_fill_address_family 
(rpc_transport_t *this, struct sockaddr *sockaddr) sockaddr->sa_family = AF_INET6; } else if (!strcasecmp (address_family, "inet-sdp")) { sockaddr->sa_family = AF_INET_SDP; - } else if (!strcasecmp (address_family, "inet/inet6") - || !strcasecmp (address_family, "inet6/inet")) { - sockaddr->sa_family = AF_UNSPEC; } else { gf_log (this->name, GF_LOG_ERROR, "unknown address-family (%s) specified", address_family); + sockaddr->sa_family = AF_UNSPEC; return -1; } } @@ -354,6 +364,8 @@ af_inet_server_get_local_sockaddr (rpc_transport_t *this, if (listen_port_data) { listen_port = data_to_uint16 (listen_port_data); } else { + listen_port = GF_DEFAULT_RDMA_LISTEN_PORT; + if (addr->sa_family == AF_INET6) { struct sockaddr_in6 *in = (struct sockaddr_in6 *) addr; in->sin6_addr = in6addr_any; @@ -404,10 +416,8 @@ out: } int32_t -gf_rdma_client_bind (rpc_transport_t *this, - struct sockaddr *sockaddr, - socklen_t *sockaddr_len, - int sock) +gf_rdma_client_bind (rpc_transport_t *this, struct sockaddr *sockaddr, + socklen_t *sockaddr_len, struct rdma_cm_id *cm_id) { int ret = 0; @@ -419,22 +429,24 @@ gf_rdma_client_bind (rpc_transport_t *this, *sockaddr_len = sizeof (struct sockaddr_in); case AF_INET6: - ret = af_inet_bind_to_port_lt_ceiling (sock, sockaddr, + ret = af_inet_bind_to_port_lt_ceiling (cm_id, sockaddr, *sockaddr_len, - CLIENT_PORT_CEILING); + GF_CLIENT_PORT_CEILING); if (ret == -1) { gf_log (this->name, GF_LOG_WARNING, - "cannot bind inet socket (%d) to port " - "less than %d (%s)", - sock, CLIENT_PORT_CEILING, strerror (errno)); + "cannot bind rdma_cm_id to port " + "less than %d (%s)", GF_CLIENT_PORT_CEILING, + strerror (errno)); ret = 0; } break; case AF_UNIX: *sockaddr_len = sizeof (struct sockaddr_un); +#if 0 ret = af_unix_client_bind (this, (struct sockaddr *)sockaddr, *sockaddr_len, sock); +#endif break; default: @@ -521,21 +533,19 @@ gf_rdma_server_get_local_sockaddr (rpc_transport_t *this, addr->sa_family = AF_INET_SDP; } else if (!strcasecmp (address_family, "unix")) { addr->sa_family = AF_UNIX; - } else if (!strcasecmp (address_family, "inet/inet6") - || !strcasecmp (address_family, "inet6/inet")) { - addr->sa_family = AF_UNSPEC; } else { gf_log (this->name, GF_LOG_ERROR, "unknown address family (%s) specified", address_family); + addr->sa_family = AF_UNSPEC; ret = -1; goto err; } } else { gf_log (this->name, GF_LOG_DEBUG, "option address-family not specified, defaulting " - "to inet/inet6"); - addr->sa_family = AF_UNSPEC; + "to inet"); + addr->sa_family = AF_INET; } switch (addr->sa_family) diff --git a/rpc/rpc-transport/rdma/src/name.h b/rpc/rpc-transport/rdma/src/name.h index 114ed1661..742fc5fc3 100644 --- a/rpc/rpc-transport/rdma/src/name.h +++ b/rpc/rpc-transport/rdma/src/name.h @@ -11,16 +11,13 @@ #ifndef _IB_VERBS_NAME_H #define _IB_VERBS_NAME_H -#include <sys/socket.h> -#include <sys/un.h> +#include <rdma/rdma_cma.h> #include "compat.h" int32_t -gf_rdma_client_bind (rpc_transport_t *this, - struct sockaddr *sockaddr, - socklen_t *sockaddr_len, - int sock); +gf_rdma_client_bind (rpc_transport_t *this, struct sockaddr *sockaddr, + socklen_t *sockaddr_len, struct rdma_cm_id *cm_id); int32_t gf_rdma_client_get_remote_sockaddr (rpc_transport_t *this, diff --git a/rpc/rpc-transport/rdma/src/rdma.c b/rpc/rpc-transport/rdma/src/rdma.c index 8e9472fc6..6e6099a98 100644 --- a/rpc/rpc-transport/rdma/src/rdma.c +++ b/rpc/rpc-transport/rdma/src/rdma.c @@ -8,7 +8,6 @@ cases as published by the Free Software Foundation. 
*/ - #ifndef _CONFIG_H #define _CONFIG_H #include "config.h" @@ -21,6 +20,7 @@ #include "name.h" #include "byte-order.h" #include "xlator.h" +#include "xdr-rpc.h" #include <signal.h> #define GF_RDMA_LOG_NAME "rpc-transport/rdma" @@ -34,99 +34,29 @@ gf_rdma_post_ref (gf_rdma_post_t *post); int gf_rdma_post_unref (gf_rdma_post_t *post); -int32_t -gf_resolve_ip6 (const char *hostname, - uint16_t port, - int family, - void **dnscache, - struct addrinfo **addr_info); - -static uint16_t -gf_rdma_get_local_lid (struct ibv_context *context, - int32_t port) -{ - struct ibv_port_attr attr; +static void * +gf_rdma_send_completion_proc (void *data); - if (ibv_query_port (context, port, &attr)) - return 0; +static void * +gf_rdma_recv_completion_proc (void *data); - return attr.lid; -} - -static const char * -get_port_state_str(enum ibv_port_state pstate) -{ - switch (pstate) { - case IBV_PORT_DOWN: return "PORT_DOWN"; - case IBV_PORT_INIT: return "PORT_INIT"; - case IBV_PORT_ARMED: return "PORT_ARMED"; - case IBV_PORT_ACTIVE: return "PORT_ACTIVE"; - case IBV_PORT_ACTIVE_DEFER: return "PORT_ACTIVE_DEFER"; - default: return "invalid state"; - } -} +void * +gf_rdma_async_event_thread (void *context); static int32_t -ib_check_active_port (struct ibv_context *ctx, uint8_t port) -{ - struct ibv_port_attr port_attr = {0, }; - int32_t ret = 0; - const char *state_str = NULL; - - if (!ctx) { - gf_log_callingfn (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "Error in supplied context"); - return -1; - } - - ret = ibv_query_port (ctx, port, &port_attr); - - if (ret) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "Failed to query port %u properties", port); - return -1; - } - - state_str = get_port_state_str (port_attr.state); - gf_log (GF_RDMA_LOG_NAME, GF_LOG_TRACE, - "Infiniband PORT: (%u) STATE: (%s)", - port, state_str); - - if (port_attr.state == IBV_PORT_ACTIVE) - return 0; - - return -1; -} +gf_rdma_create_qp (rpc_transport_t *this); static int32_t -ib_get_active_port (struct ibv_context *ib_ctx) -{ - struct ibv_device_attr ib_device_attr = {{0, }, }; - int32_t ret = -1; - uint8_t ib_port = 0; +__gf_rdma_teardown (rpc_transport_t *this); - if (!ib_ctx) { - gf_log_callingfn (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "Error in supplied context"); - return -1; - } - if (ibv_query_device (ib_ctx, &ib_device_attr)) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "Failed to query device properties"); - return -1; - } +static int32_t +gf_rdma_teardown (rpc_transport_t *this); - for (ib_port = 1; ib_port <= ib_device_attr.phys_port_cnt; ++ib_port) { - ret = ib_check_active_port (ib_ctx, ib_port); - if (ret == 0) - return ib_port; +static int32_t +gf_rdma_disconnect (rpc_transport_t *this); - gf_log (GF_RDMA_LOG_NAME, GF_LOG_TRACE, - "Port:(%u) not active", ib_port); - continue; - } - return ret; -} +static void +gf_rdma_cm_handle_disconnect (rpc_transport_t *this); static void @@ -156,7 +86,7 @@ gf_rdma_put_post (gf_rdma_queue_t *queue, gf_rdma_post_t *post) static gf_rdma_post_t * -gf_rdma_new_post (gf_rdma_device_t *device, int32_t len, +gf_rdma_new_post (rpc_transport_t *this, gf_rdma_device_t *device, int32_t len, gf_rdma_post_type_t type) { gf_rdma_post_t *post = NULL; @@ -183,8 +113,9 @@ gf_rdma_new_post (gf_rdma_device_t *device, int32_t len, post->buf_size, IBV_ACCESS_LOCAL_WRITE); if (!post->mr) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "memory registration failed"); + gf_log (this->name, GF_LOG_WARNING, + "memory registration failed (%s)", + strerror (errno)); goto out; } @@ -194,9 +125,7 @@ gf_rdma_new_post 
(gf_rdma_device_t *device, int32_t len, ret = 0; out: if (ret != 0) { - if (post->buf != NULL) { - free (post->buf); - } + free (post->buf); GF_FREE (post); post = NULL; @@ -259,22 +188,6 @@ __gf_rdma_quota_get (gf_rdma_peer_t *peer) return ret; } -/* - static int32_t - gf_rdma_quota_get (gf_rdma_peer_t *peer) - { - int32_t ret = -1; - gf_rdma_private_t *priv = peer->trans->private; - - pthread_mutex_lock (&priv->write_mutex); - { - ret = __gf_rdma_quota_get (peer); - } - pthread_mutex_unlock (&priv->write_mutex); - - return ret; - } -*/ static void __gf_rdma_ioq_entry_free (gf_rdma_ioq_t *entry) @@ -290,6 +203,7 @@ __gf_rdma_ioq_entry_free (gf_rdma_ioq_t *entry) iobref_unref (entry->msg.request.rsp_iobref); entry->msg.request.rsp_iobref = NULL; } + mem_put (entry); } @@ -309,26 +223,898 @@ static int32_t __gf_rdma_disconnect (rpc_transport_t *this) { gf_rdma_private_t *priv = NULL; - int32_t ret = 0; priv = this->private; - if (priv->connected || priv->tcp_connected) { - fcntl (priv->sock, F_SETFL, O_NONBLOCK); - if (shutdown (priv->sock, SHUT_RDWR) != 0) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, - "shutdown () - error: %s", - strerror (errno)); - ret = -errno; - priv->tcp_connected = 0; + if (priv->connected) { + rdma_disconnect (priv->peer.cm_id); + } + + return 0; +} + + +static void +gf_rdma_queue_init (gf_rdma_queue_t *queue) +{ + pthread_mutex_init (&queue->lock, NULL); + + queue->active_posts.next = &queue->active_posts; + queue->active_posts.prev = &queue->active_posts; + queue->passive_posts.next = &queue->passive_posts; + queue->passive_posts.prev = &queue->passive_posts; +} + + +static void +__gf_rdma_destroy_queue (gf_rdma_post_t *post) +{ + gf_rdma_post_t *tmp = NULL; + + while (post->next != post) { + tmp = post->next; + + post->next = post->next->next; + post->next->prev = post; + + gf_rdma_destroy_post (tmp); + } +} + + +static void +gf_rdma_destroy_queue (gf_rdma_queue_t *queue) +{ + if (queue == NULL) { + goto out; + } + + pthread_mutex_lock (&queue->lock); + { + if (queue->passive_count > 0) { + __gf_rdma_destroy_queue (&queue->passive_posts); + queue->passive_count = 0; + } + + if (queue->active_count > 0) { + __gf_rdma_destroy_queue (&queue->active_posts); + queue->active_count = 0; + } + } + pthread_mutex_unlock (&queue->lock); + +out: + return; +} + + +static void +gf_rdma_destroy_posts (rpc_transport_t *this) +{ + gf_rdma_device_t *device = NULL; + gf_rdma_private_t *priv = NULL; + + if (this == NULL) { + goto out; + } + + priv = this->private; + device = priv->device; + + gf_rdma_destroy_queue (&device->sendq); + gf_rdma_destroy_queue (&device->recvq); + +out: + return; +} + + +static int32_t +__gf_rdma_create_posts (rpc_transport_t *this, int32_t count, int32_t size, + gf_rdma_queue_t *q, gf_rdma_post_type_t type) +{ + int32_t i = 0; + int32_t ret = 0; + gf_rdma_private_t *priv = NULL; + gf_rdma_device_t *device = NULL; + + priv = this->private; + device = priv->device; + + for (i=0 ; i<count ; i++) { + gf_rdma_post_t *post = NULL; + + post = gf_rdma_new_post (this, device, size + 2048, type); + if (!post) { + gf_log (this->name, GF_LOG_ERROR, + "post creation failed"); + ret = -1; + break; + } + + gf_rdma_put_post (q, post); + } + return ret; +} + + +static int32_t +gf_rdma_post_recv (struct ibv_srq *srq, + gf_rdma_post_t *post) +{ + struct ibv_sge list = { + .addr = (unsigned long) post->buf, + .length = post->buf_size, + .lkey = post->mr->lkey + }; + + struct ibv_recv_wr wr = { + .wr_id = (unsigned long) post, + .sg_list = &list, + .num_sge = 1, + }, 
*bad_wr; + + gf_rdma_post_ref (post); + + return ibv_post_srq_recv (srq, &wr, &bad_wr); +} + + +static int32_t +gf_rdma_create_posts (rpc_transport_t *this) +{ + int32_t i = 0, ret = 0; + gf_rdma_post_t *post = NULL; + gf_rdma_private_t *priv = NULL; + gf_rdma_options_t *options = NULL; + gf_rdma_device_t *device = NULL; + + priv = this->private; + options = &priv->options; + device = priv->device; + + ret = __gf_rdma_create_posts (this, options->send_count, + options->send_size, + &device->sendq, GF_RDMA_SEND_POST); + if (!ret) + ret = __gf_rdma_create_posts (this, options->recv_count, + options->recv_size, + &device->recvq, + GF_RDMA_RECV_POST); + + if (!ret) { + for (i=0 ; i<options->recv_count ; i++) { + post = gf_rdma_get_post (&device->recvq); + if (gf_rdma_post_recv (device->srq, post) != 0) { + ret = -1; + break; + } + } + } + + if (ret) + gf_rdma_destroy_posts (this); + + return ret; +} + + +static void +gf_rdma_destroy_cq (rpc_transport_t *this) +{ + gf_rdma_private_t *priv = NULL; + gf_rdma_device_t *device = NULL; + + priv = this->private; + device = priv->device; + + if (device->recv_cq) + ibv_destroy_cq (device->recv_cq); + device->recv_cq = NULL; + + if (device->send_cq) + ibv_destroy_cq (device->send_cq); + device->send_cq = NULL; + + return; +} + + +static int32_t +gf_rdma_create_cq (rpc_transport_t *this) +{ + gf_rdma_private_t *priv = NULL; + gf_rdma_options_t *options = NULL; + gf_rdma_device_t *device = NULL; + uint64_t send_cqe = 0; + int32_t ret = 0; + struct ibv_device_attr device_attr = {{0}, }; + + priv = this->private; + options = &priv->options; + device = priv->device; + + device->recv_cq = ibv_create_cq (priv->device->context, + options->recv_count * 2, + device, + device->recv_chan, + 0); + if (!device->recv_cq) { + gf_log (this->name, GF_LOG_ERROR, + "creation of CQ for device %s failed", + device->device_name); + ret = -1; + goto out; + } else if (ibv_req_notify_cq (device->recv_cq, 0)) { + gf_log (this->name, GF_LOG_ERROR, + "ibv_req_notify_cq on recv CQ of device %s failed", + device->device_name); + ret = -1; + goto out; + } + + do { + ret = ibv_query_device (priv->device->context, &device_attr); + if (ret != 0) { + gf_log (this->name, GF_LOG_ERROR, + "ibv_query_device on %s returned %d (%s)", + priv->device->device_name, ret, + (ret > 0) ? strerror (ret) : ""); + ret = -1; + goto out; + } + + send_cqe = options->send_count * 128; + send_cqe = (send_cqe > device_attr.max_cqe) + ? 
device_attr.max_cqe : send_cqe; + + /* TODO: make send_cq size dynamically adaptive */ + device->send_cq = ibv_create_cq (priv->device->context, + send_cqe, device, + device->send_chan, 0); + if (!device->send_cq) { + gf_log (this->name, GF_LOG_ERROR, + "creation of send_cq for device %s failed", + device->device_name); + ret = -1; + goto out; + } + + if (ibv_req_notify_cq (device->send_cq, 0)) { + gf_log (this->name, GF_LOG_ERROR, + "ibv_req_notify_cq on send_cq for device %s" + " failed", device->device_name); + ret = -1; + goto out; + } + } while (0); + +out: + if (ret != 0) + gf_rdma_destroy_cq (this); + + return ret; +} + + +static gf_rdma_device_t * +gf_rdma_get_device (rpc_transport_t *this, struct ibv_context *ibctx, + char *device_name) +{ + glusterfs_ctx_t *ctx = NULL; + gf_rdma_private_t *priv = NULL; + gf_rdma_options_t *options = NULL; + int32_t ret = 0; + int32_t i = 0; + gf_rdma_device_t *trav = NULL, *device = NULL; + gf_rdma_ctx_t *rdma_ctx = NULL; + + priv = this->private; + options = &priv->options; + ctx = this->ctx; + rdma_ctx = ctx->ib; + + trav = rdma_ctx->device; + + while (trav) { + if (!strcmp (trav->device_name, device_name)) + break; + trav = trav->next; + } + + if (!trav) { + trav = GF_CALLOC (1, sizeof (*trav), + gf_common_mt_rdma_device_t); + if (trav == NULL) { + goto out; + } + + priv->device = trav; + trav->context = ibctx; + + trav->request_ctx_pool + = mem_pool_new (gf_rdma_request_context_t, + GF_RDMA_POOL_SIZE); + if (trav->request_ctx_pool == NULL) { + goto out; + } + + trav->ioq_pool + = mem_pool_new (gf_rdma_ioq_t, GF_RDMA_POOL_SIZE); + if (trav->ioq_pool == NULL) { + goto out; + } + + trav->reply_info_pool = mem_pool_new (gf_rdma_reply_info_t, + GF_RDMA_POOL_SIZE); + if (trav->reply_info_pool == NULL) { + goto out; + } + + trav->device_name = gf_strdup (device_name); + + trav->next = rdma_ctx->device; + rdma_ctx->device = trav; + + trav->send_chan = ibv_create_comp_channel (trav->context); + if (!trav->send_chan) { + gf_log (this->name, GF_LOG_ERROR, + "could not create send completion channel for " + "device (%s)", device_name); + goto out; + } + + trav->recv_chan = ibv_create_comp_channel (trav->context); + if (!trav->recv_chan) { + gf_log (this->name, GF_LOG_ERROR, + "could not create recv completion channel for " + "device (%s)", device_name); + + /* TODO: cleanup current mess */ + goto out; + } + + if (gf_rdma_create_cq (this) < 0) { + gf_log (this->name, GF_LOG_ERROR, + "could not create CQ for device (%s)", + device_name); + goto out; + } + + /* protection domain */ + trav->pd = ibv_alloc_pd (trav->context); + + if (!trav->pd) { + gf_log (this->name, GF_LOG_ERROR, + "could not allocate protection domain for " + "device (%s)", device_name); + goto out; + } + + struct ibv_srq_init_attr attr = { + .attr = { + .max_wr = options->recv_count, + .max_sge = 1, + .srq_limit = 10 + } + }; + trav->srq = ibv_create_srq (trav->pd, &attr); + + if (!trav->srq) { + gf_log (this->name, GF_LOG_ERROR, + "could not create SRQ for device (%s)", + device_name); + goto out; + } + + /* queue init */ + gf_rdma_queue_init (&trav->sendq); + gf_rdma_queue_init (&trav->recvq); + + if (gf_rdma_create_posts (this) < 0) { + gf_log (this->name, GF_LOG_ERROR, + "could not allocate posts for device (%s)", + device_name); + goto out; + } + + /* completion threads */ + ret = gf_thread_create (&trav->send_thread, NULL, + gf_rdma_send_completion_proc, + trav->send_chan); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, + "could not create send completion thread for " + "device 
(%s)", device_name); + goto out; + } + + ret = gf_thread_create (&trav->recv_thread, NULL, + gf_rdma_recv_completion_proc, + trav->recv_chan); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, + "could not create recv completion thread " + "for device (%s)", device_name); + return NULL; + } + + ret = gf_thread_create (&trav->async_event_thread, NULL, + gf_rdma_async_event_thread, + ibctx); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, + "could not create async_event_thread"); + return NULL; + } + + /* qpreg */ + pthread_mutex_init (&trav->qpreg.lock, NULL); + for (i=0; i<42; i++) { + trav->qpreg.ents[i].next = &trav->qpreg.ents[i]; + trav->qpreg.ents[i].prev = &trav->qpreg.ents[i]; + } + } + + device = trav; + trav = NULL; +out: + + if (trav != NULL) { + gf_rdma_destroy_posts (this); + mem_pool_destroy (trav->ioq_pool); + mem_pool_destroy (trav->request_ctx_pool); + mem_pool_destroy (trav->reply_info_pool); + ibv_dealloc_pd (trav->pd); + gf_rdma_destroy_cq (this); + ibv_destroy_comp_channel (trav->recv_chan); + ibv_destroy_comp_channel (trav->send_chan); + GF_FREE ((char *)trav->device_name); + GF_FREE (trav); + } + + return device; +} + + +static rpc_transport_t * +gf_rdma_transport_new (rpc_transport_t *listener, struct rdma_cm_id *cm_id) +{ + gf_rdma_private_t *listener_priv = NULL, *priv = NULL; + rpc_transport_t *this = NULL, *new = NULL; + gf_rdma_options_t *options = NULL; + char *device_name = NULL; + + listener_priv = listener->private; + + this = GF_CALLOC (1, sizeof (rpc_transport_t), + gf_common_mt_rpc_transport_t); + if (this == NULL) { + goto out; + } + + this->listener = listener; + + priv = GF_CALLOC (1, sizeof (gf_rdma_private_t), + gf_common_mt_rdma_private_t); + if (priv == NULL) { + goto out; + } + + this->private = priv; + priv->options = listener_priv->options; + + priv->listener = listener; + priv->entity = GF_RDMA_SERVER; + + options = &priv->options; + + this->ops = listener->ops; + this->init = listener->init; + this->fini = listener->fini; + this->ctx = listener->ctx; + this->name = gf_strdup (listener->name); + this->notify = listener->notify; + this->mydata = listener->mydata; + + this->myinfo.sockaddr_len = sizeof (cm_id->route.addr.src_addr); + memcpy (&this->myinfo.sockaddr, &cm_id->route.addr.src_addr, + this->myinfo.sockaddr_len); + + this->peerinfo.sockaddr_len = sizeof (cm_id->route.addr.dst_addr); + memcpy (&this->peerinfo.sockaddr, &cm_id->route.addr.dst_addr, + this->peerinfo.sockaddr_len); + + priv->peer.trans = this; + gf_rdma_get_transport_identifiers (this); + + device_name = (char *)ibv_get_device_name (cm_id->verbs->device); + if (device_name == NULL) { + gf_log (listener->name, GF_LOG_WARNING, + "cannot get device name (peer:%s me:%s)", + this->peerinfo.identifier, this->myinfo.identifier); + goto out; + } + + priv->device = gf_rdma_get_device (this, cm_id->verbs, + device_name); + if (priv->device == NULL) { + gf_log (listener->name, GF_LOG_WARNING, + "cannot get infiniband device %s (peer:%s me:%s)", + device_name, this->peerinfo.identifier, + this->myinfo.identifier); + goto out; + } + + priv->peer.send_count = options->send_count; + priv->peer.recv_count = options->recv_count; + priv->peer.send_size = options->send_size; + priv->peer.recv_size = options->recv_size; + priv->peer.cm_id = cm_id; + INIT_LIST_HEAD (&priv->peer.ioq); + + pthread_mutex_init (&priv->write_mutex, NULL); + pthread_mutex_init (&priv->recv_mutex, NULL); + + cm_id->context = this; + + new = rpc_transport_ref (this); + this = NULL; +out: + if (this != NULL) { + if 
(this->private != NULL) { + GF_FREE (this->private); + } + + if (this->name != NULL) { + GF_FREE (this->name); + } + + GF_FREE (this); + } + + return new; +} + + +static int +gf_rdma_cm_handle_connect_request (struct rdma_cm_event *event) +{ + int ret = -1; + rpc_transport_t *this = NULL, *listener = NULL; + struct rdma_cm_id *child_cm_id = NULL, *listener_cm_id = NULL; + struct rdma_conn_param conn_param = {0, }; + gf_rdma_private_t *priv = NULL; + gf_rdma_options_t *options = NULL; + + child_cm_id = event->id; + listener_cm_id = event->listen_id; + + listener = listener_cm_id->context; + priv = listener->private; + options = &priv->options; + + this = gf_rdma_transport_new (listener, child_cm_id); + if (this == NULL) { + gf_log (listener->name, GF_LOG_WARNING, + "could not create a transport for incoming connection" + " (me.name:%s me.identifier:%s)", listener->name, + listener->myinfo.identifier); + rdma_destroy_id (child_cm_id); + goto out; + } + + gf_log (listener->name, GF_LOG_TRACE, + "got a connect request (me:%s peer:%s)", + listener->myinfo.identifier, this->peerinfo.identifier); + + ret = gf_rdma_create_qp (this); + if (ret < 0) { + gf_log (listener->name, GF_LOG_WARNING, + "could not create QP (peer:%s me:%s)", + this->peerinfo.identifier, this->myinfo.identifier); + gf_rdma_cm_handle_disconnect (this); + goto out; + } + + conn_param.responder_resources = 1; + conn_param.initiator_depth = 1; + conn_param.retry_count = options->attr_retry_cnt; + conn_param.rnr_retry_count = options->attr_rnr_retry; + + ret = rdma_accept(child_cm_id, &conn_param); + if (ret < 0) { + gf_log (listener->name, GF_LOG_WARNING, "rdma_accept failed " + "peer:%s me:%s (%s)", this->peerinfo.identifier, + this->myinfo.identifier, strerror (errno)); + gf_rdma_cm_handle_disconnect (this); + goto out; + } + + ret = 0; + +out: + return ret; +} + + +static int +gf_rdma_cm_handle_route_resolved (struct rdma_cm_event *event) +{ + struct rdma_conn_param conn_param = {0, }; + int ret = 0; + rpc_transport_t *this = NULL; + gf_rdma_private_t *priv = NULL; + gf_rdma_peer_t *peer = NULL; + gf_rdma_options_t *options = NULL; + + if (event == NULL) { + goto out; + } + + this = event->id->context; + + priv = this->private; + peer = &priv->peer; + options = &priv->options; + + ret = gf_rdma_create_qp (this); + if (ret != 0) { + gf_log (this->name, GF_LOG_WARNING, + "could not create QP (peer:%s me:%s)", + this->peerinfo.identifier, this->myinfo.identifier); + gf_rdma_cm_handle_disconnect (this); + goto out; + } + + memset(&conn_param, 0, sizeof conn_param); + conn_param.responder_resources = 1; + conn_param.initiator_depth = 1; + conn_param.retry_count = options->attr_retry_cnt; + conn_param.rnr_retry_count = options->attr_rnr_retry; + + ret = rdma_connect(peer->cm_id, &conn_param); + if (ret != 0) { + gf_log (this->name, GF_LOG_WARNING, + "rdma_connect failed (%s)", strerror (errno)); + gf_rdma_cm_handle_disconnect (this); + goto out; + } + + gf_log (this->name, GF_LOG_TRACE, "route resolved (me:%s peer:%s)", + this->myinfo.identifier, this->peerinfo.identifier); + + ret = 0; +out: + return ret; +} + + +static int +gf_rdma_cm_handle_addr_resolved (struct rdma_cm_event *event) +{ + rpc_transport_t *this = NULL; + gf_rdma_peer_t *peer = NULL; + gf_rdma_private_t *priv = NULL; + int ret = 0; + + this = event->id->context; + + priv = this->private; + peer = &priv->peer; + + GF_ASSERT (peer->cm_id == event->id); + + this->myinfo.sockaddr_len = sizeof (peer->cm_id->route.addr.src_addr); + memcpy (&this->myinfo.sockaddr, 
&peer->cm_id->route.addr.src_addr, + this->myinfo.sockaddr_len); + + this->peerinfo.sockaddr_len = sizeof (peer->cm_id->route.addr.dst_addr); + memcpy (&this->peerinfo.sockaddr, &peer->cm_id->route.addr.dst_addr, + this->peerinfo.sockaddr_len); + + gf_rdma_get_transport_identifiers (this); + + ret = rdma_resolve_route(peer->cm_id, 2000); + if (ret != 0) { + gf_log (this->name, GF_LOG_WARNING, + "rdma_resolve_route failed (me:%s peer:%s) (%s)", + this->myinfo.identifier, this->peerinfo.identifier, + strerror (errno)); + gf_rdma_cm_handle_disconnect (this); + } + + gf_log (this->name, GF_LOG_TRACE, "Address resolved (me:%s peer:%s)", + this->myinfo.identifier, this->peerinfo.identifier); + + return ret; +} + + +static void +gf_rdma_cm_handle_disconnect (rpc_transport_t *this) +{ + gf_rdma_private_t *priv = NULL; + char need_unref = 0, connected = 0; + + priv = this->private; + gf_log (this->name, GF_LOG_DEBUG, + "peer disconnected, cleaning up"); + + pthread_mutex_lock (&priv->write_mutex); + { + if (priv->peer.cm_id != NULL) { + need_unref = 1; + connected = priv->connected; priv->connected = 0; } + + __gf_rdma_teardown (this); } + pthread_mutex_unlock (&priv->write_mutex); + + if (connected) { + rpc_transport_notify (this, RPC_TRANSPORT_DISCONNECT, this); + } + + if (need_unref) + rpc_transport_unref (this); + +} + + +static int +gf_rdma_cm_handle_event_established (struct rdma_cm_event *event) +{ + rpc_transport_t *this = NULL; + gf_rdma_private_t *priv = NULL; + struct rdma_cm_id *cm_id = NULL; + int ret = 0; + + cm_id = event->id; + this = cm_id->context; + priv = this->private; + + priv->connected = 1; + + pthread_mutex_lock (&priv->write_mutex); + { + priv->peer.quota = 1; + priv->peer.quota_set = 0; + } + pthread_mutex_unlock (&priv->write_mutex); + + if (priv->entity == GF_RDMA_CLIENT) { + ret = rpc_transport_notify (this, RPC_TRANSPORT_CONNECT, this); + + } else if (priv->entity == GF_RDMA_SERVER) { + ret = rpc_transport_notify (priv->listener, + RPC_TRANSPORT_ACCEPT, this); + } + + if (ret < 0) { + gf_rdma_disconnect (this); + } + + gf_log (this->name, GF_LOG_TRACE, + "recieved event RDMA_CM_EVENT_ESTABLISHED (me:%s peer:%s)", + this->myinfo.identifier, this->peerinfo.identifier); return ret; } +static int +gf_rdma_cm_handle_event_error (rpc_transport_t *this) +{ + gf_rdma_private_t *priv = NULL; + + priv = this->private; + + if (priv->entity != GF_RDMA_SERVER_LISTENER) { + gf_rdma_cm_handle_disconnect (this); + } + + return 0; +} + + +static int +gf_rdma_cm_handle_device_removal (struct rdma_cm_event *event) +{ + return 0; +} + + +static void * +gf_rdma_cm_event_handler (void *data) +{ + struct rdma_cm_event *event = NULL; + int ret = 0; + rpc_transport_t *this = NULL; + struct rdma_event_channel *event_channel = NULL; + + event_channel = data; + + while (1) { + ret = rdma_get_cm_event (event_channel, &event); + if (ret != 0) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "rdma_cm_get_event failed (%s)", + strerror (errno)); + break; + } + + switch (event->event) { + case RDMA_CM_EVENT_ADDR_RESOLVED: + gf_rdma_cm_handle_addr_resolved (event); + break; + + case RDMA_CM_EVENT_ROUTE_RESOLVED: + gf_rdma_cm_handle_route_resolved (event); + break; + + case RDMA_CM_EVENT_CONNECT_REQUEST: + gf_rdma_cm_handle_connect_request (event); + break; + + case RDMA_CM_EVENT_ESTABLISHED: + gf_rdma_cm_handle_event_established (event); + break; + + case RDMA_CM_EVENT_ADDR_ERROR: + case RDMA_CM_EVENT_ROUTE_ERROR: + case RDMA_CM_EVENT_CONNECT_ERROR: + case RDMA_CM_EVENT_UNREACHABLE: + case 
RDMA_CM_EVENT_REJECTED: + this = event->id->context; + + gf_log (this->name, GF_LOG_WARNING, + "cma event %s, error %d (me:%s peer:%s)\n", + rdma_event_str(event->event), event->status, + this->myinfo.identifier, + this->peerinfo.identifier); + + rdma_ack_cm_event (event); + event = NULL; + + gf_rdma_cm_handle_event_error (this); + continue; + + case RDMA_CM_EVENT_DISCONNECTED: + this = event->id->context; + + gf_log (this->name, GF_LOG_DEBUG, + "recieved disconnect (me:%s peer:%s)\n", + this->myinfo.identifier, + this->peerinfo.identifier); + + rdma_ack_cm_event (event); + event = NULL; + + gf_rdma_cm_handle_disconnect (this); + continue; + + case RDMA_CM_EVENT_DEVICE_REMOVAL: + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "device removed"); + gf_rdma_cm_handle_device_removal (event); + break; + + default: + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "unhandled event: %s, ignoring", + rdma_event_str(event->event)); + break; + } + + rdma_ack_cm_event (event); + } + + return NULL; +} + + static int32_t gf_rdma_post_send (struct ibv_qp *qp, gf_rdma_post_t *post, int32_t len) { @@ -354,33 +1140,33 @@ gf_rdma_post_send (struct ibv_qp *qp, gf_rdma_post_t *post, int32_t len) int __gf_rdma_encode_error(gf_rdma_peer_t *peer, gf_rdma_reply_info_t *reply_info, - struct iovec *rpchdr, uint32_t *ptr, + struct iovec *rpchdr, gf_rdma_header_t *hdr, gf_rdma_errcode_t err) { - uint32_t *startp = NULL; struct rpc_msg *rpc_msg = NULL; - startp = ptr; if (reply_info != NULL) { - *ptr++ = hton32(reply_info->rm_xid); + hdr->rm_xid = hton32(reply_info->rm_xid); } else { rpc_msg = rpchdr[0].iov_base; /* assume rpchdr contains * only one vector. * (which is true) */ - *ptr++ = rpc_msg->rm_xid; + hdr->rm_xid = rpc_msg->rm_xid; } - *ptr++ = hton32(GF_RDMA_VERSION); - *ptr++ = hton32(peer->send_count); - *ptr++ = hton32(GF_RDMA_ERROR); - *ptr++ = hton32(err); + hdr->rm_vers = hton32(GF_RDMA_VERSION); + hdr->rm_credit = hton32(peer->send_count); + hdr->rm_type = hton32(GF_RDMA_ERROR); + hdr->rm_body.rm_error.rm_type = hton32(err); if (err == ERR_VERS) { - *ptr++ = hton32(GF_RDMA_VERSION); - *ptr++ = hton32(GF_RDMA_VERSION); + hdr->rm_body.rm_error.rm_version.gf_rdma_vers_low + = hton32(GF_RDMA_VERSION); + hdr->rm_body.rm_error.rm_version.gf_rdma_vers_high + = hton32(GF_RDMA_VERSION); } - return (int)((unsigned long)ptr - (unsigned long)startp); + return sizeof (*hdr); } @@ -392,7 +1178,7 @@ __gf_rdma_send_error (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, int32_t ret = -1, len = 0; len = __gf_rdma_encode_error (peer, reply_info, entry->rpchdr, - (uint32_t *)post->buf, err); + (gf_rdma_header_t *)post->buf, err); if (len == -1) { gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, "encode error returned -1"); @@ -451,8 +1237,10 @@ __gf_rdma_create_read_chunks_from_vector (gf_rdma_peer_t *peer, vector[i].iov_len, IBV_ACCESS_REMOTE_READ); if (!mr) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "memory registration failed"); + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "memory registration failed (%s) (peer:%s)", + strerror (errno), + peer->trans->peerinfo.identifier); goto out; } @@ -501,8 +1289,8 @@ __gf_rdma_create_read_chunks (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, entry->rpchdr_count, request_ctx); if (ret == -1) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, - "cannot create read chunks from vector, " + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "cannot create read chunks from vector " "entry->rpchdr"); goto out; } @@ -514,8 +1302,8 @@ __gf_rdma_create_read_chunks (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, 
entry->proghdr_count, request_ctx); if (ret == -1) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, - "cannot create read chunks from vector, " + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "cannot create read chunks from vector " "entry->proghdr"); } @@ -527,8 +1315,8 @@ __gf_rdma_create_read_chunks (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, entry->prog_payload_count, request_ctx); if (ret == -1) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, - "cannot create read chunks from vector," + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "cannot create read chunks from vector" " entry->prog_payload"); } } @@ -541,8 +1329,8 @@ __gf_rdma_create_read_chunks (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, entry->prog_payload_count, request_ctx); if (ret == -1) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, - "cannot create read chunks from vector, " + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "cannot create read chunks from vector " "entry->prog_payload"); } } @@ -585,8 +1373,10 @@ __gf_rdma_create_write_chunks_from_vector (gf_rdma_peer_t *peer, IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE); if (!mr) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "memory registration failed"); + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "memory registration failed (%s) (peer:%s)", + strerror (errno), + peer->trans->peerinfo.identifier); goto out; } @@ -661,7 +1451,7 @@ __gf_rdma_create_write_chunks (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, entry->msg.request.rsp_payload_count, request_ctx); if (ret == -1) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, "cannot create write chunks from vector " "entry->rpc_payload"); goto out; @@ -691,7 +1481,7 @@ __gf_rdma_create_write_chunks (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, entry->msg.request.rsphdr_count, request_ctx); if (ret == -1) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, "cannot create write chunks from vector " "entry->rpchdr"); goto out; @@ -707,7 +1497,7 @@ out: } -inline void +static inline void __gf_rdma_deregister_mr (struct ibv_mr **mr, int count) { int i = 0; @@ -828,28 +1618,6 @@ out: } -static int32_t -gf_rdma_post_recv (struct ibv_srq *srq, - gf_rdma_post_t *post) -{ - struct ibv_sge list = { - .addr = (unsigned long) post->buf, - .length = post->buf_size, - .lkey = post->mr->lkey - }; - - struct ibv_recv_wr wr = { - .wr_id = (unsigned long) post, - .sg_list = &list, - .num_sge = 1, - }, *bad_wr; - - gf_rdma_post_ref (post); - - return ibv_post_srq_recv (srq, &wr, &bad_wr); -} - - int gf_rdma_post_unref (gf_rdma_post_t *post) { @@ -1035,10 +1803,11 @@ __gf_rdma_ioq_churn_request (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, chunkptr = &hdr->rm_body.rm_chunks[0]; if (rtype != gf_rdma_noch) { - ret = __gf_rdma_create_read_chunks (peer, entry, rtype, &chunkptr, + ret = __gf_rdma_create_read_chunks (peer, entry, rtype, + &chunkptr, request_ctx); if (ret != 0) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, "creation of read chunks failed"); goto out; } @@ -1047,10 +1816,11 @@ __gf_rdma_ioq_churn_request (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, } if (wtype != gf_rdma_noch) { - ret = __gf_rdma_create_write_chunks (peer, entry, wtype, &chunkptr, + ret = __gf_rdma_create_write_chunks (peer, entry, wtype, + &chunkptr, request_ctx); if (ret != 0) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, "creation of write/reply chunk failed"); goto out; } @@ -1108,7 +1878,7 @@ out: } -inline void +static inline 
void __gf_rdma_fill_reply_header (gf_rdma_header_t *header, struct iovec *rpchdr, gf_rdma_reply_info_t *reply_info, int credits) { @@ -1138,11 +1908,12 @@ __gf_rdma_fill_reply_header (gf_rdma_header_t *header, struct iovec *rpchdr, int32_t __gf_rdma_send_reply_inline (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, - gf_rdma_post_t *post, gf_rdma_reply_info_t *reply_info) + gf_rdma_post_t *post, + gf_rdma_reply_info_t *reply_info) { - gf_rdma_header_t *header = NULL; - int32_t send_size = 0, ret = 0; - char *buf = NULL; + gf_rdma_header_t *header = NULL; + int32_t send_size = 0, ret = 0; + char *buf = NULL; send_size = iov_length (entry->rpchdr, entry->rpchdr_count) + iov_length (entry->proghdr, entry->proghdr_count) @@ -1155,6 +1926,10 @@ __gf_rdma_send_reply_inline (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, if (send_size > GLUSTERFS_RDMA_INLINE_THRESHOLD) { ret = __gf_rdma_send_error (peer, entry, post, reply_info, ERR_CHUNK); + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "msg size (%d) is greater than maximum size " + "of msg that can be sent inlined (%d)", + send_size, GLUSTERFS_RDMA_INLINE_THRESHOLD); goto out; } @@ -1189,7 +1964,7 @@ __gf_rdma_send_reply_inline (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, ret = send_size; } else { gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, - "gf_rdma_post_send (to %s) failed with ret = %d (%s)", + "posting send (to %s) failed with ret = %d (%s)", peer->trans->peerinfo.identifier, ret, (ret > 0) ? strerror (ret) : ""); gf_rdma_post_unref (post); @@ -1262,9 +2037,8 @@ __gf_rdma_register_local_mr_for_rdma (gf_rdma_peer_t *peer, gf_rdma_private_t *priv = NULL; gf_rdma_device_t *device = NULL; - if ((ctx == NULL) || (vector == NULL)) { - goto out; - } + GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, ctx, out); + GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, vector, out); priv = peer->trans->private; device = priv->device; @@ -1285,6 +2059,9 @@ __gf_rdma_register_local_mr_for_rdma (gf_rdma_peer_t *peer, vector[i].iov_len, IBV_ACCESS_LOCAL_WRITE); if (ctx->mr[ctx->mr_count] == NULL) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "registering memory for IBV_ACCESS_LOCAL_WRITE " + "failed (%s)", strerror (errno)); goto out; } @@ -1355,7 +2132,8 @@ __gf_rdma_write (gf_rdma_peer_t *peer, gf_rdma_post_t *post, struct iovec *vec, ret = ibv_post_send(peer->qp, &wr, &bad_wr); if (ret) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, "rdma write to " + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "rdma write to " "client (%s) failed with ret = %d (%s)", peer->trans->peerinfo.identifier, ret, (ret > 0) ? 
strerror (ret) : ""); @@ -1390,6 +2168,8 @@ __gf_rdma_do_gf_rdma_write (gf_rdma_peer_t *peer, gf_rdma_post_t *post, ret = __gf_rdma_register_local_mr_for_rdma (peer, vector, count, &post->ctx); if (ret == -1) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "registering memory region for rdma failed"); goto out; } @@ -1401,9 +2181,13 @@ __gf_rdma_do_gf_rdma_write (gf_rdma_peer_t *peer, gf_rdma_post_t *post, xfer_len = min (payload_size, reply_info->wc_array->wc_array[i].wc_target.rs_length); - ret = __gf_rdma_write (peer, post, vector, xfer_len, &payload_idx, + ret = __gf_rdma_write (peer, post, vector, xfer_len, + &payload_idx, &reply_info->wc_array->wc_array[i]); if (ret == -1) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "rdma write to client (%s) failed", + peer->trans->peerinfo.identifier); goto out; } @@ -1422,12 +2206,12 @@ __gf_rdma_send_reply_type_nomsg (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, gf_rdma_post_t *post, gf_rdma_reply_info_t *reply_info) { - gf_rdma_header_t *header = NULL; - char *buf = NULL; - uint32_t payload_size = 0; - int count = 0, i = 0; - int32_t ret = 0; - struct iovec vector[MAX_IOVEC]; + gf_rdma_header_t *header = NULL; + char *buf = NULL; + uint32_t payload_size = 0; + int count = 0, i = 0; + int32_t ret = 0; + struct iovec vector[MAX_IOVEC]; header = (gf_rdma_header_t *)post->buf; @@ -1442,9 +2226,10 @@ __gf_rdma_send_reply_type_nomsg (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, /* encode reply chunklist */ buf = (char *)&header->rm_body.rm_chunks[2]; ret = __gf_rdma_reply_encode_write_chunks (peer, payload_size, post, - reply_info, (uint32_t **)&buf); + reply_info, + (uint32_t **)&buf); if (ret == -1) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, "encoding write chunks failed"); ret = __gf_rdma_send_error (peer, entry, post, reply_info, ERR_CHUNK); @@ -1464,6 +2249,9 @@ __gf_rdma_send_reply_type_nomsg (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, ret = __gf_rdma_do_gf_rdma_write (peer, post, vector, count, entry->iobref, reply_info); if (ret == -1) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "rdma write to peer (%s) failed", + peer->trans->peerinfo.identifier); gf_rdma_post_unref (post); goto out; } @@ -1471,7 +2259,7 @@ __gf_rdma_send_reply_type_nomsg (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, ret = gf_rdma_post_send (peer->qp, post, (buf - post->buf)); if (ret) { gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, - "gf_rdma_post_send to client (%s) failed with " + "posting a send request to client (%s) failed with " "ret = %d (%s)", peer->trans->peerinfo.identifier, ret, (ret > 0) ? 
strerror (ret) : ""); ret = -1; @@ -1487,12 +2275,13 @@ out: int32_t __gf_rdma_send_reply_type_msg (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, - gf_rdma_post_t *post, gf_rdma_reply_info_t *reply_info) + gf_rdma_post_t *post, + gf_rdma_reply_info_t *reply_info) { - gf_rdma_header_t *header = NULL; - int32_t send_size = 0, ret = 0; - char *ptr = NULL; - uint32_t payload_size = 0; + gf_rdma_header_t *header = NULL; + int32_t send_size = 0, ret = 0; + char *ptr = NULL; + uint32_t payload_size = 0; send_size = iov_length (entry->rpchdr, entry->rpchdr_count) + iov_length (entry->proghdr, entry->proghdr_count) @@ -1524,7 +2313,7 @@ __gf_rdma_send_reply_type_msg (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, reply_info, (uint32_t **)&ptr); if (ret == -1) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, "encoding write chunks failed"); ret = __gf_rdma_send_error (peer, entry, post, reply_info, ERR_CHUNK); @@ -1540,6 +2329,8 @@ __gf_rdma_send_reply_type_msg (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, entry->prog_payload_count, entry->iobref, reply_info); if (ret == -1) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, "rdma write to peer " + "(%s) failed", peer->trans->peerinfo.identifier); gf_rdma_post_unref (post); goto out; } @@ -1614,9 +2405,9 @@ __gf_rdma_ioq_churn_reply (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, int32_t ret = -1; gf_rdma_chunktype_t type = gf_rdma_noch; - if ((peer == NULL) || (entry == NULL) || (post == NULL)) { - goto out; - } + GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, peer, out); + GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, entry, out); + GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, post, out); reply_info = entry->msg.reply_info; if (reply_info != NULL) { @@ -1627,22 +2418,39 @@ __gf_rdma_ioq_churn_reply (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry, case gf_rdma_noch: ret = __gf_rdma_send_reply_inline (peer, entry, post, reply_info); + if (ret < 0) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "failed to send reply to peer (%s) as an " + "inlined rdma msg", + peer->trans->peerinfo.identifier); + } break; case gf_rdma_replych: ret = __gf_rdma_send_reply_type_nomsg (peer, entry, post, reply_info); + if (ret < 0) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "failed to send reply to peer (%s) as " + "RDMA_NOMSG", peer->trans->peerinfo.identifier); + } break; case gf_rdma_writech: ret = __gf_rdma_send_reply_type_msg (peer, entry, post, reply_info); + if (ret < 0) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "failed to send reply with write chunks " + "to peer (%s)", + peer->trans->peerinfo.identifier); + } break; default: gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, - "invalid chunktype (%d) specified for sending reply", - type); + "invalid chunktype (%d) specified for sending reply " + " (peer:%s)", type, peer->trans->peerinfo.identifier); break; } @@ -1671,20 +2479,34 @@ __gf_rdma_ioq_churn_entry (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry) if (quota > 0) { post = gf_rdma_get_post (&device->sendq); if (post == NULL) { - post = gf_rdma_new_post (device, + post = gf_rdma_new_post (peer->trans, device, (options->send_size + 2048), GF_RDMA_SEND_POST); } if (post == NULL) { ret = -1; + gf_log_callingfn (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "not able to get a post to send msg"); goto out; } if (entry->is_request) { ret = __gf_rdma_ioq_churn_request (peer, entry, post); + if (ret < 0) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "failed to process request ioq entry " + "to peer(%s)", + peer->trans->peerinfo.identifier); + } } else { ret = 
__gf_rdma_ioq_churn_reply (peer, entry, post); + if (ret < 0) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "failed to process reply ioq entry " + "to peer (%s)", + peer->trans->peerinfo.identifier); + } } if (ret != 0) { @@ -1740,9 +2562,9 @@ gf_rdma_writev (rpc_transport_t *this, gf_rdma_ioq_t *entry) pthread_mutex_lock (&priv->write_mutex); { if (!priv->connected) { - gf_log (this->name, GF_LOG_DEBUG, - "rdma is not connected to post a " - "send request"); + gf_log (this->name, GF_LOG_WARNING, + "rdma is not connected to peer (%s)", + this->peerinfo.identifier); ret = -1; goto unlock; } @@ -1752,6 +2574,13 @@ gf_rdma_writev (rpc_transport_t *this, gf_rdma_ioq_t *entry) ret = __gf_rdma_ioq_churn_entry (peer, entry); if (ret != 0) { need_append = 0; + + if (ret < 0) { + gf_log (this->name, GF_LOG_WARNING, + "processing ioq entry destined " + "to (%s) failed", + this->peerinfo.identifier); + } } } @@ -1871,6 +2700,9 @@ gf_rdma_submit_request (rpc_transport_t *this, rpc_transport_req_t *req) entry = gf_rdma_ioq_new (this, &data); if (entry == NULL) { + gf_log (this->name, GF_LOG_WARNING, + "getting a new ioq entry failed (peer:%s)", + this->peerinfo.identifier); goto out; } @@ -1879,6 +2711,9 @@ gf_rdma_submit_request (rpc_transport_t *this, rpc_transport_req_t *req) if (ret > 0) { ret = 0; } else if (ret < 0) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "sending request to peer (%s) failed", + this->peerinfo.identifier); rpc_transport_disconnect (this); } @@ -1901,6 +2736,9 @@ gf_rdma_submit_reply (rpc_transport_t *this, rpc_transport_reply_t *reply) entry = gf_rdma_ioq_new (this, &data); if (entry == NULL) { + gf_log (this->name, GF_LOG_WARNING, + "getting a new ioq entry failed (peer:%s)", + this->peerinfo.identifier); goto out; } @@ -1908,6 +2746,9 @@ gf_rdma_submit_reply (rpc_transport_t *this, rpc_transport_reply_t *reply) if (ret > 0) { ret = 0; } else if (ret < 0) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "sending request to peer (%s) failed", + this->peerinfo.identifier); rpc_transport_disconnect (this); } @@ -1915,189 +2756,6 @@ out: return ret; } -#if 0 -static int -gf_rdma_receive (rpc_transport_t *this, char **hdr_p, size_t *hdrlen_p, - struct iobuf **iobuf_p) -{ - gf_rdma_private_t *priv = this->private; - /* TODO: return error if !priv->connected, check with locks */ - /* TODO: boundry checks for data_ptr/offset */ - char *copy_from = NULL; - gf_rdma_header_t *header = NULL; - uint32_t size1, size2, data_len = 0; - char *hdr = NULL; - struct iobuf *iobuf = NULL; - int32_t ret = 0; - - pthread_mutex_lock (&priv->recv_mutex); - { -/* - while (!priv->data_ptr) - pthread_cond_wait (&priv->recv_cond, &priv->recv_mutex); -*/ - - copy_from = priv->data_ptr + priv->data_offset; - - priv->data_ptr = NULL; - data_len = priv->data_len; - pthread_cond_broadcast (&priv->recv_cond); - } - pthread_mutex_unlock (&priv->recv_mutex); - - header = (gf_rdma_header_t *)copy_from; - if (strcmp (header->colonO, ":O")) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, - "%s: corrupt header received", this->name); - ret = -1; - goto err; - } - - size1 = ntoh32 (header->size1); - size2 = ntoh32 (header->size2); - - if (data_len != (size1 + size2 + sizeof (*header))) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, - "%s: sizeof data read from transport is not equal " - "to the size specified in the header", - this->name); - ret = -1; - goto err; - } - - copy_from += sizeof (*header); - - if (size1) { - hdr = GF_CALLOC (1, size1, gf_common_mt_char); - if (!hdr) { - gf_log (this->name, GF_LOG_ERROR, - 
"unable to allocate header for peer %s", - this->peerinfo.identifier); - ret = -ENOMEM; - goto err; - } - memcpy (hdr, copy_from, size1); - copy_from += size1; - *hdr_p = hdr; - } - *hdrlen_p = size1; - - if (size2) { - iobuf = iobuf_get2 (this->ctx->iobuf_pool, size2); - if (!iobuf) { - gf_log (this->name, GF_LOG_ERROR, - "unable to allocate IO buffer for peer %s", - this->peerinfo.identifier); - ret = -ENOMEM; - goto err; - } - memcpy (iobuf->ptr, copy_from, size2); - *iobuf_p = iobuf; - } - -err: - return ret; -} -#endif - - -static void -gf_rdma_destroy_cq (rpc_transport_t *this) -{ - gf_rdma_private_t *priv = NULL; - gf_rdma_device_t *device = NULL; - - priv = this->private; - device = priv->device; - - if (device->recv_cq) - ibv_destroy_cq (device->recv_cq); - device->recv_cq = NULL; - - if (device->send_cq) - ibv_destroy_cq (device->send_cq); - device->send_cq = NULL; - - return; -} - - -static int32_t -gf_rdma_create_cq (rpc_transport_t *this) -{ - gf_rdma_private_t *priv = NULL; - gf_rdma_options_t *options = NULL; - gf_rdma_device_t *device = NULL; - uint64_t send_cqe = 0; - int32_t ret = 0; - struct ibv_device_attr device_attr = {{0}, }; - - priv = this->private; - options = &priv->options; - device = priv->device; - - device->recv_cq = ibv_create_cq (priv->device->context, - options->recv_count * 2, - device, - device->recv_chan, - 0); - if (!device->recv_cq) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "%s: creation of CQ for device %s failed", - this->name, device->device_name); - ret = -1; - goto out; - } else if (ibv_req_notify_cq (device->recv_cq, 0)) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "%s: ibv_req_notify_cq on recv CQ of device %s failed", - this->name, device->device_name); - ret = -1; - goto out; - } - - do { - ret = ibv_query_device (priv->device->context, &device_attr); - if (ret != 0) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "%s: ibv_query_device on %s returned %d (%s)", - this->name, priv->device->device_name, ret, - (ret > 0) ? strerror (ret) : ""); - ret = -1; - goto out; - } - - send_cqe = options->send_count * 128; - send_cqe = (send_cqe > device_attr.max_cqe) - ? 
device_attr.max_cqe : send_cqe; - - /* TODO: make send_cq size dynamically adaptive */ - device->send_cq = ibv_create_cq (priv->device->context, - send_cqe, device, - device->send_chan, 0); - if (!device->send_cq) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "%s: creation of send_cq for device %s failed", - this->name, device->device_name); - ret = -1; - goto out; - } - - if (ibv_req_notify_cq (device->send_cq, 0)) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "%s: ibv_req_notify_cq on send_cq for device %s" - " failed", this->name, device->device_name); - ret = -1; - goto out; - } - } while (0); - -out: - if (ret != 0) - gf_rdma_destroy_cq (this); - - return ret; -} - static int gf_rdma_register_peer (gf_rdma_device_t *device, int32_t qp_num, @@ -2196,25 +2854,6 @@ __gf_rdma_lookup_peer (gf_rdma_device_t *device, int32_t qp_num) return peer; } -/* - static gf_rdma_peer_t * - gf_rdma_lookup_peer (gf_rdma_device_t *device, - int32_t qp_num) - { - gf_rdma_qpreg_t *qpreg = NULL; - gf_rdma_peer_t *peer = NULL; - - qpreg = &device->qpreg; - pthread_mutex_lock (&qpreg->lock); - { - peer = __gf_rdma_lookup_peer (device, qp_num); - } - pthread_mutex_unlock (&qpreg->lock); - - return peer; - } -*/ - static void __gf_rdma_destroy_qp (rpc_transport_t *this) @@ -2224,7 +2863,7 @@ __gf_rdma_destroy_qp (rpc_transport_t *this) priv = this->private; if (priv->peer.qp) { gf_rdma_unregister_peer (priv->device, priv->peer.qp->qp_num); - ibv_destroy_qp (priv->peer.qp); + rdma_destroy_qp (priv->peer.cm_id); } priv->peer.qp = NULL; @@ -2235,18 +2874,36 @@ __gf_rdma_destroy_qp (rpc_transport_t *this) static int32_t gf_rdma_create_qp (rpc_transport_t *this) { - gf_rdma_private_t *priv = NULL; - gf_rdma_options_t *options = NULL; - gf_rdma_device_t *device = NULL; - int32_t ret = 0; - gf_rdma_peer_t *peer = NULL; + gf_rdma_private_t *priv = NULL; + gf_rdma_device_t *device = NULL; + int32_t ret = 0; + gf_rdma_peer_t *peer = NULL; + char *device_name = NULL; priv = this->private; - options = &priv->options; - device = priv->device; peer = &priv->peer; + device_name = (char *)ibv_get_device_name (peer->cm_id->verbs->device); + if (device_name == NULL) { + ret = -1; + gf_log (this->name, GF_LOG_WARNING, "cannot get device_name"); + goto out; + } + + device = gf_rdma_get_device (this, peer->cm_id->verbs, + device_name); + if (device == NULL) { + ret = -1; + gf_log (this->name, GF_LOG_WARNING, "cannot get device for " + "device %s", device_name); + goto out; + } + + if (priv->device == NULL) { + priv->device = device; + } + struct ibv_qp_init_attr init_attr = { .send_cq = device->send_cq, .recv_cq = device->recv_cq, @@ -2260,39 +2917,16 @@ gf_rdma_create_qp (rpc_transport_t *this) .qp_type = IBV_QPT_RC }; - struct ibv_qp_attr attr = { - .qp_state = IBV_QPS_INIT, - .pkey_index = 0, - .port_num = options->port, - .qp_access_flags - = IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE - }; - - peer->qp = ibv_create_qp (device->pd, &init_attr); - if (!peer->qp) { - gf_log (GF_RDMA_LOG_NAME, - GF_LOG_CRITICAL, - "%s: could not create QP", - this->name); - ret = -1; - goto out; - } else if (ibv_modify_qp (peer->qp, &attr, - IBV_QP_STATE | - IBV_QP_PKEY_INDEX | - IBV_QP_PORT | - IBV_QP_ACCESS_FLAGS)) { - gf_log (GF_RDMA_LOG_NAME, - GF_LOG_ERROR, - "%s: failed to modify QP to INIT state", - this->name); + ret = rdma_create_qp(peer->cm_id, device->pd, &init_attr); + if (ret != 0) { + gf_log (peer->trans->name, GF_LOG_CRITICAL, + "%s: could not create QP (%s)", this->name, + strerror (errno)); ret = -1; goto out; } - 
peer->local_lid = gf_rdma_get_local_lid (device->context, - options->port); - peer->local_qpn = peer->qp->qp_num; - peer->local_psn = lrand48 () & 0xffffff; + peer->qp = peer->cm_id->qp; ret = gf_rdma_register_peer (device, peer->qp->qp_num, peer); @@ -2304,300 +2938,52 @@ out: } -static void -gf_rdma_destroy_posts (rpc_transport_t *this) -{ - -} - - static int32_t -__gf_rdma_create_posts (rpc_transport_t *this, int32_t count, int32_t size, - gf_rdma_queue_t *q, gf_rdma_post_type_t type) -{ - int32_t i = 0; - int32_t ret = 0; - gf_rdma_private_t *priv = NULL; - gf_rdma_device_t *device = NULL; - - priv = this->private; - device = priv->device; - - for (i=0 ; i<count ; i++) { - gf_rdma_post_t *post = NULL; - - post = gf_rdma_new_post (device, size + 2048, type); - if (!post) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "%s: post creation failed", - this->name); - ret = -1; - break; - } - - gf_rdma_put_post (q, post); - } - return ret; -} - - -static int32_t -gf_rdma_create_posts (rpc_transport_t *this) +__gf_rdma_teardown (rpc_transport_t *this) { - int32_t i = 0, ret = 0; - gf_rdma_post_t *post = NULL; - gf_rdma_private_t *priv = NULL; - gf_rdma_options_t *options = NULL; - gf_rdma_device_t *device = NULL; + gf_rdma_private_t *priv = NULL; + gf_rdma_peer_t *peer = NULL; priv = this->private; - options = &priv->options; - device = priv->device; - - ret = __gf_rdma_create_posts (this, options->send_count, - options->send_size, - &device->sendq, GF_RDMA_SEND_POST); - if (!ret) - ret = __gf_rdma_create_posts (this, options->recv_count, - options->recv_size, - &device->recvq, - GF_RDMA_RECV_POST); + peer = &priv->peer; - if (!ret) { - for (i=0 ; i<options->recv_count ; i++) { - post = gf_rdma_get_post (&device->recvq); - if (gf_rdma_post_recv (device->srq, post) != 0) { - ret = -1; - break; - } - } + if (peer->cm_id->qp != NULL) { + __gf_rdma_destroy_qp (this); } - if (ret) - gf_rdma_destroy_posts (this); - - return ret; -} - - -static int32_t -gf_rdma_connect_qp (rpc_transport_t *this) -{ - gf_rdma_private_t *priv = this->private; - gf_rdma_options_t *options = &priv->options; - struct ibv_qp_attr attr = { - .qp_state = IBV_QPS_RTR, - .path_mtu = options->mtu, - .dest_qp_num = priv->peer.remote_qpn, - .rq_psn = priv->peer.remote_psn, - .max_dest_rd_atomic = 1, - .min_rnr_timer = 12, - .qp_access_flags - = IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE, - .ah_attr = { - .is_global = 0, - .dlid = priv->peer.remote_lid, - .sl = 0, - .src_path_bits = 0, - .port_num = options->port - } - }; - if (ibv_modify_qp (priv->peer.qp, &attr, - IBV_QP_STATE | - IBV_QP_AV | - IBV_QP_PATH_MTU | - IBV_QP_DEST_QPN | - IBV_QP_RQ_PSN | - IBV_QP_MAX_DEST_RD_ATOMIC | - IBV_QP_MIN_RNR_TIMER)) { - gf_log (GF_RDMA_LOG_NAME, - GF_LOG_CRITICAL, - "Failed to modify QP to RTR\n"); - return -1; + if (!list_empty (&priv->peer.ioq)) { + __gf_rdma_ioq_flush (peer); } - attr.qp_state = IBV_QPS_RTS; - attr.timeout = options->attr_timeout; - attr.retry_cnt = options->attr_retry_cnt; - attr.rnr_retry = options->attr_rnr_retry; - attr.sq_psn = priv->peer.local_psn; - attr.max_rd_atomic = 1; - if (ibv_modify_qp (priv->peer.qp, &attr, - IBV_QP_STATE | - IBV_QP_TIMEOUT | - IBV_QP_RETRY_CNT | - IBV_QP_RNR_RETRY | - IBV_QP_SQ_PSN | - IBV_QP_MAX_QP_RD_ATOMIC)) { - gf_log (GF_RDMA_LOG_NAME, - GF_LOG_CRITICAL, - "Failed to modify QP to RTS\n"); - return -1; + if (peer->cm_id != NULL) { + rdma_destroy_id (peer->cm_id); + peer->cm_id = NULL; } + /* TODO: decrement cq size */ return 0; } + static int32_t -__gf_rdma_teardown 
(rpc_transport_t *this) +gf_rdma_teardown (rpc_transport_t *this) { + int32_t ret = 0; gf_rdma_private_t *priv = NULL; - priv = this->private; - __gf_rdma_destroy_qp (this); - - if (!list_empty (&priv->peer.ioq)) { - __gf_rdma_ioq_flush (&priv->peer); + if (this == NULL) { + goto out; } - /* TODO: decrement cq size */ - return 0; -} - -/* - * return value: - * 0 = success (completed) - * -1 = error - * > 0 = incomplete - */ - -static int -__tcp_rwv (rpc_transport_t *this, struct iovec *vector, int count, - struct iovec **pending_vector, int *pending_count, - int write) -{ - gf_rdma_private_t *priv = NULL; - int sock = -1; - int ret = -1; - struct iovec *opvector = NULL; - int opcount = 0; - int moved = 0; - priv = this->private; - sock = priv->sock; - opvector = vector; - opcount = count; - while (opcount) + pthread_mutex_lock (&priv->write_mutex); { - if (write) - { - ret = writev (sock, opvector, opcount); - - if (ret == 0 || (ret == -1 && errno == EAGAIN)) - { - /* done for now */ - break; - } - } - else - { - ret = readv (sock, opvector, opcount); - - if (ret == -1 && errno == EAGAIN) - { - /* done for now */ - break; - } - } - - if (ret == 0) - { - gf_log (this->name, GF_LOG_DEBUG, - "EOF from peer %s", this->peerinfo.identifier); - opcount = -1; - errno = ENOTCONN; - break; - } - - if (ret == -1) - { - if (errno == EINTR) - continue; - - gf_log (this->name, GF_LOG_DEBUG, - "%s failed (%s)", write ? "writev" : "readv", - strerror (errno)); - if (write && !priv->connected && - (errno == ECONNREFUSED)) - gf_log (this->name, GF_LOG_ERROR, - "possible mismatch of 'rpc-transport-type'" - " in protocol server and client. " - "check volume file"); - opcount = -1; - break; - } - - moved = 0; - - while (moved < ret) - { - if ((ret - moved) >= opvector[0].iov_len) - { - moved += opvector[0].iov_len; - opvector++; - opcount--; - } - else - { - opvector[0].iov_len -= (ret - moved); - opvector[0].iov_base += (ret - moved); - moved += (ret - moved); - } - while (opcount && !opvector[0].iov_len) - { - opvector++; - opcount--; - } - } - } - - if (pending_vector) - *pending_vector = opvector; - - if (pending_count) - *pending_count = opcount; - - return opcount; -} - - -static int -__tcp_readv (rpc_transport_t *this, struct iovec *vector, int count, - struct iovec **pending_vector, int *pending_count) -{ - int ret = -1; - - ret = __tcp_rwv (this, vector, count, - pending_vector, pending_count, 0); - - return ret; -} - - -static int -__tcp_writev (rpc_transport_t *this, struct iovec *vector, int count, - struct iovec **pending_vector, int *pending_count) -{ - int ret = -1; - gf_rdma_private_t *priv = NULL; - - priv = this->private; - - ret = __tcp_rwv (this, vector, count, pending_vector, - pending_count, 1); - - if (ret > 0) { - /* TODO: Avoid multiple calls when socket is already - registered for POLLOUT */ - priv->idx = event_select_on (this->ctx->event_pool, - priv->sock, priv->idx, -1, 1); - } else if (ret == 0) { - priv->idx = event_select_on (this->ctx->event_pool, - priv->sock, - priv->idx, -1, 0); + ret = __gf_rdma_teardown (this); } + pthread_mutex_unlock (&priv->write_mutex); +out: return ret; } @@ -2692,10 +3078,11 @@ inline int32_t gf_rdma_decode_error_msg (gf_rdma_peer_t *peer, gf_rdma_post_t *post, size_t bytes_in_post) { - gf_rdma_header_t *header = NULL; - struct iobuf *iobuf = NULL; - struct iobref *iobref = NULL; - int32_t ret = -1; + gf_rdma_header_t *header = NULL; + struct iobuf *iobuf = NULL; + struct iobref *iobref = NULL; + int32_t ret = -1; + struct rpc_msg rpc_msg = {0, }; 
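Taken together, this declaration and the two hunks that follow change gf_rdma_decode_error_msg so that a GF_RDMA_ERROR header is turned into a minimal, well-formed RPC reply (REPLY / MSG_DENIED, carrying the original XID) instead of a raw copy of the post buffer, which lets the generic rpc layer match and complete the outstanding request. A condensed sketch of the resulting flow, pieced together only from the hunks in this function, with iobref management and error paths omitted:

    /* sketch only -- condensed from the patched gf_rdma_decode_error_msg */
    rpc_msg.rm_xid           = header->rm_xid;
    rpc_msg.rm_direction     = REPLY;
    rpc_msg.rm_reply.rp_stat = MSG_DENIED;

    iobuf = iobuf_get2 (peer->trans->ctx->iobuf_pool, bytes_in_post);

    ret = rpc_reply_to_xdr (&rpc_msg, iobuf_ptr (iobuf),
                            iobuf_pagesize (iobuf),
                            &post->ctx.vector[0]);
    if (ret == -1)
            goto out;

    post->ctx.count = 1;    /* single XDR-encoded reply vector */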
header = (gf_rdma_header_t *)post->buf; header->rm_body.rm_error.rm_type @@ -2707,6 +3094,10 @@ gf_rdma_decode_error_msg (gf_rdma_peer_t *peer, gf_rdma_post_t *post, ntoh32 (header->rm_body.rm_error.rm_version.gf_rdma_vers_high); } + rpc_msg.rm_xid = header->rm_xid; + rpc_msg.rm_direction = REPLY; + rpc_msg.rm_reply.rp_stat = MSG_DENIED; + iobuf = iobuf_get2 (peer->trans->ctx->iobuf_pool, bytes_in_post); if (iobuf == NULL) { ret = -1; @@ -2721,15 +3112,15 @@ gf_rdma_decode_error_msg (gf_rdma_peer_t *peer, gf_rdma_post_t *post, iobref_add (iobref, iobuf); iobuf_unref (iobuf); - /* - * FIXME: construct an appropriate rpc-msg here, what is being sent - * to rpc is not correct. - */ - post->ctx.vector[0].iov_base = iobuf_ptr (iobuf); - post->ctx.vector[0].iov_len = bytes_in_post; - - memcpy (post->ctx.vector[0].iov_base, (char *)post->buf, - post->ctx.vector[0].iov_len); + + ret = rpc_reply_to_xdr (&rpc_msg, iobuf_ptr (iobuf), + iobuf_pagesize (iobuf), &post->ctx.vector[0]); + if (ret == -1) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "Failed to create RPC reply"); + goto out; + } + post->ctx.count = 1; iobuf = NULL; @@ -2767,6 +3158,8 @@ gf_rdma_decode_msg (gf_rdma_peer_t *peer, gf_rdma_post_t *post, ret = gf_rdma_get_read_chunklist (&ptr, readch); if (ret == -1) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "cannot get read chunklist from msg"); goto out; } @@ -2775,6 +3168,8 @@ gf_rdma_decode_msg (gf_rdma_peer_t *peer, gf_rdma_post_t *post, ret = gf_rdma_get_write_chunklist (&ptr, &write_ary); if (ret == -1) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "cannot get write chunklist from msg"); goto out; } @@ -2784,6 +3179,8 @@ gf_rdma_decode_msg (gf_rdma_peer_t *peer, gf_rdma_post_t *post, if (write_ary != NULL) { reply_info = gf_rdma_reply_info_alloc (peer); if (reply_info == NULL) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "reply_info_alloc failed"); ret = -1; goto out; } @@ -2794,12 +3191,16 @@ gf_rdma_decode_msg (gf_rdma_peer_t *peer, gf_rdma_post_t *post, } else { ret = gf_rdma_get_write_chunklist (&ptr, &write_ary); if (ret == -1) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "cannot get reply chunklist from msg"); goto out; } if (write_ary != NULL) { reply_info = gf_rdma_reply_info_alloc (peer); if (reply_info == NULL) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "reply_info_alloc_failed"); ret = -1; goto out; } @@ -2837,9 +3238,7 @@ out: *readch = NULL; } - if (write_ary != NULL) { - GF_FREE (write_ary); - } + GF_FREE (write_ary); } return ret; @@ -2865,28 +3264,36 @@ gf_rdma_decode_header (gf_rdma_peer_t *peer, gf_rdma_post_t *post, case GF_RDMA_MSG: case GF_RDMA_NOMSG: ret = gf_rdma_decode_msg (peer, post, readch, bytes_in_post); + if (ret < 0) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "cannot decode msg of type (%d)", + header->rm_type); + } + break; case GF_RDMA_MSGP: - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, "rdma msg of msg-type GF_RDMA_MSGP should not have " "been received"); ret = -1; break; case GF_RDMA_DONE: - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, "rdma msg of msg-type GF_RDMA_DONE should not have " "been received"); ret = -1; break; case GF_RDMA_ERROR: - /* ret = gf_rdma_decode_error_msg (peer, post, bytes_in_post); */ + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "received a msg of type RDMA_ERROR"); + ret = gf_rdma_decode_error_msg (peer, post, bytes_in_post); break; default: - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, + gf_log (GF_RDMA_LOG_NAME, 
GF_LOG_WARNING, "unknown rdma msg-type (%d)", header->rm_type); } @@ -2904,6 +3311,8 @@ __gf_rdma_read (gf_rdma_peer_t *peer, gf_rdma_post_t *post, struct iovec *to, ret = __gf_rdma_register_local_mr_for_rdma (peer, to, 1, &post->ctx); if (ret == -1) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "registering local memory for rdma read failed"); goto out; } @@ -2921,7 +3330,8 @@ __gf_rdma_read (gf_rdma_peer_t *peer, gf_rdma_post_t *post, struct iovec *to, ret = ibv_post_send (peer->qp, &wr, &bad_wr); if (ret) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, "rdma read from client " + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "rdma read from client " "(%s) failed with ret = %d (%s)", peer->trans->peerinfo.identifier, ret, (ret > 0) ? strerror (ret) : ""); @@ -2950,7 +3360,7 @@ gf_rdma_do_reads (gf_rdma_peer_t *peer, gf_rdma_post_t *post, } if (i == 0) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, "message type specified as rdma-read but there are no " "rdma read-chunks present"); goto out; @@ -2980,6 +3390,10 @@ gf_rdma_do_reads (gf_rdma_peer_t *peer, gf_rdma_post_t *post, pthread_mutex_lock (&priv->write_mutex); { if (!priv->connected) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "transport not connected to peer (%s), " + "not doing rdma reads", + peer->trans->peerinfo.identifier); goto unlock; } @@ -2993,6 +3407,9 @@ gf_rdma_do_reads (gf_rdma_peer_t *peer, gf_rdma_post_t *post, &post->ctx.vector[count], &readch[i]); if (ret == -1) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "rdma read from peer (%s) failed", + peer->trans->peerinfo.identifier); goto unlock; } @@ -3099,6 +3516,10 @@ gf_rdma_pollin_notify (gf_rdma_peer_t *peer, gf_rdma_post_t *post) ret = rpc_transport_notify (peer->trans, RPC_TRANSPORT_MSG_RECEIVED, pollin); + if (ret < 0) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "transport_notify failed"); + } out: if (pollin != NULL) { @@ -3159,15 +3580,15 @@ gf_rdma_recv_reply (gf_rdma_peer_t *peer, gf_rdma_post_t *post) RPC_TRANSPORT_MAP_XID_REQUEST, &request_info); if (ret == -1) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, - "cannot get request information from rpc " - "layer"); + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "cannot get request information (peer:%s) from rpc " + "layer", peer->trans->peerinfo.identifier); goto out; } rpc_req = request_info.rpc_req; if (rpc_req == NULL) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, "rpc request structure not found"); ret = -1; goto out; @@ -3185,6 +3606,10 @@ gf_rdma_recv_reply (gf_rdma_peer_t *peer, gf_rdma_post_t *post) out: if (ret == 0) { ret = gf_rdma_pollin_notify (peer, post); + if (ret < 0) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "pollin notify failed"); + } } return ret; @@ -3199,10 +3624,15 @@ gf_rdma_recv_request (gf_rdma_peer_t *peer, gf_rdma_post_t *post, if (readch != NULL) { ret = gf_rdma_do_reads (peer, post, readch); + if (ret < 0) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "rdma read from peer (%s) failed", + peer->trans->peerinfo.identifier); + } } else { ret = gf_rdma_pollin_notify (peer, post); if (ret == -1) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, "pollin notification failed"); } } @@ -3219,23 +3649,44 @@ gf_rdma_process_recv (gf_rdma_peer_t *peer, struct ibv_wc *wc) uint32_t *ptr = NULL; enum msg_type msg_type = 0; gf_rdma_header_t *header = NULL; + gf_rdma_private_t *priv = NULL; post = (gf_rdma_post_t *) (long) wc->wr_id; if (post == NULL) { - gf_log 
(GF_RDMA_LOG_NAME, GF_LOG_DEBUG, + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, "no post found in successful work completion element"); goto out; } ret = gf_rdma_decode_header (peer, post, &readch, wc->byte_len); if (ret == -1) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, "decoding of header failed"); goto out; } header = (gf_rdma_header_t *)post->buf; + priv = peer->trans->private; + + pthread_mutex_lock (&priv->write_mutex); + { + if (!priv->peer.quota_set) { + priv->peer.quota_set = 1; + + /* Initially peer.quota is set to 1 as per RFC 5666. We + * have to account for the quota used while sending + * first msg (which may or may not be returned to pool + * at this point) while deriving peer.quota from + * header->rm_credit. Hence the arithmatic below, + * instead of directly setting it to header->rm_credit. + */ + priv->peer.quota = header->rm_credit + - ( 1 - priv->peer.quota); + } + } + pthread_mutex_unlock (&priv->write_mutex); + switch (header->rm_type) { case GF_RDMA_MSG: ptr = (uint32_t *)post->ctx.vector[0].iov_base; @@ -3251,30 +3702,48 @@ gf_rdma_process_recv (gf_rdma_peer_t *peer, struct ibv_wc *wc) break; case GF_RDMA_ERROR: - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "an error has happened while transmission of msg, " - "disconnecting the transport"); - rpc_transport_disconnect (peer->trans); - goto out; - -/* ret = gf_rdma_pollin_notify (peer, post); - if (ret == -1) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, - "pollin notification failed"); - } - goto out; -*/ + if (header->rm_body.rm_error.rm_type == ERR_CHUNK) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "peer (%s), couldn't encode or decode the msg " + "properly or write chunks were not provided " + "for replies that were bigger than " + "RDMA_INLINE_THRESHOLD (%d)", + peer->trans->peerinfo.identifier, + GLUSTERFS_RDMA_INLINE_THRESHOLD); + ret = gf_rdma_pollin_notify (peer, post); + if (ret == -1) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, + "pollin notification failed"); + } + goto out; + } else { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, + "an error has happened while transmission of " + "msg, disconnecting the transport"); + ret = -1; + goto out; + } default: - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, "invalid rdma msg-type (%d)", header->rm_type); - break; + goto out; } if (msg_type == CALL) { ret = gf_rdma_recv_request (peer, post, readch); + if (ret < 0) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "receiving a request from peer (%s) failed", + peer->trans->peerinfo.identifier); + } } else { ret = gf_rdma_recv_reply (peer, post); + if (ret < 0) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "receiving a reply from peer (%s) failed", + peer->trans->peerinfo.identifier); + } } out: @@ -3285,6 +3754,42 @@ out: return; } +void * +gf_rdma_async_event_thread (void *context) +{ + struct ibv_async_event event; + int ret; + + while (1) { + do { + ret = ibv_get_async_event((struct ibv_context *)context, + &event); + + if (ret && errno != EINTR) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "Error getting event (%s)", + strerror (errno)); + } + } while(ret && errno == EINTR); + + switch (event.event_type) { + case IBV_EVENT_SRQ_LIMIT_REACHED: + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "recieved srq_limit reached"); + break; + + default: + gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, + "event (%d) recieved", event.event_type); + break; + } + + ibv_ack_async_event(&event); + } + + return 0; +} + static void * gf_rdma_recv_completion_proc 
(void *data) @@ -3403,7 +3908,7 @@ gf_rdma_handle_failed_send_completion (gf_rdma_peer_t *peer, struct ibv_wc *wc) post = (gf_rdma_post_t *) (long) wc->wr_id; - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, "send work request on `%s' returned error " "wc.status = %d, wc.vendor_err = %d, post->buf = %p, " "wc.byte_len = %d, post->reused = %d", @@ -3670,364 +4175,87 @@ gf_rdma_options_init (rpc_transport_t *this) return; } -static void -gf_rdma_queue_init (gf_rdma_queue_t *queue) -{ - pthread_mutex_init (&queue->lock, NULL); - - queue->active_posts.next = &queue->active_posts; - queue->active_posts.prev = &queue->active_posts; - queue->passive_posts.next = &queue->passive_posts; - queue->passive_posts.prev = &queue->passive_posts; -} - -static gf_rdma_device_t * -gf_rdma_get_device (rpc_transport_t *this, struct ibv_context *ibctx) +gf_rdma_ctx_t * +__gf_rdma_ctx_create (void) { - glusterfs_ctx_t *ctx = NULL; - gf_rdma_private_t *priv = NULL; - gf_rdma_options_t *options = NULL; - char *device_name = NULL; - uint32_t port = 0; - uint8_t active_port = 0; - int32_t ret = 0; - int32_t i = 0; - gf_rdma_device_t *trav = NULL; + gf_rdma_ctx_t *rdma_ctx = NULL; + int ret = -1; - priv = this->private; - options = &priv->options; - device_name = priv->options.device_name; - ctx = this->ctx; - trav = ctx->ib; - port = priv->options.port; - - while (trav) { - if ((!strcmp (trav->device_name, device_name)) && - (trav->port == port)) - break; - trav = trav->next; + rdma_ctx = GF_CALLOC (1, sizeof (*rdma_ctx), gf_common_mt_char); + if (rdma_ctx == NULL) { + goto out; } - if (!trav) { - - trav = GF_CALLOC (1, sizeof (*trav), - gf_common_mt_rdma_device_t); - if (trav == NULL) { - return NULL; - } - - priv->device = trav; - - trav->context = ibctx; - - ret = ib_get_active_port (trav->context); - - if (ret < 0) { - if (!port) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "Failed to find any active ports and " - "none specified in volume file," - " exiting"); - GF_FREE (trav); - return NULL; - } - } - - trav->request_ctx_pool = mem_pool_new (gf_rdma_request_context_t, - GF_RDMA_POOL_SIZE); - if (trav->request_ctx_pool == NULL) { - return NULL; - } - - trav->ioq_pool = mem_pool_new (gf_rdma_ioq_t, GF_RDMA_POOL_SIZE); - if (trav->ioq_pool == NULL) { - mem_pool_destroy (trav->request_ctx_pool); - return NULL; - } - - trav->reply_info_pool = mem_pool_new (gf_rdma_reply_info_t, - GF_RDMA_POOL_SIZE); - if (trav->reply_info_pool == NULL) { - mem_pool_destroy (trav->request_ctx_pool); - mem_pool_destroy (trav->ioq_pool); - return NULL; - } - - - active_port = ret; - - if (port) { - ret = ib_check_active_port (trav->context, port); - if (ret < 0) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, - "On device %s: provided port:%u is " - "found to be offline, continuing to " - "use the same port", device_name, port); - } - } else { - priv->options.port = active_port; - port = active_port; - gf_log (GF_RDMA_LOG_NAME, GF_LOG_TRACE, - "Port unspecified in volume file using active " - "port: %u", port); - } - - trav->device_name = gf_strdup (device_name); - trav->port = port; - - trav->next = ctx->ib; - ctx->ib = trav; - - trav->send_chan = ibv_create_comp_channel (trav->context); - if (!trav->send_chan) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "%s: could not create send completion channel", - device_name); - mem_pool_destroy (trav->ioq_pool); - mem_pool_destroy (trav->request_ctx_pool); - mem_pool_destroy (trav->reply_info_pool); - GF_FREE ((char *)trav->device_name); - GF_FREE (trav); 
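The removed code around this point is the old per-listener verbs bring-up (completion channels, CQ, protection domain, SRQ, posts and completion threads). The rest of this patch replaces that with connection management through librdmacm: a process-wide event channel plus an rdma-cm handler thread, with each transport owning an rdma_cm_id. A rough sketch of the client-side call order this relies on, using only standard librdmacm calls; the helper name and its arguments are placeholders (not part of the patch), and the later steps are presumably driven from gf_rdma_cm_event_handler, which is not shown in this hunk:

    #include <sys/socket.h>
    #include <rdma/rdma_cma.h>

    /* Hypothetical helper, not part of the patch: shows the order of the
     * librdmacm calls the transport now depends on for an outgoing
     * connection.  'remote' and 'transport' are placeholders. */
    static int
    example_rdma_client_setup (struct sockaddr *remote, void *transport)
    {
            struct rdma_event_channel *chan = NULL;
            struct rdma_cm_id         *id   = NULL;
            int                        ret  = -1;

            chan = rdma_create_event_channel ();
            if (chan == NULL)
                    return -1;

            ret = rdma_create_id (chan, &id, transport, RDMA_PS_TCP);
            if (ret != 0)
                    goto out;

            /* Asynchronous from here on: ADDR_RESOLVED, ROUTE_RESOLVED and
             * ESTABLISHED events arrive on 'chan'; in this patch they are
             * presumably consumed by the gf_rdma_cm_event_handler thread,
             * with the QP created via rdma_create_qp() once the id's verbs
             * context is known.  The 2000 ms timeout matches the value used
             * in gf_rdma_connect(). */
            ret = rdma_resolve_addr (id, NULL, remote, 2000);

    out:
            if (ret != 0) {
                    if (id != NULL)
                            rdma_destroy_id (id);
                    rdma_destroy_event_channel (chan);
            }

            return ret;
    }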
- return NULL; - } - - trav->recv_chan = ibv_create_comp_channel (trav->context); - if (!trav->recv_chan) { - mem_pool_destroy (trav->ioq_pool); - mem_pool_destroy (trav->request_ctx_pool); - mem_pool_destroy (trav->reply_info_pool); - ibv_destroy_comp_channel (trav->send_chan); - GF_FREE ((char *)trav->device_name); - GF_FREE (trav); - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "could not create recv completion channel"); - /* TODO: cleanup current mess */ - return NULL; - } - - if (gf_rdma_create_cq (this) < 0) { - mem_pool_destroy (trav->ioq_pool); - mem_pool_destroy (trav->request_ctx_pool); - mem_pool_destroy (trav->reply_info_pool); - ibv_destroy_comp_channel (trav->recv_chan); - ibv_destroy_comp_channel (trav->send_chan); - GF_FREE ((char *)trav->device_name); - GF_FREE (trav); - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "%s: could not create CQ", - this->name); - return NULL; - } - - /* protection domain */ - trav->pd = ibv_alloc_pd (trav->context); - - if (!trav->pd) { - mem_pool_destroy (trav->ioq_pool); - mem_pool_destroy (trav->request_ctx_pool); - mem_pool_destroy (trav->reply_info_pool); - gf_rdma_destroy_cq (this); - ibv_destroy_comp_channel (trav->recv_chan); - ibv_destroy_comp_channel (trav->send_chan); - GF_FREE ((char *)trav->device_name); - GF_FREE (trav); - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "%s: could not allocate protection domain", - this->name); - return NULL; - } - - struct ibv_srq_init_attr attr = { - .attr = { - .max_wr = options->recv_count, - .max_sge = 1 - } - }; - trav->srq = ibv_create_srq (trav->pd, &attr); - - if (!trav->srq) { - mem_pool_destroy (trav->ioq_pool); - mem_pool_destroy (trav->request_ctx_pool); - mem_pool_destroy (trav->reply_info_pool); - ibv_dealloc_pd (trav->pd); - gf_rdma_destroy_cq (this); - ibv_destroy_comp_channel (trav->recv_chan); - ibv_destroy_comp_channel (trav->send_chan); - GF_FREE ((char *)trav->device_name); - GF_FREE (trav); - - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "%s: could not create SRQ", - this->name); - return NULL; - } - - /* queue init */ - gf_rdma_queue_init (&trav->sendq); - gf_rdma_queue_init (&trav->recvq); - - if (gf_rdma_create_posts (this) < 0) { - mem_pool_destroy (trav->ioq_pool); - mem_pool_destroy (trav->request_ctx_pool); - mem_pool_destroy (trav->reply_info_pool); - ibv_dealloc_pd (trav->pd); - gf_rdma_destroy_cq (this); - ibv_destroy_comp_channel (trav->recv_chan); - ibv_destroy_comp_channel (trav->send_chan); - GF_FREE ((char *)trav->device_name); - GF_FREE (trav); - - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "%s: could not allocate posts", - this->name); - return NULL; - } - - /* completion threads */ - ret = pthread_create (&trav->send_thread, - NULL, - gf_rdma_send_completion_proc, - trav->send_chan); - if (ret) { - gf_rdma_destroy_posts (this); - mem_pool_destroy (trav->ioq_pool); - mem_pool_destroy (trav->request_ctx_pool); - mem_pool_destroy (trav->reply_info_pool); - ibv_dealloc_pd (trav->pd); - gf_rdma_destroy_cq (this); - ibv_destroy_comp_channel (trav->recv_chan); - ibv_destroy_comp_channel (trav->send_chan); - GF_FREE ((char *)trav->device_name); - GF_FREE (trav); + rdma_ctx->rdma_cm_event_channel = rdma_create_event_channel (); + if (rdma_ctx->rdma_cm_event_channel == NULL) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "rdma_cm event channel creation failed (%s)", + strerror (errno)); + goto out; + } - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "could not create send completion thread"); - return NULL; - } + ret = gf_thread_create (&rdma_ctx->rdma_cm_thread, NULL, + 
gf_rdma_cm_event_handler, + rdma_ctx->rdma_cm_event_channel); + if (ret != 0) { + gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, + "creation of thread to handle rdma-cm events " + "failed (%s)", strerror (ret)); + goto out; + } - ret = pthread_create (&trav->recv_thread, - NULL, - gf_rdma_recv_completion_proc, - trav->recv_chan); - if (ret) { - gf_rdma_destroy_posts (this); - mem_pool_destroy (trav->ioq_pool); - mem_pool_destroy (trav->request_ctx_pool); - mem_pool_destroy (trav->reply_info_pool); - ibv_dealloc_pd (trav->pd); - gf_rdma_destroy_cq (this); - ibv_destroy_comp_channel (trav->recv_chan); - ibv_destroy_comp_channel (trav->send_chan); - GF_FREE ((char *)trav->device_name); - GF_FREE (trav); - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "could not create recv completion thread"); - return NULL; +out: + if (ret < 0) { + if (rdma_ctx->rdma_cm_event_channel != NULL) { + rdma_destroy_event_channel (rdma_ctx->rdma_cm_event_channel); } - /* qpreg */ - pthread_mutex_init (&trav->qpreg.lock, NULL); - for (i=0; i<42; i++) { - trav->qpreg.ents[i].next = &trav->qpreg.ents[i]; - trav->qpreg.ents[i].prev = &trav->qpreg.ents[i]; - } + GF_FREE (rdma_ctx); + rdma_ctx = NULL; } - return trav; + + return rdma_ctx; } static int32_t gf_rdma_init (rpc_transport_t *this) { gf_rdma_private_t *priv = NULL; - gf_rdma_options_t *options = NULL; - struct ibv_device **dev_list; - struct ibv_context *ib_ctx = NULL; int32_t ret = 0; + glusterfs_ctx_t *ctx = NULL; + gf_rdma_options_t *options = NULL; + + ctx= this->ctx; priv = this->private; - options = &priv->options; ibv_fork_init (); gf_rdma_options_init (this); - { - dev_list = ibv_get_device_list (NULL); - - if (!dev_list) { - gf_log (GF_RDMA_LOG_NAME, - GF_LOG_CRITICAL, - "Failed to get IB devices"); - ret = -1; - goto cleanup; - } - - if (!*dev_list) { - gf_log (GF_RDMA_LOG_NAME, - GF_LOG_CRITICAL, - "No IB devices found"); - ret = -1; - goto cleanup; - } - - if (!options->device_name) { - if (*dev_list) { - options->device_name = - gf_strdup (ibv_get_device_name (*dev_list)); - } else { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_CRITICAL, - "IB device list is empty. 
Check for " - "'ib_uverbs' module"); - return -1; - goto cleanup; - } - } - - while (*dev_list) { - if (!strcmp (ibv_get_device_name (*dev_list), - options->device_name)) { - ib_ctx = ibv_open_device (*dev_list); - - if (!ib_ctx) { - gf_log (GF_RDMA_LOG_NAME, - GF_LOG_ERROR, - "Failed to get infiniband" - "device context"); - ret = -1; - goto cleanup; - } - break; - } - ++dev_list; - } - - priv->device = gf_rdma_get_device (this, ib_ctx); - - if (!priv->device) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "could not create rdma device for %s", - options->device_name); - ret = -1; - goto cleanup; - } - } + options = &priv->options; + priv->peer.send_count = options->send_count; + priv->peer.recv_count = options->recv_count; + priv->peer.send_size = options->send_size; + priv->peer.recv_size = options->recv_size; priv->peer.trans = this; INIT_LIST_HEAD (&priv->peer.ioq); - pthread_mutex_init (&priv->read_mutex, NULL); pthread_mutex_init (&priv->write_mutex, NULL); pthread_mutex_init (&priv->recv_mutex, NULL); pthread_cond_init (&priv->recv_cond, NULL); -cleanup: - if (-1 == ret) { - if (ib_ctx) - ibv_close_device (ib_ctx); + pthread_mutex_lock (&ctx->lock); + { + if (ctx->ib == NULL) { + ctx->ib = __gf_rdma_ctx_create (); + if (ctx->ib == NULL) { + ret = -1; + } + } } - - if (dev_list) - ibv_free_device_list (dev_list); + pthread_mutex_unlock (&ctx->lock); return ret; } @@ -4040,543 +4268,69 @@ gf_rdma_disconnect (rpc_transport_t *this) int32_t ret = 0; priv = this->private; - pthread_mutex_lock (&priv->write_mutex); - { - ret = __gf_rdma_disconnect (this); - } - pthread_mutex_unlock (&priv->write_mutex); - - return ret; -} - - -static int32_t -__tcp_connect_finish (int fd) -{ - int ret = -1; - int optval = 0; - socklen_t optlen = sizeof (int); - - ret = getsockopt (fd, SOL_SOCKET, SO_ERROR, - (void *)&optval, &optlen); - - if (ret == 0 && optval) - { - errno = optval; - ret = -1; - } - - return ret; -} - -static inline void -gf_rdma_fill_handshake_data (char *buf, struct gf_rdma_nbio *nbio, - gf_rdma_private_t *priv) -{ - sprintf (buf, - "QP1:RECV_BLKSIZE=%08x:SEND_BLKSIZE=%08x\n" - "QP1:LID=%04x:QPN=%06x:PSN=%06x\n", - priv->peer.recv_size, - priv->peer.send_size, - priv->peer.local_lid, - priv->peer.local_qpn, - priv->peer.local_psn); - - nbio->vector.iov_base = buf; - nbio->vector.iov_len = strlen (buf) + 1; - nbio->count = 1; - return; -} - -static inline void -gf_rdma_fill_handshake_ack (char *buf, struct gf_rdma_nbio *nbio) -{ - sprintf (buf, "DONE\n"); - nbio->vector.iov_base = buf; - nbio->vector.iov_len = strlen (buf) + 1; - nbio->count = 1; - return; -} - -static int -gf_rdma_handshake_pollin (rpc_transport_t *this) -{ - int ret = 0; - gf_rdma_private_t *priv = NULL; - char *buf = NULL; - int32_t recv_buf_size = 0, send_buf_size; - socklen_t sock_len = 0; - - priv = this->private; - buf = priv->handshake.incoming.buf; - - if (priv->handshake.incoming.state == GF_RDMA_HANDSHAKE_COMPLETE) { - return -1; - } + gf_log_callingfn (this->name, GF_LOG_WARNING, + "disconnect called (peer:%s)", + this->peerinfo.identifier); pthread_mutex_lock (&priv->write_mutex); { - while (priv->handshake.incoming.state != GF_RDMA_HANDSHAKE_COMPLETE) - { - switch (priv->handshake.incoming.state) - { - case GF_RDMA_HANDSHAKE_START: - buf = priv->handshake.incoming.buf = GF_CALLOC (1, 256, gf_common_mt_char); - gf_rdma_fill_handshake_data (buf, &priv->handshake.incoming, priv); - buf[0] = 0; - priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_RECEIVING_DATA; - break; - - case 
GF_RDMA_HANDSHAKE_RECEIVING_DATA: - ret = __tcp_readv (this, - &priv->handshake.incoming.vector, - priv->handshake.incoming.count, - &priv->handshake.incoming.pending_vector, - &priv->handshake.incoming.pending_count); - if (ret == -1) { - goto unlock; - } - - if (ret > 0) { - gf_log (this->name, GF_LOG_TRACE, - "partial header read on NB socket. continue later"); - goto unlock; - } - - if (!ret) { - priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_RECEIVED_DATA; - } - break; - - case GF_RDMA_HANDSHAKE_RECEIVED_DATA: - ret = sscanf (buf, - "QP1:RECV_BLKSIZE=%08x:SEND_BLKSIZE=%08x\n" - "QP1:LID=%04x:QPN=%06x:PSN=%06x\n", - &recv_buf_size, - &send_buf_size, - &priv->peer.remote_lid, - &priv->peer.remote_qpn, - &priv->peer.remote_psn); - - if ((ret != 5) && (strncmp (buf, "QP1:", 4))) { - gf_log (GF_RDMA_LOG_NAME, - GF_LOG_CRITICAL, - "%s: remote-host(%s)'s " - "transport type is different", - this->name, - this->peerinfo.identifier); - ret = -1; - goto unlock; - } - - if (recv_buf_size < priv->peer.recv_size) - priv->peer.recv_size = recv_buf_size; - if (send_buf_size < priv->peer.send_size) - priv->peer.send_size = send_buf_size; - - gf_log (GF_RDMA_LOG_NAME, GF_LOG_TRACE, - "%s: transacted recv_size=%d " - "send_size=%d", - this->name, priv->peer.recv_size, - priv->peer.send_size); - - priv->peer.quota = priv->peer.send_count; - - if (gf_rdma_connect_qp (this)) { - gf_log (GF_RDMA_LOG_NAME, - GF_LOG_ERROR, - "%s: failed to connect with " - "remote QP", this->name); - ret = -1; - goto unlock; - } - gf_rdma_fill_handshake_ack (buf, &priv->handshake.incoming); - buf[0] = 0; - priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_RECEIVING_ACK; - break; - - case GF_RDMA_HANDSHAKE_RECEIVING_ACK: - ret = __tcp_readv (this, - &priv->handshake.incoming.vector, - priv->handshake.incoming.count, - &priv->handshake.incoming.pending_vector, - &priv->handshake.incoming.pending_count); - if (ret == -1) { - goto unlock; - } - - if (ret > 0) { - gf_log (this->name, GF_LOG_TRACE, - "partial header read on NB " - "socket. 
continue later"); - goto unlock; - } - - if (!ret) { - priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_RECEIVED_ACK; - } - break; - - case GF_RDMA_HANDSHAKE_RECEIVED_ACK: - if (strncmp (buf, "DONE", 4)) { - gf_log (GF_RDMA_LOG_NAME, - GF_LOG_DEBUG, - "%s: handshake-3 did not " - "return 'DONE' (%s)", - this->name, buf); - ret = -1; - goto unlock; - } - ret = 0; - priv->connected = 1; - sock_len = sizeof (struct sockaddr_storage); - getpeername (priv->sock, - (struct sockaddr *) &this->peerinfo.sockaddr, - &sock_len); - - GF_FREE (priv->handshake.incoming.buf); - priv->handshake.incoming.buf = NULL; - priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_COMPLETE; - } - } - } -unlock: - pthread_mutex_unlock (&priv->write_mutex); - - if (ret == -1) { - rpc_transport_disconnect (this); - } else { - ret = 0; - } - - - if (!ret && priv->connected) { - if (priv->is_server) { - ret = rpc_transport_notify (priv->listener, - RPC_TRANSPORT_ACCEPT, - this); - } else { - ret = rpc_transport_notify (this, RPC_TRANSPORT_CONNECT, - this); - } - } - - return ret; -} - -static int -gf_rdma_handshake_pollout (rpc_transport_t *this) -{ - gf_rdma_private_t *priv = NULL; - char *buf = NULL; - int32_t ret = 0; - - priv = this->private; - buf = priv->handshake.outgoing.buf; - - if (priv->handshake.outgoing.state == GF_RDMA_HANDSHAKE_COMPLETE) { - return 0; - } - - pthread_mutex_unlock (&priv->write_mutex); - { - while (priv->handshake.outgoing.state - != GF_RDMA_HANDSHAKE_COMPLETE) - { - switch (priv->handshake.outgoing.state) - { - case GF_RDMA_HANDSHAKE_START: - buf = priv->handshake.outgoing.buf - = GF_CALLOC (1, 256, gf_common_mt_char); - gf_rdma_fill_handshake_data (buf, - &priv->handshake.outgoing, priv); - priv->handshake.outgoing.state - = GF_RDMA_HANDSHAKE_SENDING_DATA; - break; - - case GF_RDMA_HANDSHAKE_SENDING_DATA: - ret = __tcp_writev (this, - &priv->handshake.outgoing.vector, - priv->handshake.outgoing.count, - &priv->handshake.outgoing.pending_vector, - &priv->handshake.outgoing.pending_count); - if (ret == -1) { - goto unlock; - } - - if (ret > 0) { - gf_log (this->name, GF_LOG_TRACE, - "partial header read on NB " - "socket. continue later"); - goto unlock; - } - - if (!ret) { - priv->handshake.outgoing.state - = GF_RDMA_HANDSHAKE_SENT_DATA; - } - break; - - case GF_RDMA_HANDSHAKE_SENT_DATA: - gf_rdma_fill_handshake_ack (buf, - &priv->handshake.outgoing); - priv->handshake.outgoing.state - = GF_RDMA_HANDSHAKE_SENDING_ACK; - break; - - case GF_RDMA_HANDSHAKE_SENDING_ACK: - ret = __tcp_writev (this, - &priv->handshake.outgoing.vector, - priv->handshake.outgoing.count, - &priv->handshake.outgoing.pending_vector, - &priv->handshake.outgoing.pending_count); - - if (ret == -1) { - goto unlock; - } - - if (ret > 0) { - gf_log (this->name, GF_LOG_TRACE, - "partial header read on NB " - "socket. 
continue later"); - goto unlock; - } - - if (!ret) { - GF_FREE (priv->handshake.outgoing.buf); - priv->handshake.outgoing.buf = NULL; - priv->handshake.outgoing.state - = GF_RDMA_HANDSHAKE_COMPLETE; - } - break; - } - } - } -unlock: - pthread_mutex_unlock (&priv->write_mutex); - - if (ret == -1) { - rpc_transport_disconnect (this); - } else { - ret = 0; - } - - return ret; -} - -static int -gf_rdma_handshake_pollerr (rpc_transport_t *this) -{ - gf_rdma_private_t *priv = this->private; - char need_unref = 0, connected = 0; - - gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, - "%s: peer disconnected, cleaning up", - this->name); - - pthread_mutex_lock (&priv->write_mutex); - { - __gf_rdma_teardown (this); - - connected = priv->connected; - if (priv->sock != -1) { - event_unregister (this->ctx->event_pool, - priv->sock, priv->idx); - need_unref = 1; - - if (close (priv->sock) != 0) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "close () - error: %s", - strerror (errno)); - } - priv->tcp_connected = priv->connected = 0; - priv->sock = -1; - } - - if (priv->handshake.incoming.buf) { - GF_FREE (priv->handshake.incoming.buf); - priv->handshake.incoming.buf = NULL; - } - - priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_START; - - if (priv->handshake.outgoing.buf) { - GF_FREE (priv->handshake.outgoing.buf); - priv->handshake.outgoing.buf = NULL; - } - - priv->handshake.outgoing.state = GF_RDMA_HANDSHAKE_START; - } - pthread_mutex_unlock (&priv->write_mutex); - - if (connected) { - rpc_transport_notify (this, RPC_TRANSPORT_DISCONNECT, this); - } - - if (need_unref) - rpc_transport_unref (this); - - return 0; -} - - -static int -tcp_connect_finish (rpc_transport_t *this) -{ - gf_rdma_private_t *priv = NULL; - int error = 0, ret = 0; - - priv = this->private; - pthread_mutex_lock (&priv->write_mutex); - { - ret = __tcp_connect_finish (priv->sock); - - if (!ret) { - this->myinfo.sockaddr_len = - sizeof (this->myinfo.sockaddr); - ret = getsockname (priv->sock, - (struct sockaddr *)&this->myinfo.sockaddr, - &this->myinfo.sockaddr_len); - if (ret == -1) - { - gf_log (this->name, GF_LOG_ERROR, - "getsockname on new client-socket %d " - "failed (%s)", - priv->sock, strerror (errno)); - close (priv->sock); - error = 1; - goto unlock; - } - - gf_rdma_get_transport_identifiers (this); - priv->tcp_connected = 1; - } - - if (ret == -1 && errno != EINPROGRESS) { - gf_log (this->name, GF_LOG_ERROR, - "tcp connect to %s failed (%s)", - this->peerinfo.identifier, strerror (errno)); - error = 1; - } + ret = __gf_rdma_disconnect (this); } -unlock: pthread_mutex_unlock (&priv->write_mutex); - if (error) { - rpc_transport_disconnect (this); - } - return ret; } -static int -gf_rdma_event_handler (int fd, int idx, void *data, - int poll_in, int poll_out, int poll_err) -{ - rpc_transport_t *this = NULL; - gf_rdma_private_t *priv = NULL; - gf_rdma_options_t *options = NULL; - int ret = 0; - - this = data; - priv = this->private; - if (!priv->tcp_connected) { - ret = tcp_connect_finish (this); - if (priv->tcp_connected) { - options = &priv->options; - - priv->peer.send_count = options->send_count; - priv->peer.recv_count = options->recv_count; - priv->peer.send_size = options->send_size; - priv->peer.recv_size = options->recv_size; - - if ((ret = gf_rdma_create_qp (this)) < 0) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "%s: could not create QP", - this->name); - rpc_transport_disconnect (this); - } - } - } - - if (!ret && poll_out && priv->tcp_connected) { - ret = gf_rdma_handshake_pollout (this); - } - - if (!ret && 
!poll_err && poll_in && priv->tcp_connected) { - if (priv->handshake.incoming.state - == GF_RDMA_HANDSHAKE_COMPLETE) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "%s: pollin received on tcp socket (peer: %s) " - "after handshake is complete", - this->name, this->peerinfo.identifier); - gf_rdma_handshake_pollerr (this); - return 0; - } - ret = gf_rdma_handshake_pollin (this); - } - - if (ret < 0 || poll_err) { - ret = gf_rdma_handshake_pollerr (this); - } - - return 0; -} - -static int -__tcp_nonblock (int fd) -{ - int flags = 0; - int ret = -1; - - flags = fcntl (fd, F_GETFL); - - if (flags != -1) - ret = fcntl (fd, F_SETFL, flags | O_NONBLOCK); - - return ret; -} static int32_t gf_rdma_connect (struct rpc_transport *this, int port) { gf_rdma_private_t *priv = NULL; int32_t ret = 0; - gf_boolean_t non_blocking = 1; union gf_sock_union sock_union = {{0, }, }; socklen_t sockaddr_len = 0; + gf_rdma_peer_t *peer = NULL; + gf_rdma_ctx_t *rdma_ctx = NULL; + gf_boolean_t connected = _gf_false; priv = this->private; + peer = &priv->peer; + + rpc_transport_ref (this); + ret = gf_rdma_client_get_remote_sockaddr (this, &sock_union.sa, &sockaddr_len, port); if (ret != 0) { gf_log (this->name, GF_LOG_DEBUG, "cannot get remote address to connect"); - return ret; + goto out; } + rdma_ctx = this->ctx->ib; pthread_mutex_lock (&priv->write_mutex); { - if (priv->sock != -1) { - ret = 0; + if (peer->cm_id != NULL) { + ret = -1; + errno = EINPROGRESS; + connected = _gf_true; goto unlock; } - priv->sock = socket (sock_union.sa.sa_family, SOCK_STREAM, 0); + priv->entity = GF_RDMA_CLIENT; - if (priv->sock == -1) { + ret = rdma_create_id (rdma_ctx->rdma_cm_event_channel, + &peer->cm_id, this, RDMA_PS_TCP); + if (ret != 0) { gf_log (this->name, GF_LOG_ERROR, - "socket () - error: %s", strerror (errno)); + "creation of rdma_cm_id failed (%s)", + strerror (errno)); ret = -errno; goto unlock; } - gf_log (this->name, GF_LOG_TRACE, - "socket fd = %d", priv->sock); - memcpy (&this->peerinfo.sockaddr, &sock_union.storage, sockaddr_len); this->peerinfo.sockaddr_len = sockaddr_len; @@ -4587,201 +4341,84 @@ gf_rdma_connect (struct rpc_transport *this, int port) ((struct sockaddr *) &this->myinfo.sockaddr)->sa_family = ((struct sockaddr *)&this->peerinfo.sockaddr)->sa_family; - if (non_blocking) - { - ret = __tcp_nonblock (priv->sock); - - if (ret == -1) - { - gf_log (this->name, GF_LOG_ERROR, - "could not set socket %d to non " - "blocking mode (%s)", - priv->sock, strerror (errno)); - close (priv->sock); - priv->sock = -1; - goto unlock; - } - } - ret = gf_rdma_client_bind (this, (struct sockaddr *)&this->myinfo.sockaddr, &this->myinfo.sockaddr_len, - priv->sock); - if (ret == -1) - { + peer->cm_id); + if (ret != 0) { gf_log (this->name, GF_LOG_WARNING, "client bind failed: %s", strerror (errno)); - close (priv->sock); - priv->sock = -1; goto unlock; } - ret = connect (priv->sock, - (struct sockaddr *)&this->peerinfo.sockaddr, - this->peerinfo.sockaddr_len); - if (ret == -1 && errno != EINPROGRESS) - { - gf_log (this->name, GF_LOG_ERROR, - "connection attempt failed (%s)", + ret = rdma_resolve_addr (peer->cm_id, NULL, &sock_union.sa, + 2000); + if (ret != 0) { + gf_log (this->name, GF_LOG_WARNING, + "rdma_resolve_addr failed (%s)", strerror (errno)); - close (priv->sock); - priv->sock = -1; goto unlock; } - priv->tcp_connected = priv->connected = 0; - - rpc_transport_ref (this); - - priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_START; - priv->handshake.outgoing.state = GF_RDMA_HANDSHAKE_START; - - priv->idx = 
event_register (this->ctx->event_pool, - priv->sock, gf_rdma_event_handler, - this, 1, 1); + priv->connected = 0; } unlock: pthread_mutex_unlock (&priv->write_mutex); - return ret; -} - -static int -gf_rdma_server_event_handler (int fd, int idx, void *data, - int poll_in, int poll_out, int poll_err) -{ - int32_t main_sock = -1; - rpc_transport_t *this = NULL, *trans = NULL; - gf_rdma_private_t *priv = NULL; - gf_rdma_private_t *trans_priv = NULL; - gf_rdma_options_t *options = NULL; - - if (!poll_in) { - return 0; - } - - trans = data; - trans_priv = (gf_rdma_private_t *) trans->private; - - this = GF_CALLOC (1, sizeof (rpc_transport_t), - gf_common_mt_rpc_transport_t); - if (this == NULL) { - return -1; - } - - this->listener = trans; - - priv = GF_CALLOC (1, sizeof (gf_rdma_private_t), - gf_common_mt_rdma_private_t); - if (priv == NULL) { - GF_FREE (priv); - return -1; - } - this->private = priv; - /* Copy all the rdma related values in priv, from trans_priv - as other than QP, all the values remain same */ - priv->device = trans_priv->device; - priv->options = trans_priv->options; - priv->is_server = 1; - priv->listener = trans; - - options = &priv->options; - - this->ops = trans->ops; - this->init = trans->init; - this->fini = trans->fini; - this->ctx = trans->ctx; - this->name = gf_strdup (trans->name); - this->notify = trans->notify; - this->mydata = trans->mydata; - - memcpy (&this->myinfo.sockaddr, &trans->myinfo.sockaddr, - trans->myinfo.sockaddr_len); - this->myinfo.sockaddr_len = trans->myinfo.sockaddr_len; - - main_sock = (trans_priv)->sock; - this->peerinfo.sockaddr_len = sizeof (this->peerinfo.sockaddr); - priv->sock = accept (main_sock, - (struct sockaddr *)&this->peerinfo.sockaddr, - &this->peerinfo.sockaddr_len); - if (priv->sock == -1) { - gf_log ("rdma/server", GF_LOG_ERROR, - "accept() failed: %s", - strerror (errno)); - GF_FREE (this->private); - GF_FREE (this); - return -1; - } - - priv->peer.trans = this; - rpc_transport_ref (this); - - gf_rdma_get_transport_identifiers (this); - - priv->tcp_connected = 1; - priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_START; - priv->handshake.outgoing.state = GF_RDMA_HANDSHAKE_START; - - priv->peer.send_count = options->send_count; - priv->peer.recv_count = options->recv_count; - priv->peer.send_size = options->send_size; - priv->peer.recv_size = options->recv_size; - INIT_LIST_HEAD (&priv->peer.ioq); +out: + if (ret != 0) { + if (!connected) { + gf_rdma_teardown (this); + } - if (gf_rdma_create_qp (this) < 0) { - gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR, - "%s: could not create QP", - this->name); - rpc_transport_disconnect (this); - return -1; + rpc_transport_unref (this); } - priv->idx = event_register (this->ctx->event_pool, priv->sock, - gf_rdma_event_handler, this, 1, 1); - - pthread_mutex_init (&priv->read_mutex, NULL); - pthread_mutex_init (&priv->write_mutex, NULL); - pthread_mutex_init (&priv->recv_mutex, NULL); - /* pthread_cond_init (&priv->recv_cond, NULL); */ - return 0; + return ret; } + static int32_t gf_rdma_listen (rpc_transport_t *this) { union gf_sock_union sock_union = {{0, }, }; socklen_t sockaddr_len = 0; gf_rdma_private_t *priv = NULL; - int opt = 1, ret = 0; + gf_rdma_peer_t *peer = NULL; + int ret = 0; + gf_rdma_ctx_t *rdma_ctx = NULL; char service[NI_MAXSERV], host[NI_MAXHOST]; priv = this->private; - memset (&sock_union, 0, sizeof (sock_union)); - ret = gf_rdma_server_get_local_sockaddr (this, - &sock_union.sa, + peer = &priv->peer; + + priv->entity = GF_RDMA_SERVER_LISTENER; + + rdma_ctx = 
this->ctx->ib; + + ret = gf_rdma_server_get_local_sockaddr (this, &sock_union.sa, &sockaddr_len); if (ret != 0) { - gf_log (this->name, GF_LOG_DEBUG, + gf_log (this->name, GF_LOG_WARNING, "cannot find network address of server to bind to"); goto err; } - priv->sock = socket (sock_union.sa.sa_family, SOCK_STREAM, 0); - if (priv->sock == -1) { - gf_log ("rdma/server", GF_LOG_CRITICAL, - "init: failed to create socket, error: %s", + ret = rdma_create_id (rdma_ctx->rdma_cm_event_channel, + &peer->cm_id, this, RDMA_PS_TCP); + if (ret != 0) { + gf_log (this->name, GF_LOG_WARNING, + "creation of rdma_cm_id failed (%s)", strerror (errno)); - GF_FREE (this->private); - ret = -1; goto err; } - memcpy (&this->myinfo.sockaddr, &sock_union.storage, sockaddr_len); + memcpy (&this->myinfo.sockaddr, &sock_union.storage, + sockaddr_len); this->myinfo.sockaddr_len = sockaddr_len; ret = getnameinfo ((struct sockaddr *)&this->myinfo.sockaddr, - this->myinfo.sockaddr_len, - host, sizeof (host), + this->myinfo.sockaddr_len, host, sizeof (host), service, sizeof (service), NI_NUMERICHOST); if (ret != 0) { @@ -4789,34 +4426,38 @@ gf_rdma_listen (rpc_transport_t *this) "getnameinfo failed (%s)", gai_strerror (ret)); goto err; } + sprintf (this->myinfo.identifier, "%s:%s", host, service); - setsockopt (priv->sock, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof (opt)); - if (bind (priv->sock, &sock_union.sa, sockaddr_len) != 0) { - ret = -1; - gf_log ("rdma/server", GF_LOG_ERROR, - "init: failed to bind to socket for %s (%s)", - this->myinfo.identifier, strerror (errno)); + ret = rdma_bind_addr (peer->cm_id, &sock_union.sa); + if (ret != 0) { + gf_log (this->name, GF_LOG_WARNING, + "rdma_bind_addr failed (%s)", strerror (errno)); goto err; } - if (listen (priv->sock, 10) != 0) { - gf_log ("rdma/server", GF_LOG_ERROR, - "init: listen () failed on socket for %s (%s)", - this->myinfo.identifier, strerror (errno)); - ret = -1; + ret = rdma_listen (peer->cm_id, 10); + if (ret != 0) { + gf_log (this->name, GF_LOG_WARNING, + "rdma_listen failed (%s)", strerror (errno)); goto err; } - /* Register the main socket */ - priv->idx = event_register (this->ctx->event_pool, priv->sock, - gf_rdma_server_event_handler, - rpc_transport_ref (this), 1, 0); + rpc_transport_ref (this); + ret = 0; err: + if (ret < 0) { + if (peer->cm_id != NULL) { + rdma_destroy_id (peer->cm_id); + peer->cm_id = NULL; + } + } + return ret; } + struct rpc_transport_ops tops = { .submit_request = gf_rdma_submit_request, .submit_reply = gf_rdma_submit_reply, @@ -4835,7 +4476,6 @@ init (rpc_transport_t *this) return -1; this->private = priv; - priv->sock = -1; if (gf_rdma_init (this)) { gf_log (this->name, GF_LOG_ERROR, @@ -4859,13 +4499,6 @@ fini (struct rpc_transport *this) if (priv) { pthread_mutex_destroy (&priv->recv_mutex); pthread_mutex_destroy (&priv->write_mutex); - pthread_mutex_destroy (&priv->read_mutex); - - /* pthread_cond_destroy (&priv->recv_cond); */ - if (priv->sock != -1) { - event_unregister (this->ctx->event_pool, - priv->sock, priv->idx); - } gf_log (this->name, GF_LOG_TRACE, "called fini on transport: %p", this); diff --git a/rpc/rpc-transport/rdma/src/rdma.h b/rpc/rpc-transport/rdma/src/rdma.h index 687d6005f..7f76244f0 100644 --- a/rpc/rpc-transport/rdma/src/rdma.h +++ b/rpc/rpc-transport/rdma/src/rdma.h @@ -29,6 +29,7 @@ #include <list.h> #include <arpa/inet.h> #include <infiniband/verbs.h> +#include <rdma/rdma_cma.h> /* FIXME: give appropriate values to these macros */ #define GF_DEFAULT_RDMA_LISTEN_PORT (GF_DEFAULT_BASE_PORT + 1) @@ -230,30 
+231,33 @@ typedef enum __gf_rdma_send_post_type { /* represents one communication peer, two per transport_t */ struct __gf_rdma_peer { - rpc_transport_t *trans; - struct ibv_qp *qp; + rpc_transport_t *trans; + struct rdma_cm_id *cm_id; + struct ibv_qp *qp; + pthread_t rdma_event_thread; + char quota_set; int32_t recv_count; int32_t send_count; int32_t recv_size; int32_t send_size; - int32_t quota; + int32_t quota; union { - struct list_head ioq; + struct list_head ioq; struct { - gf_rdma_ioq_t *ioq_next; - gf_rdma_ioq_t *ioq_prev; + gf_rdma_ioq_t *ioq_next; + gf_rdma_ioq_t *ioq_prev; }; }; /* QP attributes, needed to connect with remote QP */ - int32_t local_lid; - int32_t local_psn; - int32_t local_qpn; - int32_t remote_lid; - int32_t remote_psn; - int32_t remote_qpn; + int32_t local_lid; + int32_t local_psn; + int32_t local_qpn; + int32_t remote_lid; + int32_t remote_psn; + int32_t remote_qpn; }; typedef struct __gf_rdma_peer gf_rdma_peer_t; @@ -320,33 +324,19 @@ struct __gf_rdma_device { struct ibv_comp_channel *send_chan, *recv_chan; struct ibv_cq *send_cq, *recv_cq; gf_rdma_queue_t sendq, recvq; - pthread_t send_thread, recv_thread; + pthread_t send_thread, recv_thread, async_event_thread; struct mem_pool *request_ctx_pool; struct mem_pool *ioq_pool; struct mem_pool *reply_info_pool; }; typedef struct __gf_rdma_device gf_rdma_device_t; -typedef enum { - GF_RDMA_HANDSHAKE_START = 0, - GF_RDMA_HANDSHAKE_SENDING_DATA, - GF_RDMA_HANDSHAKE_RECEIVING_DATA, - GF_RDMA_HANDSHAKE_SENT_DATA, - GF_RDMA_HANDSHAKE_RECEIVED_DATA, - GF_RDMA_HANDSHAKE_SENDING_ACK, - GF_RDMA_HANDSHAKE_RECEIVING_ACK, - GF_RDMA_HANDSHAKE_RECEIVED_ACK, - GF_RDMA_HANDSHAKE_COMPLETE, -} gf_rdma_handshake_state_t; - -struct gf_rdma_nbio { - int state; - char *buf; - int count; - struct iovec vector; - struct iovec *pending_vector; - int pending_count; +struct __gf_rdma_ctx { + gf_rdma_device_t *device; + struct rdma_event_channel *rdma_cm_event_channel; + pthread_t rdma_cm_thread; }; +typedef struct __gf_rdma_ctx gf_rdma_ctx_t; struct __gf_rdma_request_context { struct ibv_mr *mr[GF_RDMA_MAX_SEGMENTS]; @@ -358,46 +348,35 @@ struct __gf_rdma_request_context { }; typedef struct __gf_rdma_request_context gf_rdma_request_context_t; +typedef enum { + GF_RDMA_SERVER_LISTENER, + GF_RDMA_SERVER, + GF_RDMA_CLIENT, +} gf_rdma_transport_entity_t; + struct __gf_rdma_private { - int32_t sock; - int32_t idx; - unsigned char connected; - unsigned char tcp_connected; - unsigned char ib_connected; - in_addr_t addr; + int32_t idx; + unsigned char connected; + in_addr_t addr; unsigned short port; /* IB Verbs Driver specific variables, pointers */ - gf_rdma_peer_t peer; + gf_rdma_peer_t peer; struct __gf_rdma_device *device; - gf_rdma_options_t options; + gf_rdma_options_t options; /* Used by trans->op->receive */ - char *data_ptr; - int32_t data_offset; - int32_t data_len; + char *data_ptr; + int32_t data_offset; + int32_t data_len; /* Mutex */ - pthread_mutex_t read_mutex; - pthread_mutex_t write_mutex; - pthread_barrier_t handshake_barrier; - char handshake_ret; - char is_server; - rpc_transport_t *listener; - - pthread_mutex_t recv_mutex; - pthread_cond_t recv_cond; - - /* used during gf_rdma_handshake */ - struct { - struct gf_rdma_nbio incoming; - struct gf_rdma_nbio outgoing; - int state; - gf_rdma_header_t header; - char *buf; - size_t size; - } handshake; + pthread_mutex_t write_mutex; + rpc_transport_t *listener; + pthread_mutex_t recv_mutex; + pthread_cond_t recv_cond; + gf_rdma_transport_entity_t entity; }; -typedef struct 
__gf_rdma_private gf_rdma_private_t; +typedef struct __gf_rdma_private gf_rdma_private_t; #endif /* _XPORT_GF_RDMA_H */ diff --git a/rpc/rpc-transport/socket/src/Makefile.am b/rpc/rpc-transport/socket/src/Makefile.am index 2c918c7e3..71e6ed6ff 100644 --- a/rpc/rpc-transport/socket/src/Makefile.am +++ b/rpc/rpc-transport/socket/src/Makefile.am @@ -3,13 +3,15 @@ noinst_HEADERS = socket.h name.h rpctransport_LTLIBRARIES = socket.la rpctransportdir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/rpc-transport -socket_la_LDFLAGS = -module -avoidversion +socket_la_LDFLAGS = -module -avoid-version socket_la_SOURCES = socket.c name.c -socket_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la +socket_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la -lssl -AM_CFLAGS = -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -D$(GF_HOST_OS)\ +AM_CPPFLAGS = $(GF_CPPFLAGS) \ -I$(top_srcdir)/libglusterfs/src -I$(top_srcdir)/rpc/rpc-lib/src/ \ - -I$(top_srcdir)/rpc/xdr/src/ -shared -nostartfiles $(GF_CFLAGS) + -I$(top_srcdir)/rpc/xdr/src/ + +AM_CFLAGS = -Wall $(GF_CFLAGS) CLEANFILES = *~ diff --git a/rpc/rpc-transport/socket/src/name.c b/rpc/rpc-transport/socket/src/name.c index 0b2ca5805..1647d5b6b 100644 --- a/rpc/rpc-transport/socket/src/name.c +++ b/rpc/rpc-transport/socket/src/name.c @@ -15,12 +15,6 @@ #include <netdb.h> #include <string.h> -#ifdef CLIENT_PORT_CEILING -#undef CLIENT_PORT_CEILING -#endif - -#define CLIENT_PORT_CEILING 1024 - #ifndef AF_INET_SDP #define AF_INET_SDP 27 #endif @@ -40,9 +34,17 @@ static int32_t af_inet_bind_to_port_lt_ceiling (int fd, struct sockaddr *sockaddr, socklen_t sockaddr_len, int ceiling) { - int32_t ret = -1; - /* struct sockaddr_in sin = {0, }; */ - uint16_t port = ceiling - 1; + int32_t ret = -1; + uint16_t port = ceiling - 1; + // by default assume none of the ports are blocked and all are available + gf_boolean_t ports[1024] = {_gf_false,}; + int i = 0; + + ret = gf_process_reserved_ports (ports); + if (ret != 0) { + for (i = 0; i < 1024; i++) + ports[i] = _gf_false; + } while (port) { @@ -57,7 +59,11 @@ af_inet_bind_to_port_lt_ceiling (int fd, struct sockaddr *sockaddr, ((struct sockaddr_in *)sockaddr)->sin_port = htons (port); break; } - + // ignore the reserved ports + if (ports[port] == _gf_true) { + port--; + continue; + } ret = bind (fd, sockaddr, sockaddr_len); if (ret == 0) @@ -134,24 +140,24 @@ client_fill_address_family (rpc_transport_t *this, sa_family_t *sa_family) if (!(remote_host_data || connect_path_data) || (remote_host_data && connect_path_data)) { gf_log (this->name, GF_LOG_ERROR, - "transport.address-family not specified and " - "not able to determine the " - "same from other options (remote-host:%s and " - "transport.unix.connect-path:%s)", + "transport.address-family not specified. 
" + "Could not guess default value from (remote-host:%s or " + "transport.unix.connect-path:%s) options", data_to_str (remote_host_data), data_to_str (connect_path_data)); + *sa_family = AF_UNSPEC; goto out; } if (remote_host_data) { gf_log (this->name, GF_LOG_DEBUG, "address-family not specified, guessing it " - "to be inet/inet6"); - *sa_family = AF_UNSPEC; + "to be inet from (remote-host: %s)", data_to_str (remote_host_data)); + *sa_family = AF_INET; } else { gf_log (this->name, GF_LOG_DEBUG, "address-family not specified, guessing it " - "to be unix"); + "to be unix from (transport.unix.connect-path: %s)", data_to_str (connect_path_data)); *sa_family = AF_UNIX; } @@ -165,13 +171,11 @@ client_fill_address_family (rpc_transport_t *this, sa_family_t *sa_family) *sa_family = AF_INET6; } else if (!strcasecmp (address_family, "inet-sdp")) { *sa_family = AF_INET_SDP; - } else if (!strcasecmp (address_family, "inet/inet6") - || !strcasecmp (address_family, "inet6/inet")) { - *sa_family = AF_UNSPEC; } else { gf_log (this->name, GF_LOG_ERROR, "unknown address-family (%s) specified", address_family); + *sa_family = AF_UNSPEC; goto out; } } @@ -442,12 +446,12 @@ client_bind (rpc_transport_t *this, case AF_INET6: if (!this->bind_insecure) { ret = af_inet_bind_to_port_lt_ceiling (sock, sockaddr, - *sockaddr_len, CLIENT_PORT_CEILING); + *sockaddr_len, GF_CLIENT_PORT_CEILING); } if (ret == -1) { gf_log (this->name, GF_LOG_DEBUG, "cannot bind inet socket (%d) to port less than %d (%s)", - sock, CLIENT_PORT_CEILING, strerror (errno)); + sock, GF_CLIENT_PORT_CEILING, strerror (errno)); ret = 0; } break; @@ -542,18 +546,16 @@ server_fill_address_family (rpc_transport_t *this, sa_family_t *sa_family) *sa_family = AF_INET_SDP; } else if (!strcasecmp (address_family, "unix")) { *sa_family = AF_UNIX; - } else if (!strcasecmp (address_family, "inet/inet6") - || !strcasecmp (address_family, "inet6/inet")) { - *sa_family = AF_UNSPEC; } else { gf_log (this->name, GF_LOG_ERROR, "unknown address family (%s) specified", address_family); + *sa_family = AF_UNSPEC; goto out; } } else { gf_log (this->name, GF_LOG_DEBUG, - "option address-family not specified, defaulting to inet/inet6"); - *sa_family = AF_UNSPEC; + "option address-family not specified, defaulting to inet"); + *sa_family = AF_INET; } ret = 0; diff --git a/rpc/rpc-transport/socket/src/socket.c b/rpc/rpc-transport/socket/src/socket.c index 6c2d909e4..93da3f296 100644 --- a/rpc/rpc-transport/socket/src/socket.c +++ b/rpc/rpc-transport/socket/src/socket.c @@ -35,97 +35,401 @@ #include <errno.h> #include <netinet/tcp.h> #include <rpc/xdr.h> +#include <sys/ioctl.h> #define GF_LOG_ERRNO(errno) ((errno == ENOTCONN) ? GF_LOG_DEBUG : GF_LOG_ERROR) #define SA(ptr) ((struct sockaddr *)ptr) +#define SSL_ENABLED_OPT "transport.socket.ssl-enabled" +#define SSL_OWN_CERT_OPT "transport.socket.ssl-own-cert" +#define SSL_PRIVATE_KEY_OPT "transport.socket.ssl-private-key" +#define SSL_CA_LIST_OPT "transport.socket.ssl-ca-list" +#define OWN_THREAD_OPT "transport.socket.own-thread" -#define __socket_proto_reset_pending(priv) do { \ - memset (&priv->incoming.frag.vector, 0, \ - sizeof (priv->incoming.frag.vector)); \ - priv->incoming.frag.pending_vector = \ - &priv->incoming.frag.vector; \ - priv->incoming.frag.pending_vector->iov_base = \ - priv->incoming.frag.fragcurrent; \ - priv->incoming.pending_vector = \ - priv->incoming.frag.pending_vector; \ - } while (0); +/* TBD: do automake substitutions etc. (ick) to set these. 
*/ +#if !defined(DEFAULT_CERT_PATH) +#define DEFAULT_CERT_PATH "/etc/ssl/glusterfs.pem" +#endif +#if !defined(DEFAULT_KEY_PATH) +#define DEFAULT_KEY_PATH "/etc/ssl/glusterfs.key" +#endif +#if !defined(DEFAULT_CA_PATH) +#define DEFAULT_CA_PATH "/etc/ssl/glusterfs.ca" +#endif + +#define POLL_MASK_INPUT (POLLIN | POLLPRI) +#define POLL_MASK_OUTPUT (POLLOUT) +#define POLL_MASK_ERROR (POLLERR | POLLHUP | POLLNVAL) + +typedef int SSL_unary_func (SSL *); +typedef int SSL_trinary_func (SSL *, void *, int); + +#define __socket_proto_reset_pending(priv) do { \ + struct gf_sock_incoming_frag *frag; \ + frag = &priv->incoming.frag; \ + \ + memset (&frag->vector, 0, sizeof (frag->vector)); \ + frag->pending_vector = &frag->vector; \ + frag->pending_vector->iov_base = frag->fragcurrent; \ + priv->incoming.pending_vector = frag->pending_vector; \ + } while (0) #define __socket_proto_update_pending(priv) \ do { \ - uint32_t remaining_fragsize = 0; \ - if (priv->incoming.frag.pending_vector->iov_len == 0) { \ - remaining_fragsize = RPC_FRAGSIZE (priv->incoming.fraghdr) \ - - priv->incoming.frag.bytes_read; \ + uint32_t remaining; \ + struct gf_sock_incoming_frag *frag; \ + frag = &priv->incoming.frag; \ + if (frag->pending_vector->iov_len == 0) { \ + remaining = (RPC_FRAGSIZE (priv->incoming.fraghdr) \ + - frag->bytes_read); \ \ - priv->incoming.frag.pending_vector->iov_len = \ - remaining_fragsize > priv->incoming.frag.remaining_size \ - ? priv->incoming.frag.remaining_size : remaining_fragsize; \ + frag->pending_vector->iov_len = \ + (remaining > frag->remaining_size) \ + ? frag->remaining_size : remaining; \ \ - priv->incoming.frag.remaining_size -= \ - priv->incoming.frag.pending_vector->iov_len; \ + frag->remaining_size -= \ + frag->pending_vector->iov_len; \ } \ - } while (0); + } while (0) #define __socket_proto_update_priv_after_read(priv, ret, bytes_read) \ { \ - priv->incoming.frag.fragcurrent += bytes_read; \ - priv->incoming.frag.bytes_read += bytes_read; \ + struct gf_sock_incoming_frag *frag; \ + frag = &priv->incoming.frag; \ \ - if ((ret > 0) || (priv->incoming.frag.remaining_size != 0)) { \ - if (priv->incoming.frag.remaining_size != 0 && ret == 0) { \ + frag->fragcurrent += bytes_read; \ + frag->bytes_read += bytes_read; \ + \ + if ((ret > 0) || (frag->remaining_size != 0)) { \ + if (frag->remaining_size != 0 && ret == 0) { \ __socket_proto_reset_pending (priv); \ } \ \ - gf_log (this->name, GF_LOG_TRACE, "partial read on non-blocking socket"); \ + gf_log (this->name, GF_LOG_TRACE, \ + "partial read on non-blocking socket"); \ \ break; \ } \ } -#define __socket_proto_init_pending(priv, size) \ +#define __socket_proto_init_pending(priv,size) \ do { \ - uint32_t remaining_fragsize = 0; \ - remaining_fragsize = RPC_FRAGSIZE (priv->incoming.fraghdr) \ - - priv->incoming.frag.bytes_read; \ + uint32_t remaining = 0; \ + struct gf_sock_incoming_frag *frag; \ + frag = &priv->incoming.frag; \ + \ + remaining = (RPC_FRAGSIZE (priv->incoming.fraghdr) \ + - frag->bytes_read); \ \ - __socket_proto_reset_pending (priv); \ + __socket_proto_reset_pending (priv); \ \ - priv->incoming.frag.pending_vector->iov_len = \ - remaining_fragsize > size ? size : remaining_fragsize; \ + frag->pending_vector->iov_len = \ + (remaining > size) ? 
size : remaining; \ \ - priv->incoming.frag.remaining_size = \ - size - priv->incoming.frag.pending_vector->iov_len; \ + frag->remaining_size = (size - frag->pending_vector->iov_len); \ \ - } while (0); + } while(0) /* This will be used in a switch case and breaks from the switch case if all * the pending data is not read. */ #define __socket_proto_read(priv, ret) \ - { \ + { \ size_t bytes_read = 0; \ + struct gf_sock_incoming *in; \ + in = &priv->incoming; \ \ __socket_proto_update_pending (priv); \ \ ret = __socket_readv (this, \ - priv->incoming.pending_vector, 1, \ - &priv->incoming.pending_vector, \ - &priv->incoming.pending_count, \ + in->pending_vector, 1, \ + &in->pending_vector, \ + &in->pending_count, \ &bytes_read); \ - if (ret == -1) { \ - gf_log (this->name, GF_LOG_WARNING, \ - "reading from socket failed. Error (%s), " \ - "peer (%s)", strerror (errno), \ - this->peerinfo.identifier); \ + if (ret == -1) \ break; \ - } \ __socket_proto_update_priv_after_read (priv, ret, bytes_read); \ } +static int socket_init (rpc_transport_t *this); + +static void +ssl_dump_error_stack (const char *caller) +{ + unsigned long errnum = 0; + char errbuf[120] = {0,}; + + /* OpenSSL docs explicitly give 120 as the error-string length. */ + + while ((errnum = ERR_get_error())) { + ERR_error_string(errnum,errbuf); + gf_log(caller,GF_LOG_ERROR," %s",errbuf); + } +} + +static int +ssl_do (rpc_transport_t *this, void *buf, size_t len, SSL_trinary_func *func) +{ + int r = (-1); + struct pollfd pfd = {-1,}; + socket_private_t *priv = NULL; + + GF_VALIDATE_OR_GOTO(this->name,this->private,out); + priv = this->private; + + for (;;) { + if (buf) { + if (priv->connected == -1) { + /* + * Fields in the SSL structure (especially + * the BIO pointers) are not valid at this + * point, so we'll segfault if we pass them + * to SSL_read/SSL_write. + */ + gf_log(this->name,GF_LOG_INFO, + "lost connection in %s", __func__); + break; + } + r = func(priv->ssl_ssl,buf,len); + } + else { + /* + * We actually need these functions to get to + * priv->connected == 1. + */ + r = ((SSL_unary_func *)func)(priv->ssl_ssl); + } + switch (SSL_get_error(priv->ssl_ssl,r)) { + case SSL_ERROR_NONE: + return r; + case SSL_ERROR_WANT_READ: + pfd.fd = priv->sock; + pfd.events = POLLIN; + if (poll(&pfd,1,-1) < 0) { + gf_log(this->name,GF_LOG_ERROR,"poll error %d", + errno); + } + break; + case SSL_ERROR_WANT_WRITE: + pfd.fd = priv->sock; + pfd.events = POLLOUT; + if (poll(&pfd,1,-1) < 0) { + gf_log(this->name,GF_LOG_ERROR,"poll error %d", + errno); + } + break; + case SSL_ERROR_SYSCALL: + /* This is what we get when remote disconnects. 
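The WANT_READ/WANT_WRITE handling in ssl_do() is the usual OpenSSL pattern for non-blocking sockets: retry the same call after poll() reports the direction the library is waiting on. A compile-only sketch of that loop for the read side, assuming an already established SSL *; ssl_read_retry() is an illustrative name, not the patch's helper.

#include <errno.h>
#include <poll.h>
#include <sys/types.h>
#include <openssl/ssl.h>

ssize_t
ssl_read_retry (SSL *ssl, void *buf, int len)
{
        struct pollfd pfd;
        int           r;

        for (;;) {
                r = SSL_read (ssl, buf, len);
                switch (SSL_get_error (ssl, r)) {
                case SSL_ERROR_NONE:
                        return r;                /* got data */
                case SSL_ERROR_WANT_READ:
                        pfd.fd = SSL_get_fd (ssl);
                        pfd.events = POLLIN;     /* nothing to read yet */
                        break;
                case SSL_ERROR_WANT_WRITE:
                        pfd.fd = SSL_get_fd (ssl);
                        pfd.events = POLLOUT;    /* renegotiation needs to send */
                        break;
                case SSL_ERROR_SYSCALL:
                        errno = ENODATA;         /* peer went away */
                        return -1;
                default:
                        errno = EIO;
                        return -1;
                }
                if (poll (&pfd, 1, -1) < 0 && errno != EINTR)
                        return -1;
        }
}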
*/ + gf_log(this->name,GF_LOG_DEBUG, + "syscall error (probably remote disconnect)"); + errno = ENODATA; + goto out; + default: + errno = EIO; + goto out; /* "break" would just loop again */ + } + } +out: + return -1; +} + +#define ssl_connect_one(t) ssl_do((t),NULL,0,(SSL_trinary_func *)SSL_connect) +#define ssl_accept_one(t) ssl_do((t),NULL,0,(SSL_trinary_func *)SSL_accept) +#define ssl_read_one(t,b,l) ssl_do((t),(b),(l),(SSL_trinary_func *)SSL_read) +#define ssl_write_one(t,b,l) ssl_do((t),(b),(l),(SSL_trinary_func *)SSL_write) -int socket_init (rpc_transport_t *this); +static int +ssl_setup_connection (rpc_transport_t *this, int server) +{ + X509 *peer = NULL; + char peer_CN[256] = ""; + int ret = -1; + socket_private_t *priv = NULL; + + GF_VALIDATE_OR_GOTO(this->name,this->private,done); + priv = this->private; + + priv->ssl_ssl = SSL_new(priv->ssl_ctx); + if (!priv->ssl_ssl) { + gf_log(this->name,GF_LOG_ERROR,"SSL_new failed"); + ssl_dump_error_stack(this->name); + goto done; + } + priv->ssl_sbio = BIO_new_socket(priv->sock,BIO_NOCLOSE); + if (!priv->ssl_sbio) { + gf_log(this->name,GF_LOG_ERROR,"BIO_new_socket failed"); + ssl_dump_error_stack(this->name); + goto free_ssl; + } + SSL_set_bio(priv->ssl_ssl,priv->ssl_sbio,priv->ssl_sbio); + + if (server) { + ret = ssl_accept_one(this); + } + else { + ret = ssl_connect_one(this); + } + + /* Make sure _the call_ succeeded. */ + if (ret < 0) { + goto ssl_error; + } + + /* Make sure _SSL verification_ succeeded, yielding an identity. */ + if (SSL_get_verify_result(priv->ssl_ssl) != X509_V_OK) { + goto ssl_error; + } + peer = SSL_get_peer_certificate(priv->ssl_ssl); + if (!peer) { + goto ssl_error; + } + + /* Finally, everything seems OK. */ + X509_NAME_get_text_by_NID(X509_get_subject_name(peer), + NID_commonName, peer_CN, sizeof(peer_CN)-1); + peer_CN[sizeof(peer_CN)-1] = '\0'; + gf_log(this->name,GF_LOG_INFO,"peer CN = %s", peer_CN); + return 0; + + /* Error paths. 
*/ +ssl_error: + gf_log(this->name,GF_LOG_ERROR,"SSL connect error"); + ssl_dump_error_stack(this->name); +free_ssl: + SSL_free(priv->ssl_ssl); + priv->ssl_ssl = NULL; +done: + return ret; +} + + +static void +ssl_teardown_connection (socket_private_t *priv) +{ + SSL_shutdown(priv->ssl_ssl); + SSL_clear(priv->ssl_ssl); + SSL_free(priv->ssl_ssl); + priv->ssl_ssl = NULL; +} + + +static ssize_t +__socket_ssl_readv (rpc_transport_t *this, struct iovec *opvector, int opcount) +{ + socket_private_t *priv = NULL; + int sock = -1; + int ret = -1; + + priv = this->private; + sock = priv->sock; + + if (priv->use_ssl) { + ret = ssl_read_one (this, opvector->iov_base, opvector->iov_len); + } else { + ret = readv (sock, opvector, opcount); + } + + return ret; +} + + +static ssize_t +__socket_ssl_read (rpc_transport_t *this, void *buf, size_t count) +{ + struct iovec iov = {0, }; + int ret = -1; + + iov.iov_base = buf; + iov.iov_len = count; + + ret = __socket_ssl_readv (this, &iov, 1); + + return ret; +} + + +static int +__socket_cached_read (rpc_transport_t *this, struct iovec *opvector, int opcount) +{ + socket_private_t *priv = NULL; + int sock = -1; + struct gf_sock_incoming *in = NULL; + int req_len = -1; + int ret = -1; + + priv = this->private; + sock = priv->sock; + in = &priv->incoming; + req_len = iov_length (opvector, opcount); + + if (in->record_state == SP_STATE_READING_FRAGHDR) { + in->ra_read = 0; + in->ra_served = 0; + in->ra_max = 0; + in->ra_buf = NULL; + goto uncached; + } + + if (!in->ra_max) { + /* first call after passing SP_STATE_READING_FRAGHDR */ + in->ra_max = min (RPC_FRAGSIZE (in->fraghdr), GF_SOCKET_RA_MAX); + /* Note that the in->iobuf is the primary iobuf into which + headers are read into. By using this itself as our + read-ahead cache, we can avoid memory copies in iov_load + */ + in->ra_buf = iobuf_ptr (in->iobuf); + } + + /* fill read-ahead */ + if (in->ra_read < in->ra_max) { + ret = __socket_ssl_read (this, &in->ra_buf[in->ra_read], + (in->ra_max - in->ra_read)); + if (ret > 0) + in->ra_read += ret; + + /* we proceed to test if there is still cached data to + be served even if readahead could not progress */ + } + + /* serve cached */ + if (in->ra_served < in->ra_read) { + ret = iov_load (opvector, opcount, &in->ra_buf[in->ra_served], + min (req_len, (in->ra_read - in->ra_served))); + + in->ra_served += ret; + /* Do not read uncached and cached in the same call */ + goto out; + } + + if (in->ra_read < in->ra_max) + /* If there was no cached data to be served, (and we are + guaranteed to have already performed an attempt to progress + readahead above), and we have not yet read out the full + readahead capacity, then bail out for now without doing + the uncached read below (as that will overtake future cached + read) + */ + goto out; +uncached: + ret = __socket_ssl_readv (this, opvector, opcount); +out: + return ret; +} + +static gf_boolean_t +__does_socket_rwv_error_need_logging (socket_private_t *priv, int write) +{ + int read = !write; + + if (priv->connected == -1) /* Didn't even connect, of course it fails */ + return _gf_false; + + if (read && (priv->read_fail_log == _gf_false)) + return _gf_false; + + return _gf_true; +} /* * return value: @@ -134,7 +438,7 @@ int socket_init (rpc_transport_t *this); * > 0 = incomplete */ -int +static int __socket_rwv (rpc_transport_t *this, struct iovec *vector, int count, struct iovec **pending_vector, int *pending_count, size_t *bytes, int write) @@ -159,9 +463,22 @@ __socket_rwv (rpc_transport_t *this, struct iovec *vector, 
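__socket_cached_read() above is a read-ahead: it pulls up to min(fragment size, GF_SOCKET_RA_MAX) into the header iobuf in one call and then serves the state machine's many small reads out of that cache. A runnable sketch of the fill-then-serve split over a plain file descriptor, assuming an 8 KB cap; struct ra_cache and ra_read() are illustrative names, and the real code additionally ties the cache to the current RPC fragment.

#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define RA_MAX 8192

struct ra_cache {
        char   buf[RA_MAX];
        size_t filled;      /* bytes read from the fd so far      */
        size_t served;      /* bytes already handed to the caller */
};

static ssize_t
ra_read (int fd, struct ra_cache *ra, void *out, size_t len)
{
        size_t avail = ra->filled - ra->served;

        if (avail == 0) {                       /* cache empty: refill it */
                ssize_t n = read (fd, ra->buf, RA_MAX);
                if (n <= 0)
                        return n;               /* EOF or error */
                ra->filled = (size_t)n;
                ra->served = 0;
                avail = ra->filled;
        }

        if (len > avail)                        /* serve from the cache */
                len = avail;
        memcpy (out, ra->buf + ra->served, len);
        ra->served += len;
        return (ssize_t)len;
}

int
main (void)
{
        struct ra_cache ra = { .filled = 0, .served = 0 };
        char            chunk[4];
        ssize_t         n;
        size_t          total = 0;

        while ((n = ra_read (0, &ra, chunk, sizeof (chunk))) > 0)
                total += (size_t)n;
        printf ("read %zu bytes from stdin in 4-byte slices\n", total);
        return 0;
}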
int count, *bytes = 0; } - while (opcount) { + while (opcount > 0) { + if (opvector->iov_len == 0) { + gf_log(this->name,GF_LOG_DEBUG, + "would have passed zero length to read/write"); + ++opvector; + --opcount; + continue; + } if (write) { - ret = writev (sock, opvector, opcount); + if (priv->use_ssl) { + ret = ssl_write_one(this, + opvector->iov_base, opvector->iov_len); + } + else { + ret = writev (sock, opvector, opcount); + } if (ret == 0 || (ret == -1 && errno == EAGAIN)) { /* done for now */ @@ -169,7 +486,13 @@ __socket_rwv (rpc_transport_t *this, struct iovec *vector, int count, } this->total_bytes_write += ret; } else { - ret = readv (sock, opvector, opcount); + ret = __socket_cached_read (this, opvector, opcount); + + if (ret == 0) { + gf_log(this->name,GF_LOG_DEBUG,"EOF on socket"); + errno = ENODATA; + ret = -1; + } if (ret == -1 && errno == EAGAIN) { /* done for now */ break; @@ -190,9 +513,18 @@ __socket_rwv (rpc_transport_t *this, struct iovec *vector, int count, if (errno == EINTR) continue; - gf_log (this->name, GF_LOG_WARNING, - "%s failed (%s)", write ? "writev" : "readv", - strerror (errno)); + if (__does_socket_rwv_error_need_logging (priv, + write)) { + gf_log (this->name, GF_LOG_WARNING, + "%s on %s failed (%s)", + write ? "writev":"readv", + this->peerinfo.identifier, + strerror (errno)); + } + + if (priv->use_ssl) { + ssl_dump_error_stack(this->name); + } opcount = -1; break; } @@ -204,6 +536,17 @@ __socket_rwv (rpc_transport_t *this, struct iovec *vector, int count, moved = 0; while (moved < ret) { + if (!opcount) { + gf_log(this->name,GF_LOG_DEBUG, + "ran out of iov, moved %d/%d", + moved, ret); + goto ran_out; + } + if (!opvector[0].iov_len) { + opvector++; + opcount--; + continue; + } if ((ret - moved) >= opvector[0].iov_len) { moved += opvector[0].iov_len; opvector++; @@ -213,13 +556,11 @@ __socket_rwv (rpc_transport_t *this, struct iovec *vector, int count, opvector[0].iov_base += (ret - moved); moved += (ret - moved); } - while (opcount && !opvector[0].iov_len) { - opvector++; - opcount--; - } } } +ran_out: + if (pending_vector) *pending_vector = opvector; @@ -231,7 +572,7 @@ out: } -int +static int __socket_readv (rpc_transport_t *this, struct iovec *vector, int count, struct iovec **pending_vector, int *pending_count, size_t *bytes) @@ -245,7 +586,7 @@ __socket_readv (rpc_transport_t *this, struct iovec *vector, int count, } -int +static int __socket_writev (rpc_transport_t *this, struct iovec *vector, int count, struct iovec **pending_vector, int *pending_count) { @@ -258,26 +599,55 @@ __socket_writev (rpc_transport_t *this, struct iovec *vector, int count, } -int +static int +__socket_shutdown (rpc_transport_t *this) +{ + int ret = -1; + socket_private_t *priv = this->private; + + priv->connected = -1; + ret = shutdown (priv->sock, SHUT_RDWR); + if (ret) { + /* its already disconnected.. no need to understand + why it failed to shutdown in normal cases */ + gf_log (this->name, GF_LOG_DEBUG, + "shutdown() returned %d. 
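After a short writev()/readv(), __socket_rwv() advances through the iovec array by however many bytes went through and trims the element where it stopped, so the next call resumes mid-vector. A runnable sketch of that bookkeeping; advance_iov() and the sample strings are illustrative.

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

static void
advance_iov (struct iovec **vec, int *count, size_t done)
{
        while (*count > 0 && done > 0) {
                if (done >= (*vec)->iov_len) {
                        /* this element was fully consumed */
                        done -= (*vec)->iov_len;
                        (*vec)++;
                        (*count)--;
                } else {
                        /* partially consumed: trim it in place */
                        (*vec)->iov_base = (char *)(*vec)->iov_base + done;
                        (*vec)->iov_len -= done;
                        done = 0;
                }
        }
}

int
main (void)
{
        char          a[] = "hello ", b[] = "world\n";
        struct iovec  iov[2] = {
                { .iov_base = a, .iov_len = strlen (a) },
                { .iov_base = b, .iov_len = strlen (b) },
        };
        struct iovec *cur   = iov;
        int           count = 2;

        advance_iov (&cur, &count, 8);   /* pretend writev() wrote 8 bytes */
        printf ("%d vectors left, next chunk: %.*s",
                count, (int)cur->iov_len, (char *)cur->iov_base);
        return 0;
}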
%s", + ret, strerror (errno)); + } + + return ret; +} + +static int __socket_disconnect (rpc_transport_t *this) { - socket_private_t *priv = NULL; int ret = -1; + socket_private_t *priv = NULL; GF_VALIDATE_OR_GOTO ("socket", this, out); GF_VALIDATE_OR_GOTO ("socket", this->private, out); priv = this->private; + gf_log (this->name, GF_LOG_TRACE, + "disconnecting %p, state=%u gen=%u sock=%d", this, + priv->ot_state, priv->ot_gen, priv->sock); + if (priv->sock != -1) { - priv->connected = -1; - ret = shutdown (priv->sock, SHUT_RDWR); - if (ret) { - /* its already disconnected.. no need to understand - why it failed to shutdown in normal cases */ - gf_log (this->name, GF_LOG_DEBUG, - "shutdown() returned %d. %s", - ret, strerror (errno)); + ret = __socket_shutdown(this); + if (priv->own_thread) { + /* + * Without this, reconnect (= disconnect + connect) + * won't work except by accident. + */ + close(priv->sock); + priv->sock = -1; + gf_log (this->name, GF_LOG_TRACE, + "OT_PLEASE_DIE on %p", this); + priv->ot_state = OT_PLEASE_DIE; + } + else if (priv->use_ssl) { + ssl_teardown_connection(priv); } } @@ -286,7 +656,7 @@ out: } -int +static int __socket_server_bind (rpc_transport_t *this) { socket_private_t *priv = NULL; @@ -314,7 +684,7 @@ __socket_server_bind (rpc_transport_t *this) memcpy (&unix_addr, SA (&this->myinfo.sockaddr), this->myinfo.sockaddr_len); reuse_check_sock = socket (AF_UNIX, SOCK_STREAM, 0); - if (reuse_check_sock > 0) { + if (reuse_check_sock >= 0) { ret = connect (reuse_check_sock, SA (&unix_addr), this->myinfo.sockaddr_len); if ((ret == -1) && (ECONNREFUSED == errno)) { @@ -342,7 +712,7 @@ out: } -int +static int __socket_nonblock (int fd) { int flags = 0; @@ -356,8 +726,7 @@ __socket_nonblock (int fd) return ret; } - -int +static int __socket_nodelay (int fd) { int on = 1; @@ -374,7 +743,7 @@ __socket_nodelay (int fd) static int -__socket_keepalive (int fd, int keepalive_intvl, int keepalive_idle) +__socket_keepalive (int fd, int family, int keepalive_intvl, int keepalive_idle) { int on = 1; int ret = -1; @@ -403,18 +772,23 @@ __socket_keepalive (int fd, int keepalive_intvl, int keepalive_idle) goto err; } #else + if (family != AF_INET) + goto done; + ret = setsockopt (fd, IPPROTO_TCP, TCP_KEEPIDLE, &keepalive_idle, sizeof (keepalive_intvl)); if (ret == -1) { gf_log ("socket", GF_LOG_WARNING, - "failed to set keep idle on socket %d", fd); + "failed to set keep idle %d on socket %d, %s", + keepalive_idle, fd, strerror(errno)); goto err; } - ret = setsockopt (fd, IPPROTO_TCP, TCP_KEEPINTVL, &keepalive_intvl, + ret = setsockopt (fd, IPPROTO_TCP , TCP_KEEPINTVL, &keepalive_intvl, sizeof (keepalive_intvl)); if (ret == -1) { gf_log ("socket", GF_LOG_WARNING, - "failed to set keep alive interval on socket %d", fd); + "failed to set keep interval %d on socket %d, %s", + keepalive_intvl, fd, strerror(errno)); goto err; } #endif @@ -428,7 +802,7 @@ err: } -int +static int __socket_connect_finish (int fd) { int ret = -1; @@ -446,7 +820,7 @@ __socket_connect_finish (int fd) } -void +static void __socket_reset (rpc_transport_t *this) { socket_private_t *priv = NULL; @@ -467,9 +841,7 @@ __socket_reset (rpc_transport_t *this) iobuf_unref (priv->incoming.iobuf); } - if (priv->incoming.request_info != NULL) { - GF_FREE (priv->incoming.request_info); - } + GF_FREE (priv->incoming.request_info); memset (&priv->incoming, 0, sizeof (priv->incoming)); @@ -485,13 +857,13 @@ out: } -void +static void socket_set_lastfrag (uint32_t *fragsize) { (*fragsize) |= 0x80000000U; } -void +static void 
socket_set_frag_header_size (uint32_t size, char *haddr) { size = htonl (size); @@ -499,14 +871,14 @@ socket_set_frag_header_size (uint32_t size, char *haddr) } -void +static void socket_set_last_frag_header_size (uint32_t size, char *haddr) { socket_set_lastfrag (&size); socket_set_frag_header_size (size, haddr); } -struct ioq * +static struct ioq * __socket_ioq_new (rpc_transport_t *this, rpc_transport_msg_t *msg) { struct ioq *entry = NULL; @@ -573,7 +945,7 @@ out: } -void +static void __socket_ioq_entry_free (struct ioq *entry) { GF_VALIDATE_OR_GOTO ("socket", entry, out); @@ -590,7 +962,7 @@ out: } -void +static void __socket_ioq_flush (rpc_transport_t *this) { socket_private_t *priv = NULL; @@ -611,10 +983,12 @@ out: } -int -__socket_ioq_churn_entry (rpc_transport_t *this, struct ioq *entry) +static int +__socket_ioq_churn_entry (rpc_transport_t *this, struct ioq *entry, int direct) { - int ret = -1; + int ret = -1; + socket_private_t *priv = NULL; + char a_byte = 0; ret = __socket_writev (this, entry->pending_vector, entry->pending_count, @@ -625,13 +999,25 @@ __socket_ioq_churn_entry (rpc_transport_t *this, struct ioq *entry) /* current entry was completely written */ GF_ASSERT (entry->pending_count == 0); __socket_ioq_entry_free (entry); + priv = this->private; + if (priv->own_thread) { + /* + * The pipe should only remain readable if there are + * more entries after this, so drain the byte + * representing this entry. + */ + if (!direct && read(priv->pipe[0],&a_byte,1) < 1) { + gf_log(this->name,GF_LOG_WARNING, + "read error on pipe"); + } + } } return ret; } -int +static int __socket_ioq_churn (rpc_transport_t *this) { socket_private_t *priv = NULL; @@ -647,13 +1033,13 @@ __socket_ioq_churn (rpc_transport_t *this) /* pick next entry */ entry = priv->ioq_next; - ret = __socket_ioq_churn_entry (this, entry); + ret = __socket_ioq_churn_entry (this, entry, 0); if (ret != 0) break; } - if (list_empty (&priv->ioq)) { + if (!priv->own_thread && list_empty (&priv->ioq)) { /* all pending writes done, not interested in POLLOUT */ priv->idx = event_select_on (this->ctx->event_pool, priv->sock, priv->idx, -1, 0); @@ -664,7 +1050,7 @@ out: } -int +static int socket_event_poll_err (rpc_transport_t *this) { socket_private_t *priv = NULL; @@ -689,7 +1075,7 @@ out: } -int +static int socket_event_poll_out (rpc_transport_t *this) { socket_private_t *priv = NULL; @@ -719,43 +1105,45 @@ out: } -inline int +static inline int __socket_read_simple_msg (rpc_transport_t *this) { - socket_private_t *priv = NULL; - int ret = 0; - uint32_t remaining_size = 0; - size_t bytes_read = 0; + int ret = 0; + uint32_t remaining_size = 0; + size_t bytes_read = 0; + socket_private_t *priv = NULL; + struct gf_sock_incoming *in = NULL; + struct gf_sock_incoming_frag *frag = NULL; GF_VALIDATE_OR_GOTO ("socket", this, out); GF_VALIDATE_OR_GOTO ("socket", this->private, out); priv = this->private; - switch (priv->incoming.frag.simple_state) { + in = &priv->incoming; + frag = &in->frag; + + switch (frag->simple_state) { case SP_STATE_SIMPLE_MSG_INIT: - remaining_size = RPC_FRAGSIZE (priv->incoming.fraghdr) - - priv->incoming.frag.bytes_read; + remaining_size = RPC_FRAGSIZE (in->fraghdr) - frag->bytes_read; __socket_proto_init_pending (priv, remaining_size); - priv->incoming.frag.simple_state = - SP_STATE_READING_SIMPLE_MSG; + frag->simple_state = SP_STATE_READING_SIMPLE_MSG; /* fall through */ case SP_STATE_READING_SIMPLE_MSG: ret = 0; - remaining_size = RPC_FRAGSIZE (priv->incoming.fraghdr) - - 
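socket_set_lastfrag() and socket_set_frag_header_size() build the 4-byte RPC record-marking header: the top bit flags the last fragment and the low 31 bits carry the fragment size, which is what RPC_FRAGSIZE()/RPC_LASTFRAG() read back on the receive path. A standalone round-trip of that encoding; the 516-byte size is just a sample value.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LASTFRAG_BIT 0x80000000U

int
main (void)
{
        uint32_t size = 516;
        uint32_t hdr  = htonl (size | LASTFRAG_BIT);   /* what goes on the wire */
        char     wire[4];
        uint32_t decoded;

        memcpy (wire, &hdr, sizeof (hdr));             /* serialize */

        memcpy (&decoded, wire, sizeof (decoded));     /* parse it back */
        decoded = ntohl (decoded);

        printf ("size=%u last=%u\n",
                decoded & ~LASTFRAG_BIT, !!(decoded & LASTFRAG_BIT));
        return 0;
}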
priv->incoming.frag.bytes_read; + remaining_size = RPC_FRAGSIZE (in->fraghdr) - frag->bytes_read; if (remaining_size > 0) { ret = __socket_readv (this, - priv->incoming.pending_vector, 1, - &priv->incoming.pending_vector, - &priv->incoming.pending_count, + in->pending_vector, 1, + &in->pending_vector, + &in->pending_count, &bytes_read); } @@ -767,8 +1155,8 @@ __socket_read_simple_msg (rpc_transport_t *this) break; } - priv->incoming.frag.bytes_read += bytes_read; - priv->incoming.frag.fragcurrent += bytes_read; + frag->bytes_read += bytes_read; + frag->fragcurrent += bytes_read; if (ret > 0) { gf_log (this->name, GF_LOG_TRACE, @@ -777,8 +1165,7 @@ __socket_read_simple_msg (rpc_transport_t *this) } if (ret == 0) { - priv->incoming.frag.simple_state - = SP_STATE_SIMPLE_MSG_INIT; + frag->simple_state = SP_STATE_SIMPLE_MSG_INIT; } } @@ -787,7 +1174,7 @@ out: } -inline int +static inline int __socket_read_simple_request (rpc_transport_t *this) { return __socket_read_simple_msg (this); @@ -804,7 +1191,7 @@ __socket_read_simple_request (rpc_transport_t *this) #define rpc_progver_addr(buf) (buf + RPC_MSGTYPE_SIZE + 8) #define rpc_procnum_addr(buf) (buf + RPC_MSGTYPE_SIZE + 12) -inline int +static inline int __socket_read_vectored_request (rpc_transport_t *this, rpcsvc_vector_sizer vector_sizer) { socket_private_t *priv = NULL; @@ -814,17 +1201,26 @@ __socket_read_vectored_request (rpc_transport_t *this, rpcsvc_vector_sizer vecto struct iobuf *iobuf = NULL; uint32_t remaining_size = 0; ssize_t readsize = 0; - size_t size = 0; + size_t size = 0; + struct gf_sock_incoming *in = NULL; + struct gf_sock_incoming_frag *frag = NULL; + sp_rpcfrag_request_state_t *request = NULL; GF_VALIDATE_OR_GOTO ("socket", this, out); GF_VALIDATE_OR_GOTO ("socket", this->private, out); priv = this->private; - switch (priv->incoming.frag.call_body.request.vector_state) { + /* used to reduce the indirection */ + in = &priv->incoming; + frag = &in->frag; + request = &frag->call_body.request; + + switch (request->vector_state) { case SP_STATE_VECTORED_REQUEST_INIT: - priv->incoming.frag.call_body.request.vector_sizer_state = 0; - addr = rpc_cred_addr (iobuf_ptr (priv->incoming.iobuf)); + request->vector_sizer_state = 0; + + addr = rpc_cred_addr (iobuf_ptr (in->iobuf)); /* also read verf flavour and verflen */ credlen = ntoh32 (*((uint32_t *)addr)) @@ -832,102 +1228,114 @@ __socket_read_vectored_request (rpc_transport_t *this, rpcsvc_vector_sizer vecto __socket_proto_init_pending (priv, credlen); - priv->incoming.frag.call_body.request.vector_state = - SP_STATE_READING_CREDBYTES; + request->vector_state = SP_STATE_READING_CREDBYTES; /* fall through */ case SP_STATE_READING_CREDBYTES: __socket_proto_read (priv, ret); - priv->incoming.frag.call_body.request.vector_state = - SP_STATE_READ_CREDBYTES; + request->vector_state = SP_STATE_READ_CREDBYTES; /* fall through */ case SP_STATE_READ_CREDBYTES: - addr = rpc_verf_addr (priv->incoming.frag.fragcurrent); + addr = rpc_verf_addr (frag->fragcurrent); verflen = ntoh32 (*((uint32_t *)addr)); if (verflen == 0) { - priv->incoming.frag.call_body.request.vector_state - = SP_STATE_READ_VERFBYTES; + request->vector_state = SP_STATE_READ_VERFBYTES; goto sp_state_read_verfbytes; } __socket_proto_init_pending (priv, verflen); - priv->incoming.frag.call_body.request.vector_state - = SP_STATE_READING_VERFBYTES; + request->vector_state = SP_STATE_READING_VERFBYTES; /* fall through */ case SP_STATE_READING_VERFBYTES: __socket_proto_read (priv, ret); - 
priv->incoming.frag.call_body.request.vector_state = - SP_STATE_READ_VERFBYTES; + request->vector_state = SP_STATE_READ_VERFBYTES; /* fall through */ case SP_STATE_READ_VERFBYTES: sp_state_read_verfbytes: - priv->incoming.frag.call_body.request.vector_sizer_state = - vector_sizer (priv->incoming.frag.call_body.request.vector_sizer_state, - &readsize, - priv->incoming.frag.fragcurrent); + /* set the base_addr 'persistently' across multiple calls + into the state machine */ + in->proghdr_base_addr = frag->fragcurrent; + + request->vector_sizer_state = + vector_sizer (request->vector_sizer_state, + &readsize, in->proghdr_base_addr, + frag->fragcurrent); __socket_proto_init_pending (priv, readsize); - priv->incoming.frag.call_body.request.vector_state - = SP_STATE_READING_PROGHDR; + + request->vector_state = SP_STATE_READING_PROGHDR; /* fall through */ case SP_STATE_READING_PROGHDR: __socket_proto_read (priv, ret); -sp_state_reading_proghdr: - priv->incoming.frag.call_body.request.vector_sizer_state = - vector_sizer (priv->incoming.frag.call_body.request.vector_sizer_state, - &readsize, - priv->incoming.frag.fragcurrent); + + request->vector_state = SP_STATE_READ_PROGHDR; + + /* fall through */ + + case SP_STATE_READ_PROGHDR: +sp_state_read_proghdr: + request->vector_sizer_state = + vector_sizer (request->vector_sizer_state, + &readsize, in->proghdr_base_addr, + frag->fragcurrent); if (readsize == 0) { - priv->incoming.frag.call_body.request.vector_state = - SP_STATE_READ_PROGHDR; - } else { - __socket_proto_init_pending (priv, readsize); - __socket_proto_read (priv, ret); - goto sp_state_reading_proghdr; + request->vector_state = SP_STATE_READ_PROGHDR_XDATA; + goto sp_state_read_proghdr_xdata; } - case SP_STATE_READ_PROGHDR: - if (priv->incoming.payload_vector.iov_base == NULL) { + __socket_proto_init_pending (priv, readsize); - size = RPC_FRAGSIZE (priv->incoming.fraghdr) - - priv->incoming.frag.bytes_read; + request->vector_state = SP_STATE_READING_PROGHDR_XDATA; + + /* fall through */ + + case SP_STATE_READING_PROGHDR_XDATA: + __socket_proto_read (priv, ret); + + request->vector_state = SP_STATE_READ_PROGHDR; + /* check if the vector_sizer() has more to say */ + goto sp_state_read_proghdr; + + case SP_STATE_READ_PROGHDR_XDATA: +sp_state_read_proghdr_xdata: + if (in->payload_vector.iov_base == NULL) { + + size = RPC_FRAGSIZE (in->fraghdr) - frag->bytes_read; iobuf = iobuf_get2 (this->ctx->iobuf_pool, size); if (!iobuf) { ret = -1; break; } - if (priv->incoming.iobref == NULL) { - priv->incoming.iobref = iobref_new (); - if (priv->incoming.iobref == NULL) { + if (in->iobref == NULL) { + in->iobref = iobref_new (); + if (in->iobref == NULL) { ret = -1; iobuf_unref (iobuf); break; } } - iobref_add (priv->incoming.iobref, iobuf); + iobref_add (in->iobref, iobuf); iobuf_unref (iobuf); - priv->incoming.payload_vector.iov_base - = iobuf_ptr (iobuf); + in->payload_vector.iov_base = iobuf_ptr (iobuf); - priv->incoming.frag.fragcurrent = iobuf_ptr (iobuf); + frag->fragcurrent = iobuf_ptr (iobuf); } - priv->incoming.frag.call_body.request.vector_state = - SP_STATE_READING_PROG; + request->vector_state = SP_STATE_READING_PROG; /* fall through */ @@ -938,19 +1346,15 @@ sp_state_reading_proghdr: ret = __socket_read_simple_msg (this); - remaining_size = RPC_FRAGSIZE (priv->incoming.fraghdr) - - priv->incoming.frag.bytes_read; + remaining_size = RPC_FRAGSIZE (in->fraghdr) - frag->bytes_read; - if ((ret == -1) - || ((ret == 0) - && (remaining_size == 0) - && RPC_LASTFRAG (priv->incoming.fraghdr))) { - 
priv->incoming.frag.call_body.request.vector_state - = SP_STATE_VECTORED_REQUEST_INIT; - priv->incoming.payload_vector.iov_len - = (unsigned long)priv->incoming.frag.fragcurrent - - (unsigned long) - priv->incoming.payload_vector.iov_base; + if ((ret == -1) || + ((ret == 0) && (remaining_size == 0) + && RPC_LASTFRAG (in->fraghdr))) { + request->vector_state = SP_STATE_VECTORED_REQUEST_INIT; + in->payload_vector.iov_len + = ((unsigned long)frag->fragcurrent + - (unsigned long)in->payload_vector.iov_base); } break; } @@ -959,7 +1363,7 @@ out: return ret; } -inline int +static inline int __socket_read_request (rpc_transport_t *this) { socket_private_t *priv = NULL; @@ -968,46 +1372,53 @@ __socket_read_request (rpc_transport_t *this) int ret = -1; char *buf = NULL; rpcsvc_vector_sizer vector_sizer = NULL; + struct gf_sock_incoming *in = NULL; + struct gf_sock_incoming_frag *frag = NULL; + sp_rpcfrag_request_state_t *request = NULL; GF_VALIDATE_OR_GOTO ("socket", this, out); GF_VALIDATE_OR_GOTO ("socket", this->private, out); priv = this->private; - switch (priv->incoming.frag.call_body.request.header_state) { + /* used to reduce the indirection */ + in = &priv->incoming; + frag = &in->frag; + request = &frag->call_body.request; + + switch (request->header_state) { case SP_STATE_REQUEST_HEADER_INIT: __socket_proto_init_pending (priv, RPC_CALL_BODY_SIZE); - priv->incoming.frag.call_body.request.header_state - = SP_STATE_READING_RPCHDR1; + request->header_state = SP_STATE_READING_RPCHDR1; /* fall through */ case SP_STATE_READING_RPCHDR1: __socket_proto_read (priv, ret); - priv->incoming.frag.call_body.request.header_state = - SP_STATE_READ_RPCHDR1; + request->header_state = SP_STATE_READ_RPCHDR1; /* fall through */ case SP_STATE_READ_RPCHDR1: - buf = rpc_prognum_addr (iobuf_ptr (priv->incoming.iobuf)); + buf = rpc_prognum_addr (iobuf_ptr (in->iobuf)); prognum = ntoh32 (*((uint32_t *)buf)); - buf = rpc_progver_addr (iobuf_ptr (priv->incoming.iobuf)); + buf = rpc_progver_addr (iobuf_ptr (in->iobuf)); progver = ntoh32 (*((uint32_t *)buf)); - buf = rpc_procnum_addr (iobuf_ptr (priv->incoming.iobuf)); + buf = rpc_procnum_addr (iobuf_ptr (in->iobuf)); procnum = ntoh32 (*((uint32_t *)buf)); - if (this->listener) { - /* this check is needed as rpcsvc and rpc-clnt actor structures are - * not same */ - vector_sizer = rpcsvc_get_program_vector_sizer ((rpcsvc_t *)this->mydata, - prognum, progver, procnum); + if (priv->is_server) { + /* this check is needed as rpcsvc and rpc-clnt + * actor structures are not same */ + vector_sizer = + rpcsvc_get_program_vector_sizer ((rpcsvc_t *)this->mydata, + prognum, progver, procnum); } if (vector_sizer) { @@ -1016,15 +1427,13 @@ __socket_read_request (rpc_transport_t *this) ret = __socket_read_simple_request (this); } - remaining_size = RPC_FRAGSIZE (priv->incoming.fraghdr) - - priv->incoming.frag.bytes_read; + remaining_size = RPC_FRAGSIZE (in->fraghdr) - frag->bytes_read; if ((ret == -1) || ((ret == 0) && (remaining_size == 0) - && (RPC_LASTFRAG (priv->incoming.fraghdr)))) { - priv->incoming.frag.call_body.request.header_state = - SP_STATE_REQUEST_HEADER_INIT; + && (RPC_LASTFRAG (in->fraghdr)))) { + request->header_state = SP_STATE_REQUEST_HEADER_INIT; } break; @@ -1035,36 +1444,40 @@ out: } -inline int +static inline int __socket_read_accepted_successful_reply (rpc_transport_t *this) { - socket_private_t *priv = NULL; - int ret = 0; - struct iobuf *iobuf = NULL; - uint32_t gluster_read_rsp_hdr_len = 0; - gfs3_read_rsp read_rsp = {0, }; - size_t size = 0; + 
socket_private_t *priv = NULL; + int ret = 0; + struct iobuf *iobuf = NULL; + gfs3_read_rsp read_rsp = {0, }; + ssize_t size = 0; + ssize_t default_read_size = 0; + char *proghdr_buf = NULL; + XDR xdr; + struct gf_sock_incoming *in = NULL; + struct gf_sock_incoming_frag *frag = NULL; GF_VALIDATE_OR_GOTO ("socket", this, out); GF_VALIDATE_OR_GOTO ("socket", this->private, out); priv = this->private; - switch (priv->incoming.frag.call_body.reply.accepted_success_state) { + /* used to reduce the indirection */ + in = &priv->incoming; + frag = &in->frag; + + switch (frag->call_body.reply.accepted_success_state) { case SP_STATE_ACCEPTED_SUCCESS_REPLY_INIT: - gluster_read_rsp_hdr_len = xdr_sizeof ((xdrproc_t) xdr_gfs3_read_rsp, - &read_rsp); + default_read_size = xdr_sizeof ((xdrproc_t) xdr_gfs3_read_rsp, + &read_rsp); - if (gluster_read_rsp_hdr_len == 0) { - gf_log (this->name, GF_LOG_ERROR, - "xdr_sizeof on gfs3_read_rsp failed"); - ret = -1; - goto out; - } - __socket_proto_init_pending (priv, gluster_read_rsp_hdr_len); + proghdr_buf = frag->fragcurrent; + + __socket_proto_init_pending (priv, default_read_size); - priv->incoming.frag.call_body.reply.accepted_success_state + frag->call_body.reply.accepted_success_state = SP_STATE_READING_PROC_HEADER; /* fall through */ @@ -1072,13 +1485,42 @@ __socket_read_accepted_successful_reply (rpc_transport_t *this) case SP_STATE_READING_PROC_HEADER: __socket_proto_read (priv, ret); - priv->incoming.frag.call_body.reply.accepted_success_state - = SP_STATE_READ_PROC_HEADER; + /* there can be 'xdata' in read response, figure it out */ + xdrmem_create (&xdr, proghdr_buf, default_read_size, + XDR_DECODE); - if (priv->incoming.payload_vector.iov_base == NULL) { + /* This will fail if there is xdata sent from server, if not, + well and good, we don't need to worry about */ + xdr_gfs3_read_rsp (&xdr, &read_rsp); - size = (RPC_FRAGSIZE (priv->incoming.fraghdr) - - priv->incoming.frag.bytes_read); + free (read_rsp.xdata.xdata_val); + + /* need to round off to proper roof (%4), as XDR packing pads + the end of opaque object with '0' */ + size = roof (read_rsp.xdata.xdata_len, 4); + + if (!size) { + frag->call_body.reply.accepted_success_state + = SP_STATE_READ_PROC_OPAQUE; + goto read_proc_opaque; + } + + __socket_proto_init_pending (priv, size); + + frag->call_body.reply.accepted_success_state + = SP_STATE_READING_PROC_OPAQUE; + + case SP_STATE_READING_PROC_OPAQUE: + __socket_proto_read (priv, ret); + + frag->call_body.reply.accepted_success_state + = SP_STATE_READ_PROC_OPAQUE; + + case SP_STATE_READ_PROC_OPAQUE: + read_proc_opaque: + if (in->payload_vector.iov_base == NULL) { + + size = (RPC_FRAGSIZE (in->fraghdr) - frag->bytes_read); iobuf = iobuf_get2 (this->ctx->iobuf_pool, size); if (iobuf == NULL) { @@ -1086,26 +1528,27 @@ __socket_read_accepted_successful_reply (rpc_transport_t *this) goto out; } - if (priv->incoming.iobref == NULL) { - priv->incoming.iobref = iobref_new (); - if (priv->incoming.iobref == NULL) { + if (in->iobref == NULL) { + in->iobref = iobref_new (); + if (in->iobref == NULL) { ret = -1; iobuf_unref (iobuf); goto out; } } - iobref_add (priv->incoming.iobref, iobuf); + iobref_add (in->iobref, iobuf); iobuf_unref (iobuf); - priv->incoming.payload_vector.iov_base - = iobuf_ptr (iobuf); + in->payload_vector.iov_base = iobuf_ptr (iobuf); - priv->incoming.payload_vector.iov_len = size; + in->payload_vector.iov_len = size; } - priv->incoming.frag.fragcurrent - = priv->incoming.payload_vector.iov_base; + frag->fragcurrent = 
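The reply reader below decodes the gfs3_read_rsp header with xdrmem_create() only to learn xdata_len, then rounds it up with roof(..., 4) because XDR pads opaque data to 4-byte boundaries. A tiny standalone illustration of that rounding; roof4() stands in for the tree's roof() macro.

#include <stdint.h>
#include <stdio.h>

static uint32_t
roof4 (uint32_t len)
{
        /* round up to the next multiple of 4 (XDR opaque padding) */
        return (len + 3u) & ~3u;
}

int
main (void)
{
        uint32_t lens[] = { 0, 1, 4, 5, 13 };
        size_t   i;

        for (i = 0; i < sizeof (lens) / sizeof (lens[0]); i++)
                printf ("len=%u on-wire=%u\n", lens[i], roof4 (lens[i]));
        return 0;
}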
in->payload_vector.iov_base; + + frag->call_body.reply.accepted_success_state + = SP_STATE_READ_PROC_HEADER; /* fall through */ @@ -1113,9 +1556,8 @@ __socket_read_accepted_successful_reply (rpc_transport_t *this) /* now read the entire remaining msg into new iobuf */ ret = __socket_read_simple_msg (this); if ((ret == -1) - || ((ret == 0) - && RPC_LASTFRAG (priv->incoming.fraghdr))) { - priv->incoming.frag.call_body.reply.accepted_success_state + || ((ret == 0) && RPC_LASTFRAG (in->fraghdr))) { + frag->call_body.reply.accepted_success_state = SP_STATE_ACCEPTED_SUCCESS_REPLY_INIT; } @@ -1129,7 +1571,7 @@ out: #define rpc_reply_verflen_addr(fragcurrent) ((char *)fragcurrent - 4) #define rpc_reply_accept_status_addr(fragcurrent) ((char *)fragcurrent - 4) -inline int +static inline int __socket_read_accepted_reply (rpc_transport_t *this) { socket_private_t *priv = NULL; @@ -1137,19 +1579,24 @@ __socket_read_accepted_reply (rpc_transport_t *this) char *buf = NULL; uint32_t verflen = 0, len = 0; uint32_t remaining_size = 0; + struct gf_sock_incoming *in = NULL; + struct gf_sock_incoming_frag *frag = NULL; GF_VALIDATE_OR_GOTO ("socket", this, out); GF_VALIDATE_OR_GOTO ("socket", this->private, out); priv = this->private; + /* used to reduce the indirection */ + in = &priv->incoming; + frag = &in->frag; - switch (priv->incoming.frag.call_body.reply.accepted_state) { + switch (frag->call_body.reply.accepted_state) { case SP_STATE_ACCEPTED_REPLY_INIT: __socket_proto_init_pending (priv, RPC_AUTH_FLAVOUR_N_LENGTH_SIZE); - priv->incoming.frag.call_body.reply.accepted_state + frag->call_body.reply.accepted_state = SP_STATE_READING_REPLY_VERFLEN; /* fall through */ @@ -1157,13 +1604,13 @@ __socket_read_accepted_reply (rpc_transport_t *this) case SP_STATE_READING_REPLY_VERFLEN: __socket_proto_read (priv, ret); - priv->incoming.frag.call_body.reply.accepted_state + frag->call_body.reply.accepted_state = SP_STATE_READ_REPLY_VERFLEN; /* fall through */ case SP_STATE_READ_REPLY_VERFLEN: - buf = rpc_reply_verflen_addr (priv->incoming.frag.fragcurrent); + buf = rpc_reply_verflen_addr (frag->fragcurrent); verflen = ntoh32 (*((uint32_t *) buf)); @@ -1172,7 +1619,7 @@ __socket_read_accepted_reply (rpc_transport_t *this) __socket_proto_init_pending (priv, len); - priv->incoming.frag.call_body.reply.accepted_state + frag->call_body.reply.accepted_state = SP_STATE_READING_REPLY_VERFBYTES; /* fall through */ @@ -1180,19 +1627,19 @@ __socket_read_accepted_reply (rpc_transport_t *this) case SP_STATE_READING_REPLY_VERFBYTES: __socket_proto_read (priv, ret); - priv->incoming.frag.call_body.reply.accepted_state + frag->call_body.reply.accepted_state = SP_STATE_READ_REPLY_VERFBYTES; - buf = rpc_reply_accept_status_addr (priv->incoming.frag.fragcurrent); + buf = rpc_reply_accept_status_addr (frag->fragcurrent); - priv->incoming.frag.call_body.reply.accept_status + frag->call_body.reply.accept_status = ntoh32 (*(uint32_t *) buf); /* fall through */ case SP_STATE_READ_REPLY_VERFBYTES: - if (priv->incoming.frag.call_body.reply.accept_status + if (frag->call_body.reply.accept_status == SUCCESS) { ret = __socket_read_accepted_successful_reply (this); } else { @@ -1202,14 +1649,13 @@ __socket_read_accepted_reply (rpc_transport_t *this) ret = __socket_read_simple_msg (this); } - remaining_size = RPC_FRAGSIZE (priv->incoming.fraghdr) - - priv->incoming.frag.bytes_read; + remaining_size = RPC_FRAGSIZE (in->fraghdr) + - frag->bytes_read; if ((ret == -1) - || ((ret == 0) - && (remaining_size == 0) - && (RPC_LASTFRAG 
(priv->incoming.fraghdr)))) { - priv->incoming.frag.call_body.reply.accepted_state + || ((ret == 0) && (remaining_size == 0) + && (RPC_LASTFRAG (in->fraghdr)))) { + frag->call_body.reply.accepted_state = SP_STATE_ACCEPTED_REPLY_INIT; } @@ -1221,7 +1667,7 @@ out: } -inline int +static inline int __socket_read_denied_reply (rpc_transport_t *this) { return __socket_read_simple_msg (this); @@ -1231,25 +1677,29 @@ __socket_read_denied_reply (rpc_transport_t *this) #define rpc_reply_status_addr(fragcurrent) ((char *)fragcurrent - 4) -inline int +static inline int __socket_read_vectored_reply (rpc_transport_t *this) { socket_private_t *priv = NULL; int ret = 0; char *buf = NULL; uint32_t remaining_size = 0; + struct gf_sock_incoming *in = NULL; + struct gf_sock_incoming_frag *frag = NULL; GF_VALIDATE_OR_GOTO ("socket", this, out); GF_VALIDATE_OR_GOTO ("socket", this->private, out); priv = this->private; + in = &priv->incoming; + frag = &in->frag; - switch (priv->incoming.frag.call_body.reply.status_state) { + switch (frag->call_body.reply.status_state) { case SP_STATE_ACCEPTED_REPLY_INIT: __socket_proto_init_pending (priv, RPC_REPLY_STATUS_SIZE); - priv->incoming.frag.call_body.reply.status_state + frag->call_body.reply.status_state = SP_STATE_READING_REPLY_STATUS; /* fall through */ @@ -1257,37 +1707,33 @@ __socket_read_vectored_reply (rpc_transport_t *this) case SP_STATE_READING_REPLY_STATUS: __socket_proto_read (priv, ret); - buf = rpc_reply_status_addr (priv->incoming.frag.fragcurrent); + buf = rpc_reply_status_addr (frag->fragcurrent); - priv->incoming.frag.call_body.reply.accept_status + frag->call_body.reply.accept_status = ntoh32 (*((uint32_t *) buf)); - priv->incoming.frag.call_body.reply.status_state + frag->call_body.reply.status_state = SP_STATE_READ_REPLY_STATUS; /* fall through */ case SP_STATE_READ_REPLY_STATUS: - if (priv->incoming.frag.call_body.reply.accept_status - == MSG_ACCEPTED) { + if (frag->call_body.reply.accept_status == MSG_ACCEPTED) { ret = __socket_read_accepted_reply (this); } else { ret = __socket_read_denied_reply (this); } - remaining_size = RPC_FRAGSIZE (priv->incoming.fraghdr) - - priv->incoming.frag.bytes_read; + remaining_size = RPC_FRAGSIZE (in->fraghdr) - frag->bytes_read; if ((ret == -1) - || ((ret == 0) - && (remaining_size == 0) - && (RPC_LASTFRAG (priv->incoming.fraghdr)))) { - priv->incoming.frag.call_body.reply.status_state + || ((ret == 0) && (remaining_size == 0) + && (RPC_LASTFRAG (in->fraghdr)))) { + frag->call_body.reply.status_state = SP_STATE_ACCEPTED_REPLY_INIT; - priv->incoming.payload_vector.iov_len - = (unsigned long)priv->incoming.frag.fragcurrent - - (unsigned long) - priv->incoming.payload_vector.iov_base; + in->payload_vector.iov_len + = (unsigned long)frag->fragcurrent + - (unsigned long)in->payload_vector.iov_base; } break; } @@ -1297,7 +1743,7 @@ out: } -inline int +static inline int __socket_read_simple_reply (rpc_transport_t *this) { return __socket_read_simple_msg (this); @@ -1305,7 +1751,7 @@ __socket_read_simple_reply (rpc_transport_t *this) #define rpc_xid_addr(buf) (buf) -inline int +static inline int __socket_read_reply (rpc_transport_t *this) { socket_private_t *priv = NULL; @@ -1313,26 +1759,29 @@ __socket_read_reply (rpc_transport_t *this) int32_t ret = -1; rpc_request_info_t *request_info = NULL; char map_xid = 0; + struct gf_sock_incoming *in = NULL; + struct gf_sock_incoming_frag *frag = NULL; GF_VALIDATE_OR_GOTO ("socket", this, out); GF_VALIDATE_OR_GOTO ("socket", this->private, out); priv = this->private; + in = 
&priv->incoming; + frag = &in->frag; - buf = rpc_xid_addr (iobuf_ptr (priv->incoming.iobuf)); + buf = rpc_xid_addr (iobuf_ptr (in->iobuf)); - if (priv->incoming.request_info == NULL) { - priv->incoming.request_info = GF_CALLOC (1, - sizeof (*request_info), - gf_common_mt_rpc_trans_reqinfo_t); - if (priv->incoming.request_info == NULL) { + if (in->request_info == NULL) { + in->request_info = GF_CALLOC (1, sizeof (*request_info), + gf_common_mt_rpc_trans_reqinfo_t); + if (in->request_info == NULL) { goto out; } map_xid = 1; } - request_info = priv->incoming.request_info; + request_info = in->request_info; if (map_xid) { request_info->xid = ntoh32 (*((uint32_t *) buf)); @@ -1344,7 +1793,7 @@ __socket_read_reply (rpc_transport_t *this) { ret = rpc_transport_notify (this, RPC_TRANSPORT_MAP_XID_REQUEST, - priv->incoming.request_info); + in->request_info); } pthread_mutex_lock (&priv->lock); @@ -1355,13 +1804,11 @@ __socket_read_reply (rpc_transport_t *this) } } - if ((request_info->prognum == GLUSTER3_1_FOP_PROGRAM) + if ((request_info->prognum == GLUSTER_FOP_PROGRAM) && (request_info->procnum == GF_FOP_READ)) { if (map_xid && request_info->rsp.rsp_payload_count != 0) { - priv->incoming.iobref - = iobref_ref (request_info->rsp.rsp_iobref); - priv->incoming.payload_vector - = *request_info->rsp.rsp_payload; + in->iobref = iobref_ref (request_info->rsp.rsp_iobref); + in->payload_vector = *request_info->rsp.rsp_payload; } ret = __socket_read_vectored_reply (this); @@ -1374,42 +1821,47 @@ out: /* returns the number of bytes yet to be read in a fragment */ -inline int +static inline int __socket_read_frag (rpc_transport_t *this) { socket_private_t *priv = NULL; int32_t ret = 0; char *buf = NULL; uint32_t remaining_size = 0; + struct gf_sock_incoming *in = NULL; + struct gf_sock_incoming_frag *frag = NULL; GF_VALIDATE_OR_GOTO ("socket", this, out); GF_VALIDATE_OR_GOTO ("socket", this->private, out); priv = this->private; + /* used to reduce the indirection */ + in = &priv->incoming; + frag = &in->frag; - switch (priv->incoming.frag.state) { + switch (frag->state) { case SP_STATE_NADA: __socket_proto_init_pending (priv, RPC_MSGTYPE_SIZE); - priv->incoming.frag.state = SP_STATE_READING_MSGTYPE; + frag->state = SP_STATE_READING_MSGTYPE; /* fall through */ case SP_STATE_READING_MSGTYPE: __socket_proto_read (priv, ret); - priv->incoming.frag.state = SP_STATE_READ_MSGTYPE; + frag->state = SP_STATE_READ_MSGTYPE; /* fall through */ case SP_STATE_READ_MSGTYPE: - buf = rpc_msgtype_addr (iobuf_ptr (priv->incoming.iobuf)); - priv->incoming.msg_type = ntoh32 (*((uint32_t *)buf)); + buf = rpc_msgtype_addr (iobuf_ptr (in->iobuf)); + in->msg_type = ntoh32 (*((uint32_t *)buf)); - if (priv->incoming.msg_type == CALL) { + if (in->msg_type == CALL) { ret = __socket_read_request (this); - } else if (priv->incoming.msg_type == REPLY) { + } else if (in->msg_type == REPLY) { ret = __socket_read_reply (this); - } else if (priv->incoming.msg_type == GF_UNIVERSAL_ANSWER) { + } else if (in->msg_type == GF_UNIVERSAL_ANSWER) { gf_log ("rpc", GF_LOG_ERROR, "older version of protocol/process trying to " "connect from %s. 
use newer version on that node", @@ -1417,19 +1869,17 @@ __socket_read_frag (rpc_transport_t *this) } else { gf_log ("rpc", GF_LOG_ERROR, "wrong MSG-TYPE (%d) received from %s", - priv->incoming.msg_type, + in->msg_type, this->peerinfo.identifier); ret = -1; } - remaining_size = RPC_FRAGSIZE (priv->incoming.fraghdr) - - priv->incoming.frag.bytes_read; + remaining_size = RPC_FRAGSIZE (in->fraghdr) - frag->bytes_read; if ((ret == -1) - || ((ret == 0) - && (remaining_size == 0) - && (RPC_LASTFRAG (priv->incoming.fraghdr)))) { - priv->incoming.frag.state = SP_STATE_NADA; + || ((ret == 0) && (remaining_size == 0) + && (RPC_LASTFRAG (in->fraghdr)))) { + frag->state = SP_STATE_NADA; } break; @@ -1440,31 +1890,36 @@ out: } -inline +static inline void __socket_reset_priv (socket_private_t *priv) { - if (priv->incoming.iobref) { - iobref_unref (priv->incoming.iobref); - priv->incoming.iobref = NULL; + struct gf_sock_incoming *in = NULL; + + /* used to reduce the indirection */ + in = &priv->incoming; + + if (in->iobref) { + iobref_unref (in->iobref); + in->iobref = NULL; } - if (priv->incoming.iobuf) { - iobuf_unref (priv->incoming.iobuf); + if (in->iobuf) { + iobuf_unref (in->iobuf); } - if (priv->incoming.request_info != NULL) { - GF_FREE (priv->incoming.request_info); - priv->incoming.request_info = NULL; + if (in->request_info != NULL) { + GF_FREE (in->request_info); + in->request_info = NULL; } - memset (&priv->incoming.payload_vector, 0, - sizeof (priv->incoming.payload_vector)); + memset (&in->payload_vector, 0, + sizeof (in->payload_vector)); - priv->incoming.iobuf = NULL; + in->iobuf = NULL; } -int +static int __socket_proto_state_machine (rpc_transport_t *this, rpc_transport_pollin_t **pollin) { @@ -1473,46 +1928,40 @@ __socket_proto_state_machine (rpc_transport_t *this, struct iobuf *iobuf = NULL; struct iobref *iobref = NULL; struct iovec vector[2]; + struct gf_sock_incoming *in = NULL; + struct gf_sock_incoming_frag *frag = NULL; GF_VALIDATE_OR_GOTO ("socket", this, out); GF_VALIDATE_OR_GOTO ("socket", this->private, out); priv = this->private; - while (priv->incoming.record_state != SP_STATE_COMPLETE) { - switch (priv->incoming.record_state) { + /* used to reduce the indirection */ + in = &priv->incoming; + frag = &in->frag; + + while (in->record_state != SP_STATE_COMPLETE) { + switch (in->record_state) { case SP_STATE_NADA: - priv->incoming.total_bytes_read = 0; - priv->incoming.payload_vector.iov_len = 0; + in->total_bytes_read = 0; + in->payload_vector.iov_len = 0; - priv->incoming.pending_vector = priv->incoming.vector; - priv->incoming.pending_vector->iov_base = - &priv->incoming.fraghdr; + in->pending_vector = in->vector; + in->pending_vector->iov_base = &in->fraghdr; - priv->incoming.pending_vector->iov_len = - sizeof (priv->incoming.fraghdr); + in->pending_vector->iov_len = sizeof (in->fraghdr); - priv->incoming.record_state = SP_STATE_READING_FRAGHDR; + in->record_state = SP_STATE_READING_FRAGHDR; /* fall through */ case SP_STATE_READING_FRAGHDR: - ret = __socket_readv (this, - priv->incoming.pending_vector, 1, - &priv->incoming.pending_vector, - &priv->incoming.pending_count, + ret = __socket_readv (this, in->pending_vector, 1, + &in->pending_vector, + &in->pending_count, NULL); - if (ret == -1) { - if (priv->read_fail_log == 1) { - gf_log (this->name, - ((priv->connected == 1) ? - GF_LOG_WARNING : GF_LOG_DEBUG), - "reading from socket failed. 
Error (%s)" - ", peer (%s)", strerror (errno), - this->peerinfo.identifier); - } + if (ret == -1) goto out; - } if (ret > 0) { gf_log (this->name, GF_LOG_TRACE, "partial " @@ -1521,44 +1970,40 @@ __socket_proto_state_machine (rpc_transport_t *this, } if (ret == 0) { - priv->incoming.record_state = - SP_STATE_READ_FRAGHDR; + in->record_state = SP_STATE_READ_FRAGHDR; } /* fall through */ case SP_STATE_READ_FRAGHDR: - priv->incoming.fraghdr = ntoh32 (priv->incoming.fraghdr); - priv->incoming.record_state = SP_STATE_READING_FRAG; - priv->incoming.total_bytes_read - += RPC_FRAGSIZE(priv->incoming.fraghdr); + in->fraghdr = ntoh32 (in->fraghdr); + in->total_bytes_read += RPC_FRAGSIZE(in->fraghdr); iobuf = iobuf_get2 (this->ctx->iobuf_pool, - priv->incoming.total_bytes_read + - sizeof (priv->incoming.fraghdr)); + (in->total_bytes_read + + sizeof (in->fraghdr))); if (!iobuf) { ret = -ENOMEM; goto out; } - priv->incoming.iobuf = iobuf; - priv->incoming.iobuf_size = 0; - priv->incoming.frag.fragcurrent = iobuf_ptr (iobuf); + in->iobuf = iobuf; + in->iobuf_size = 0; + frag->fragcurrent = iobuf_ptr (iobuf); + in->record_state = SP_STATE_READING_FRAG; /* fall through */ case SP_STATE_READING_FRAG: ret = __socket_read_frag (this); - if ((ret == -1) - || (priv->incoming.frag.bytes_read != - RPC_FRAGSIZE (priv->incoming.fraghdr))) { + if ((ret == -1) || + (frag->bytes_read != RPC_FRAGSIZE (in->fraghdr))) { goto out; } - priv->incoming.frag.bytes_read = 0; + frag->bytes_read = 0; - if (!RPC_LASTFRAG (priv->incoming.fraghdr)) { - priv->incoming.record_state = - SP_STATE_READING_FRAGHDR; + if (!RPC_LASTFRAG (in->fraghdr)) { + in->record_state = SP_STATE_READING_FRAGHDR; break; } @@ -1567,44 +2012,39 @@ __socket_proto_state_machine (rpc_transport_t *this, */ if (pollin != NULL) { int count = 0; - priv->incoming.iobuf_size - = priv->incoming.total_bytes_read - - priv->incoming.payload_vector.iov_len; + in->iobuf_size = (in->total_bytes_read - + in->payload_vector.iov_len); memset (vector, 0, sizeof (vector)); - if (priv->incoming.iobref == NULL) { - priv->incoming.iobref = iobref_new (); - if (priv->incoming.iobref == NULL) { + if (in->iobref == NULL) { + in->iobref = iobref_new (); + if (in->iobref == NULL) { ret = -1; goto out; } } - vector[count].iov_base - = iobuf_ptr (priv->incoming.iobuf); - vector[count].iov_len - = priv->incoming.iobuf_size; + vector[count].iov_base = iobuf_ptr (in->iobuf); + vector[count].iov_len = in->iobuf_size; - iobref = priv->incoming.iobref; + iobref = in->iobref; count++; - if (priv->incoming.payload_vector.iov_base - != NULL) { - vector[count] - = priv->incoming.payload_vector; + if (in->payload_vector.iov_base != NULL) { + vector[count] = in->payload_vector; count++; } *pollin = rpc_transport_pollin_alloc (this, vector, count, - priv->incoming.iobuf, + in->iobuf, iobref, - priv->incoming.request_info); - iobuf_unref (priv->incoming.iobuf); - priv->incoming.iobuf = NULL; + in->request_info); + iobuf_unref (in->iobuf); + in->iobuf = NULL; if (*pollin == NULL) { gf_log (this->name, GF_LOG_WARNING, @@ -1612,12 +2052,12 @@ __socket_proto_state_machine (rpc_transport_t *this, ret = -1; goto out; } - if (priv->incoming.msg_type == REPLY) + if (in->msg_type == REPLY) (*pollin)->is_reply = 1; - priv->incoming.request_info = NULL; + in->request_info = NULL; } - priv->incoming.record_state = SP_STATE_COMPLETE; + in->record_state = SP_STATE_COMPLETE; break; case SP_STATE_COMPLETE: @@ -1629,8 +2069,8 @@ __socket_proto_state_machine (rpc_transport_t *this, } } - if 
(priv->incoming.record_state == SP_STATE_COMPLETE) { - priv->incoming.record_state = SP_STATE_NADA; + if (in->record_state == SP_STATE_COMPLETE) { + in->record_state = SP_STATE_NADA; __socket_reset_priv (priv); } @@ -1642,7 +2082,7 @@ out: } -int +static int socket_proto_state_machine (rpc_transport_t *this, rpc_transport_pollin_t **pollin) { @@ -1665,18 +2105,22 @@ out: } -int +static int socket_event_poll_in (rpc_transport_t *this) { int ret = -1; rpc_transport_pollin_t *pollin = NULL; + socket_private_t *priv = this->private; ret = socket_proto_state_machine (this, &pollin); if (pollin != NULL) { + priv->ot_state = OT_CALLBACK; ret = rpc_transport_notify (this, RPC_TRANSPORT_MSG_RECEIVED, pollin); - + if (priv->ot_state == OT_CALLBACK) { + priv->ot_state = OT_RUNNING; + } rpc_transport_pollin_destroy (pollin); } @@ -1684,7 +2128,7 @@ socket_event_poll_in (rpc_transport_t *this) } -int +static int socket_connect_finish (rpc_transport_t *this) { int ret = -1; @@ -1699,9 +2143,11 @@ socket_connect_finish (rpc_transport_t *this) pthread_mutex_lock (&priv->lock); { - if (priv->connected) + if (priv->connected != 0) goto unlock; + get_transport_identifiers (this); + ret = __socket_connect_finish (priv->sock); if (ret == -1 && errno == EINPROGRESS) @@ -1716,8 +2162,6 @@ socket_connect_finish (rpc_transport_t *this) priv->connect_finish_log = 1; } __socket_disconnect (this); - notify_rpc = 1; - event = RPC_TRANSPORT_DISCONNECT; goto unlock; } @@ -1742,7 +2186,6 @@ socket_connect_finish (rpc_transport_t *this) priv->connected = 1; priv->connect_finish_log = 0; event = RPC_TRANSPORT_CONNECT; - get_transport_identifiers (this); } } unlock: @@ -1757,13 +2200,13 @@ out: /* reads rpc_requests during pollin */ -int +static int socket_event_handler (int fd, int idx, void *data, int poll_in, int poll_out, int poll_err) { - rpc_transport_t *this = NULL; + rpc_transport_t *this = NULL; socket_private_t *priv = NULL; - int ret = 0; + int ret = -1; this = data; GF_VALIDATE_OR_GOTO ("socket", this, out); @@ -1773,16 +2216,13 @@ socket_event_handler (int fd, int idx, void *data, THIS = this->xl; priv = this->private; - pthread_mutex_lock (&priv->lock); { priv->idx = idx; } pthread_mutex_unlock (&priv->lock); - if (!priv->connected) { - ret = socket_connect_finish (this); - } + ret = (priv->connected == 1) ? 0 : socket_connect_finish(this); if (!ret && poll_out) { ret = socket_event_poll_out (this); @@ -1798,14 +2238,198 @@ socket_event_handler (int fd, int idx, void *data, "disconnecting now"); socket_event_poll_err (this); rpc_transport_unref (this); - } + } out: - return 0; + return ret; } -int +static void * +socket_poller (void *ctx) +{ + rpc_transport_t *this = ctx; + socket_private_t *priv = this->private; + struct pollfd pfd[2] = {{0,},}; + gf_boolean_t to_write = _gf_false; + int ret = 0; + uint32_t gen = 0; + + priv->ot_state = OT_RUNNING; + + if (priv->use_ssl) { + if (ssl_setup_connection(this,priv->connected) < 0) { + gf_log (this->name,GF_LOG_ERROR, "%s setup failed", + priv->connected ? 
"server" : "client"); + goto err; + } + } + + if (!priv->bio) { + ret = __socket_nonblock (priv->sock); + if (ret == -1) { + gf_log (this->name, GF_LOG_WARNING, + "NBIO on %d failed (%s)", + priv->sock, strerror (errno)); + goto err; + } + } + + if (priv->connected == 0) { + THIS = this->xl; + ret = socket_connect_finish (this); + if (ret != 0) { + gf_log (this->name, GF_LOG_WARNING, + "asynchronous socket_connect_finish failed"); + } + } + + ret = rpc_transport_notify (this->listener, + RPC_TRANSPORT_ACCEPT, this); + if (ret != 0) { + gf_log (this->name, GF_LOG_WARNING, + "asynchronous rpc_transport_notify failed"); + } + + gen = priv->ot_gen; + for (;;) { + pthread_mutex_lock(&priv->lock); + to_write = !list_empty(&priv->ioq); + pthread_mutex_unlock(&priv->lock); + pfd[0].fd = priv->pipe[0]; + pfd[0].events = POLL_MASK_ERROR; + pfd[0].revents = 0; + pfd[1].fd = priv->sock; + pfd[1].events = POLL_MASK_INPUT | POLL_MASK_ERROR; + pfd[1].revents = 0; + if (to_write) { + pfd[1].events |= POLL_MASK_OUTPUT; + } + else { + pfd[0].events |= POLL_MASK_INPUT; + } + if (poll(pfd,2,-1) < 0) { + gf_log(this->name,GF_LOG_ERROR,"poll failed"); + break; + } + if (pfd[0].revents & POLL_MASK_ERROR) { + gf_log(this->name,GF_LOG_ERROR, + "poll error on pipe"); + break; + } + /* Only glusterd actually seems to need this. */ + THIS = this->xl; + if (pfd[1].revents & POLL_MASK_INPUT) { + ret = socket_event_poll_in(this); + if (ret >= 0) { + /* Suppress errors while making progress. */ + pfd[1].revents &= ~POLL_MASK_ERROR; + } + else if (errno == ENOTCONN) { + ret = 0; + } + if (priv->ot_state == OT_PLEASE_DIE) { + gf_log (this->name, GF_LOG_TRACE, + "OT_IDLE on %p (input request)", + this); + priv->ot_state = OT_IDLE; + break; + } + } + else if (pfd[1].revents & POLL_MASK_OUTPUT) { + ret = socket_event_poll_out(this); + if (ret >= 0) { + /* Suppress errors while making progress. */ + pfd[1].revents &= ~POLL_MASK_ERROR; + } + else if (errno == ENOTCONN) { + ret = 0; + } + if (priv->ot_state == OT_PLEASE_DIE) { + gf_log (this->name, GF_LOG_TRACE, + "OT_IDLE on %p (output request)", + this); + priv->ot_state = OT_IDLE; + break; + } + } + else { + /* + * This usually means that we left poll() because + * somebody pushed a byte onto our pipe. That wakeup + * is why the pipe is there, but once awake we can do + * all the checking we need on the next iteration. + */ + ret = 0; + } + if (pfd[1].revents & POLL_MASK_ERROR) { + gf_log(this->name,GF_LOG_ERROR, + "poll error on socket"); + break; + } + if (ret < 0) { + gf_log(this->name,GF_LOG_ERROR, + "error in polling loop"); + break; + } + if (priv->ot_gen != gen) { + gf_log (this->name, GF_LOG_TRACE, + "generation mismatch, my %u != %u", + gen, priv->ot_gen); + return NULL; + } + } + +err: + /* All (and only) I/O errors should come here. */ + pthread_mutex_lock(&priv->lock); + if (priv->ssl_ssl) { + /* + * We're always responsible for this part, but only actually + * have to do it if we got far enough for ssl_ssl to be valid + * (i.e. errors in ssl_setup_connection don't count). 
+ */ + ssl_teardown_connection(priv); + } + __socket_shutdown(this); + close(priv->sock); + priv->sock = -1; + priv->ot_state = OT_IDLE; + pthread_mutex_unlock(&priv->lock); + rpc_transport_notify (this->listener, RPC_TRANSPORT_DISCONNECT, + this); + rpc_transport_unref (this); + return NULL; +} + + +static void +socket_spawn (rpc_transport_t *this) +{ + socket_private_t *priv = this->private; + + switch (priv->ot_state) { + case OT_IDLE: + case OT_PLEASE_DIE: + break; + default: + gf_log (this->name, GF_LOG_WARNING, + "refusing to start redundant poller"); + return; + } + + priv->ot_gen += 7; + priv->ot_state = OT_SPAWNING; + gf_log (this->name, GF_LOG_TRACE, + "spawning %p with gen %u", this, priv->ot_gen); + + if (gf_thread_create(&priv->thread,NULL,socket_poller,this) != 0) { + gf_log (this->name, GF_LOG_ERROR, + "could not create poll thread"); + } +} + +static int socket_server_event_handler (int fd, int idx, void *data, int poll_in, int poll_out, int poll_err) { @@ -1843,20 +2467,7 @@ socket_server_event_handler (int fd, int idx, void *data, goto unlock; } - if (!priv->bio) { - ret = __socket_nonblock (new_sock); - - if (ret == -1) { - gf_log (this->name, GF_LOG_WARNING, - "NBIO on %d failed (%s)", - new_sock, strerror (errno)); - - close (new_sock); - goto unlock; - } - } - - if (priv->nodelay) { + if (priv->nodelay && (new_sockaddr.ss_family != AF_UNIX)) { ret = __socket_nodelay (new_sock); if (ret == -1) { gf_log (this->name, GF_LOG_WARNING, @@ -1866,8 +2477,10 @@ socket_server_event_handler (int fd, int idx, void *data, } } - if (priv->keepalive) { + if (priv->keepalive && + new_sockaddr.ss_family != AF_UNIX) { ret = __socket_keepalive (new_sock, + new_sockaddr.ss_family, priv->keepaliveintvl, priv->keepaliveidle); if (ret == -1) @@ -1881,6 +2494,15 @@ socket_server_event_handler (int fd, int idx, void *data, if (!new_trans) goto unlock; + ret = pthread_mutex_init(&new_trans->lock, NULL); + if (ret == -1) { + gf_log (this->name, GF_LOG_WARNING, + "pthread_mutex_init() failed: %s", + strerror (errno)); + close (new_sock); + goto unlock; + } + new_trans->name = gf_strdup (this->name); memcpy (&new_trans->peerinfo.sockaddr, &new_sockaddr, @@ -1902,7 +2524,11 @@ socket_server_event_handler (int fd, int idx, void *data, } get_transport_identifiers (new_trans); - socket_init (new_trans); + ret = socket_init(new_trans); + if (ret != 0) { + close(new_sock); + goto unlock; + } new_trans->ops = this->ops; new_trans->init = this->init; new_trans->fini = this->fini; @@ -1913,20 +2539,62 @@ socket_server_event_handler (int fd, int idx, void *data, new_trans->listener = this; new_priv = new_trans->private; + new_priv->use_ssl = priv->use_ssl; + new_priv->sock = new_sock; + new_priv->own_thread = priv->own_thread; + + new_priv->ssl_ctx = priv->ssl_ctx; + if (priv->use_ssl && !priv->own_thread) { + if (ssl_setup_connection(new_trans,1) < 0) { + gf_log(this->name,GF_LOG_ERROR, + "server setup failed"); + close(new_sock); + goto unlock; + } + } + + if (!priv->bio && !priv->own_thread) { + ret = __socket_nonblock (new_sock); + + if (ret == -1) { + gf_log (this->name, GF_LOG_WARNING, + "NBIO on %d failed (%s)", + new_sock, strerror (errno)); + + close (new_sock); + goto unlock; + } + } + pthread_mutex_lock (&new_priv->lock); { - new_priv->sock = new_sock; + /* + * In the own_thread case, this is used to + * indicate that we're initializing a server + * connection. 
+ */ new_priv->connected = 1; + new_priv->is_server = _gf_true; rpc_transport_ref (new_trans); - new_priv->idx = - event_register (ctx->event_pool, - new_sock, - socket_event_handler, - new_trans, 1, 0); + if (new_priv->own_thread) { + if (pipe(new_priv->pipe) < 0) { + gf_log(this->name,GF_LOG_ERROR, + "could not create pipe"); + } + socket_spawn(new_trans); + } + else { + new_priv->idx = + event_register (ctx->event_pool, + new_sock, + socket_event_handler, + new_trans, + 1, 0); + if (new_priv->idx == -1) + ret = -1; + } - if (new_priv->idx == -1) - ret = -1; } pthread_mutex_unlock (&new_priv->lock); if (ret == -1) { @@ -1935,8 +2603,10 @@ socket_server_event_handler (int fd, int idx, void *data, goto unlock; } - ret = rpc_transport_notify (this, RPC_TRANSPORT_ACCEPT, - new_trans); + if (!priv->own_thread) { + ret = rpc_transport_notify (this, + RPC_TRANSPORT_ACCEPT, new_trans); + } } } unlock: @@ -1947,7 +2617,7 @@ out: } -int +static int socket_disconnect (rpc_transport_t *this) { socket_private_t *priv = NULL; @@ -1969,7 +2639,7 @@ out: } -int +static int socket_connect (rpc_transport_t *this, int port) { int ret = -1; @@ -1978,7 +2648,9 @@ socket_connect (rpc_transport_t *this, int port) socklen_t sockaddr_len = 0; glusterfs_ctx_t *ctx = NULL; sa_family_t sa_family = {0, }; + char *local_addr = NULL; union gf_sock_union sock_union; + struct sockaddr_in *addr = NULL; GF_VALIDATE_OR_GOTO ("socket", this, err); GF_VALIDATE_OR_GOTO ("socket", this->private, err); @@ -2006,6 +2678,10 @@ socket_connect (rpc_transport_t *this, int port) goto err; } + gf_log (this->name, GF_LOG_TRACE, + "connecting %p, state=%u gen=%u sock=%d", this, + priv->ot_state, priv->ot_gen, priv->sock); + ret = socket_client_get_remote_sockaddr (this, &sock_union.sa, &sockaddr_len, &sa_family); if (ret == -1) { @@ -2016,6 +2692,20 @@ socket_connect (rpc_transport_t *this, int port) if (port > 0) { sock_union.sin.sin_port = htons (port); } + if (ntohs(sock_union.sin.sin_port) == GF_DEFAULT_SOCKET_LISTEN_PORT) { + if (priv->use_ssl) { + gf_log(this->name,GF_LOG_DEBUG, + "disabling SSL for portmapper connection"); + priv->use_ssl = _gf_false; + } + } + else { + if (priv->ssl_enabled && !priv->use_ssl) { + gf_log(this->name,GF_LOG_DEBUG, + "re-enabling SSL for I/O connection"); + priv->use_ssl = _gf_true; + } + } pthread_mutex_lock (&priv->lock); { if (priv->sock != -1) { @@ -2061,7 +2751,7 @@ socket_connect (rpc_transport_t *this, int port) } } - if (priv->nodelay) { + if (priv->nodelay && (sa_family != AF_UNIX)) { ret = __socket_nodelay (priv->sock); if (ret == -1) { @@ -2071,21 +2761,9 @@ socket_connect (rpc_transport_t *this, int port) } } - if (!priv->bio) { - ret = __socket_nonblock (priv->sock); - - if (ret == -1) { - gf_log (this->name, GF_LOG_ERROR, - "NBIO on %d failed (%s)", - priv->sock, strerror (errno)); - close (priv->sock); - priv->sock = -1; - goto unlock; - } - } - - if (priv->keepalive) { + if (priv->keepalive && sa_family != AF_UNIX) { ret = __socket_keepalive (priv->sock, + sa_family, priv->keepaliveintvl, priv->keepaliveidle); if (ret == -1) @@ -2097,6 +2775,15 @@ socket_connect (rpc_transport_t *this, int port) SA (&this->myinfo.sockaddr)->sa_family = SA (&this->peerinfo.sockaddr)->sa_family; + /* If a source addr is explicitly specified, use it */ + ret = dict_get_str (this->options, + "transport.socket.source-addr", + &local_addr); + if (!ret && SA (&this->myinfo.sockaddr)->sa_family == AF_INET) { + addr = (struct sockaddr_in *)(&this->myinfo.sockaddr); + ret = inet_pton (AF_INET, local_addr, 
&(addr->sin_addr.s_addr)); + } + ret = client_bind (this, SA (&this->myinfo.sockaddr), &this->myinfo.sockaddr_len, priv->sock); if (ret == -1) { @@ -2107,29 +2794,86 @@ socket_connect (rpc_transport_t *this, int port) goto unlock; } + if (!priv->use_ssl && !priv->bio && !priv->own_thread) { + ret = __socket_nonblock (priv->sock); + if (ret == -1) { + gf_log (this->name, GF_LOG_ERROR, + "NBIO on %d failed (%s)", + priv->sock, strerror (errno)); + close (priv->sock); + priv->sock = -1; + goto unlock; + } + } + ret = connect (priv->sock, SA (&this->peerinfo.sockaddr), this->peerinfo.sockaddr_len); if (ret == -1 && ((errno != EINPROGRESS) && (errno != ENOENT))) { - gf_log (this->name, GF_LOG_ERROR, - "connection attempt failed (%s)", - strerror (errno)); + /* For unix path based sockets, the socket path is + * cryptic (md5sum of path) and may not be useful for + * the user in debugging so log it in DEBUG + */ + gf_log (this->name, ((sa_family == AF_UNIX) ? + GF_LOG_DEBUG : GF_LOG_ERROR), + "connection attempt on %s failed, (%s)", + this->peerinfo.identifier, strerror (errno)); close (priv->sock); priv->sock = -1; goto unlock; } - priv->connected = 0; + if (priv->use_ssl && !priv->own_thread) { + ret = ssl_setup_connection(this,0); + if (ret < 0) { + gf_log(this->name,GF_LOG_ERROR, + "client setup failed"); + close(priv->sock); + priv->sock = -1; + goto unlock; + } + } - rpc_transport_ref (this); + if (!priv->bio && !priv->own_thread) { + ret = __socket_nonblock (priv->sock); - priv->idx = event_register (ctx->event_pool, priv->sock, - socket_event_handler, this, 1, 1); - if (priv->idx == -1) { - gf_log (this->name, GF_LOG_WARNING, - "failed to register the event"); - ret = -1; + if (ret == -1) { + gf_log (this->name, GF_LOG_ERROR, + "NBIO on %d failed (%s)", + priv->sock, strerror (errno)); + close (priv->sock); + priv->sock = -1; + goto unlock; + } } + + /* + * In the own_thread case, this is used to indicate that we're + * initializing a client connection. 
+ */ + priv->connected = 0; + priv->is_server = _gf_false; + rpc_transport_ref (this); + + if (priv->own_thread) { + if (pipe(priv->pipe) < 0) { + gf_log(this->name,GF_LOG_ERROR, + "could not create pipe"); + } + + this->listener = this; + socket_spawn(this); + } + else { + priv->idx = event_register (ctx->event_pool, priv->sock, + socket_event_handler, + this, 1, 1); + if (priv->idx == -1) { + gf_log ("", GF_LOG_WARNING, + "failed to register the event"); + ret = -1; + } + } } unlock: pthread_mutex_unlock (&priv->lock); @@ -2139,7 +2883,7 @@ err: } -int +static int socket_listen (rpc_transport_t *this) { socket_private_t * priv = NULL; @@ -2221,7 +2965,7 @@ socket_listen (rpc_transport_t *this) } } - if (priv->nodelay) { + if (priv->nodelay && (sa_family != AF_UNIX)) { ret = __socket_nodelay (priv->sock); if (ret == -1) { gf_log (this->name, GF_LOG_ERROR, @@ -2290,7 +3034,7 @@ out: } -int32_t +static int32_t socket_submit_request (rpc_transport_t *this, rpc_transport_req_t *req) { socket_private_t *priv = NULL; @@ -2299,6 +3043,7 @@ socket_submit_request (rpc_transport_t *this, rpc_transport_req_t *req) char need_append = 1; struct ioq *entry = NULL; glusterfs_ctx_t *ctx = NULL; + char a_byte = 'j'; GF_VALIDATE_OR_GOTO ("socket", this, out); GF_VALIDATE_OR_GOTO ("socket", this->private, out); @@ -2324,21 +3069,31 @@ socket_submit_request (rpc_transport_t *this, rpc_transport_req_t *req) goto unlock; if (list_empty (&priv->ioq)) { - ret = __socket_ioq_churn_entry (this, entry); + ret = __socket_ioq_churn_entry (this, entry, 1); - if (ret == 0) + if (ret == 0) { need_append = 0; - - if (ret > 0) + } + if (ret > 0) { need_poll_out = 1; + } } if (need_append) { list_add_tail (&entry->list, &priv->ioq); + if (priv->own_thread) { + /* + * Make sure the polling thread wakes up, by + * writing a byte to represent this entry. + */ + if (write(priv->pipe[1],&a_byte,1) < 1) { + gf_log(this->name,GF_LOG_WARNING, + "write error on pipe"); + } + } ret = 0; } - - if (need_poll_out) { + if (!priv->own_thread && need_poll_out) { /* first entry to wait. continue writing on POLLOUT */ priv->idx = event_select_on (ctx->event_pool, priv->sock, @@ -2353,7 +3108,7 @@ out: } -int32_t +static int32_t socket_submit_reply (rpc_transport_t *this, rpc_transport_reply_t *reply) { socket_private_t *priv = NULL; @@ -2362,6 +3117,7 @@ socket_submit_reply (rpc_transport_t *this, rpc_transport_reply_t *reply) char need_append = 1; struct ioq *entry = NULL; glusterfs_ctx_t *ctx = NULL; + char a_byte = 'd'; GF_VALIDATE_OR_GOTO ("socket", this, out); GF_VALIDATE_OR_GOTO ("socket", this->private, out); @@ -2380,33 +3136,44 @@ socket_submit_reply (rpc_transport_t *this, rpc_transport_reply_t *reply) } goto unlock; } + priv->submit_log = 0; entry = __socket_ioq_new (this, &reply->msg); if (!entry) goto unlock; + if (list_empty (&priv->ioq)) { - ret = __socket_ioq_churn_entry (this, entry); + ret = __socket_ioq_churn_entry (this, entry, 1); - if (ret == 0) + if (ret == 0) { need_append = 0; - - if (ret > 0) + } + if (ret > 0) { need_poll_out = 1; + } } if (need_append) { list_add_tail (&entry->list, &priv->ioq); + if (priv->own_thread) { + /* + * Make sure the polling thread wakes up, by + * writing a byte to represent this entry. + */ + if (write(priv->pipe[1],&a_byte,1) < 1) { + gf_log(this->name,GF_LOG_WARNING, + "write error on pipe"); + } + } ret = 0; } - - if (need_poll_out) { + if (!priv->own_thread && need_poll_out) { /* first entry to wait. 
continue writing on POLLOUT */ priv->idx = event_select_on (ctx->event_pool, priv->sock, priv->idx, -1, 1); } } - unlock: pthread_mutex_unlock (&priv->lock); @@ -2415,7 +3182,7 @@ out: } -int32_t +static int32_t socket_getpeername (rpc_transport_t *this, char *hostname, int hostlen) { int32_t ret = -1; @@ -2434,7 +3201,7 @@ out: } -int32_t +static int32_t socket_getpeeraddr (rpc_transport_t *this, char *peeraddr, int addrlen, struct sockaddr_storage *sa, socklen_t salen) { @@ -2455,7 +3222,7 @@ out: } -int32_t +static int32_t socket_getmyname (rpc_transport_t *this, char *hostname, int hostlen) { int32_t ret = -1; @@ -2474,7 +3241,7 @@ out: } -int32_t +static int32_t socket_getmyaddr (rpc_transport_t *this, char *myaddr, int addrlen, struct sockaddr_storage *sa, socklen_t salen) { @@ -2494,6 +3261,25 @@ out: } +static int +socket_throttle (rpc_transport_t *this, gf_boolean_t onoff) +{ + socket_private_t *priv = NULL; + + priv = this->private; + + /* The way we implement throttling is by taking off + POLLIN event from the polled flags. This way we + never get called with the POLLIN event and therefore + will never read() any more data until throttling + is turned off. + */ + priv->idx = event_select_on (this->ctx->event_pool, priv->sock, + priv->idx, (int) !onoff, -1); + return 0; +} + + struct rpc_transport_ops tops = { .listen = socket_listen, .connect = socket_connect, @@ -2504,6 +3290,7 @@ struct rpc_transport_ops tops = { .get_peeraddr = socket_getpeeraddr, .get_myname = socket_getmyname, .get_myaddr = socket_getmyaddr, + .throttle = socket_throttle, }; int @@ -2560,7 +3347,7 @@ out: } -int +static int socket_init (rpc_transport_t *this) { socket_private_t *priv = NULL; @@ -2569,6 +3356,7 @@ socket_init (rpc_transport_t *this) char *optstr = NULL; uint32_t keepalive = 0; uint32_t backlog = 0; + int session_id = 0; if (this->private) { gf_log_callingfn (this->name, GF_LOG_ERROR, @@ -2580,6 +3368,7 @@ socket_init (rpc_transport_t *this) if (!priv) { return -1; } + memset(priv,0,sizeof(*priv)); pthread_mutex_init (&priv->lock, NULL); @@ -2697,11 +3486,134 @@ socket_init (rpc_transport_t *this) } } - optstr = NULL; + priv->windowsize = (int)windowsize; + + priv->ssl_enabled = _gf_false; + if (dict_get_str(this->options,SSL_ENABLED_OPT,&optstr) == 0) { + if (gf_string2boolean (optstr, &priv->ssl_enabled) != 0) { + gf_log (this->name, GF_LOG_ERROR, + "invalid value given for ssl-enabled boolean"); + } + } + + priv->ssl_own_cert = DEFAULT_CERT_PATH; + if (dict_get_str(this->options,SSL_OWN_CERT_OPT,&optstr) == 0) { + if (!priv->ssl_enabled) { + gf_log(this->name,GF_LOG_WARNING, + "%s specified without %s (ignored)", + SSL_OWN_CERT_OPT, SSL_ENABLED_OPT); + } + priv->ssl_own_cert = optstr; + } + priv->ssl_own_cert = gf_strdup(priv->ssl_own_cert); + + priv->ssl_private_key = DEFAULT_KEY_PATH; + if (dict_get_str(this->options,SSL_PRIVATE_KEY_OPT,&optstr) == 0) { + if (!priv->ssl_enabled) { + gf_log(this->name,GF_LOG_WARNING, + "%s specified without %s (ignored)", + SSL_PRIVATE_KEY_OPT, SSL_ENABLED_OPT); + } + priv->ssl_private_key = optstr; + } + priv->ssl_private_key = gf_strdup(priv->ssl_private_key); + + priv->ssl_ca_list = DEFAULT_CA_PATH; + if (dict_get_str(this->options,SSL_CA_LIST_OPT,&optstr) == 0) { + if (!priv->ssl_enabled) { + gf_log(this->name,GF_LOG_WARNING, + "%s specified without %s (ignored)", + SSL_CA_LIST_OPT, SSL_ENABLED_OPT); + } + priv->ssl_ca_list = optstr; + } + priv->ssl_ca_list = gf_strdup(priv->ssl_ca_list); + + gf_log(this->name,GF_LOG_INFO,"SSL support is %s", + 
priv->ssl_enabled ? "ENABLED" : "NOT enabled"); + /* + * This might get overridden temporarily in socket_connect (q.v.) + * if we're using the glusterd portmapper. + */ + priv->use_ssl = priv->ssl_enabled; + + priv->own_thread = priv->use_ssl; + if (dict_get_str(this->options,OWN_THREAD_OPT,&optstr) == 0) { + if (gf_string2boolean (optstr, &priv->own_thread) != 0) { + gf_log (this->name, GF_LOG_ERROR, + "invalid value given for own-thread boolean"); + } + } + gf_log(this->name,GF_LOG_INFO,"using %s polling thread", + priv->own_thread ? "private" : "system"); + + if (priv->use_ssl) { + SSL_library_init(); + SSL_load_error_strings(); + priv->ssl_meth = (SSL_METHOD *)TLSv1_method(); + priv->ssl_ctx = SSL_CTX_new(priv->ssl_meth); + + if (SSL_CTX_set_cipher_list(priv->ssl_ctx, + "HIGH:-SSLv2") == 0) { + gf_log(this->name,GF_LOG_ERROR, + "failed to find any valid ciphers"); + goto err; + } + + if (!SSL_CTX_use_certificate_chain_file(priv->ssl_ctx, + priv->ssl_own_cert)) { + gf_log(this->name,GF_LOG_ERROR, + "could not load our cert"); + goto err; + } + + if (!SSL_CTX_use_PrivateKey_file(priv->ssl_ctx, + priv->ssl_private_key, + SSL_FILETYPE_PEM)) { + gf_log(this->name,GF_LOG_ERROR, + "could not load private key"); + goto err; + } + + if (!SSL_CTX_load_verify_locations(priv->ssl_ctx, + priv->ssl_ca_list,0)) { + gf_log(this->name,GF_LOG_ERROR, + "could not load CA list"); + goto err; + } + +#if (OPENSSL_VERSION_NUMBER < 0x00905100L) + SSL_CTX_set_verify_depth(ctx,1); +#endif + + priv->ssl_session_id = ++session_id; + SSL_CTX_set_session_id_context(priv->ssl_ctx, + (void *)&priv->ssl_session_id, + sizeof(priv->ssl_session_id)); + + SSL_CTX_set_verify(priv->ssl_ctx,SSL_VERIFY_PEER,0); + } + + if (priv->own_thread) { + priv->ot_state = OT_IDLE; + } + out: this->private = priv; - return 0; + +err: + if (priv->ssl_own_cert) { + GF_FREE(priv->ssl_own_cert); + } + if (priv->ssl_private_key) { + GF_FREE(priv->ssl_private_key); + } + if (priv->ssl_ca_list) { + GF_FREE(priv->ssl_ca_list); + } + GF_FREE(priv); + return -1; } @@ -2727,6 +3639,15 @@ fini (rpc_transport_t *this) "transport %p destroyed", this); pthread_mutex_destroy (&priv->lock); + if (priv->ssl_private_key) { + GF_FREE(priv->ssl_private_key); + } + if (priv->ssl_own_cert) { + GF_FREE(priv->ssl_own_cert); + } + if (priv->ssl_ca_list) { + GF_FREE(priv->ssl_ca_list); + } GF_FREE (priv); } @@ -2771,8 +3692,7 @@ struct volume_options options[] = { }, { .key = { "transport.address-family", "address-family" }, - .value = {"inet", "inet6", "inet/inet6", "inet6/inet", - "unix", "inet-sdp" }, + .value = {"inet", "inet6", "unix", "inet-sdp" }, .type = GF_OPTION_TYPE_STR }, @@ -2805,5 +3725,20 @@ struct volume_options options[] = { { .key = {"transport.socket.read-fail-log"}, .type = GF_OPTION_TYPE_BOOL }, + { .key = {SSL_ENABLED_OPT}, + .type = GF_OPTION_TYPE_BOOL + }, + { .key = {SSL_OWN_CERT_OPT}, + .type = GF_OPTION_TYPE_STR + }, + { .key = {SSL_PRIVATE_KEY_OPT}, + .type = GF_OPTION_TYPE_STR + }, + { .key = {SSL_CA_LIST_OPT}, + .type = GF_OPTION_TYPE_STR + }, + { .key = {OWN_THREAD_OPT}, + .type = GF_OPTION_TYPE_BOOL + }, { .key = {NULL} } }; diff --git a/rpc/rpc-transport/socket/src/socket.h b/rpc/rpc-transport/socket/src/socket.h index 0c897bd2e..e0b412fcc 100644 --- a/rpc/rpc-transport/socket/src/socket.h +++ b/rpc/rpc-transport/socket/src/socket.h @@ -11,6 +11,8 @@ #ifndef _SOCKET_H #define _SOCKET_H +#include <openssl/ssl.h> +#include <openssl/err.h> #ifndef _CONFIG_H #define _CONFIG_H @@ -72,6 +74,12 @@ typedef enum { SP_STATE_READ_VERFBYTES, 
/* read verifier data */ SP_STATE_READING_PROGHDR, SP_STATE_READ_PROGHDR, + SP_STATE_READING_PROGHDR_XDATA, + SP_STATE_READ_PROGHDR_XDATA, /* It's a bad "name" in the generic + RPC state machine, but greatly + aids code review (and xdata is + the only "consumer" of this state) + */ SP_STATE_READING_PROG, } sp_rpcfrag_vectored_request_state_t; @@ -115,6 +123,8 @@ typedef enum { typedef enum { SP_STATE_ACCEPTED_SUCCESS_REPLY_INIT, SP_STATE_READING_PROC_HEADER, + SP_STATE_READING_PROC_OPAQUE, + SP_STATE_READ_PROC_OPAQUE, SP_STATE_READ_PROC_HEADER, } sp_rpcfrag_vectored_reply_accepted_success_state_t; @@ -133,10 +143,60 @@ typedef struct { sp_rpcfrag_vectored_reply_accepted_success_state_t accepted_success_state; } sp_rpcfrag_vectored_reply_state_t; +struct gf_sock_incoming_frag { + char *fragcurrent; + uint32_t bytes_read; + uint32_t remaining_size; + struct iovec vector; + struct iovec *pending_vector; + union { + sp_rpcfrag_request_state_t request; + sp_rpcfrag_vectored_reply_state_t reply; + } call_body; + + sp_rpcfrag_simple_msg_state_t simple_state; + sp_rpcfrag_state_t state; +}; + +#define GF_SOCKET_RA_MAX 1024 + +struct gf_sock_incoming { + sp_rpcrecord_state_t record_state; + struct gf_sock_incoming_frag frag; + char *proghdr_base_addr; + struct iobuf *iobuf; + size_t iobuf_size; + struct iovec vector[2]; + int count; + struct iovec payload_vector; + struct iobref *iobref; + rpc_request_info_t *request_info; + struct iovec *pending_vector; + int pending_count; + uint32_t fraghdr; + char complete_record; + msg_type_t msg_type; + size_t total_bytes_read; + + size_t ra_read; + size_t ra_max; + size_t ra_served; + char *ra_buf; +}; + +typedef enum { + OT_IDLE, /* Uninitialized or termination complete. */ + OT_SPAWNING, /* Past pthread_create but not in thread yet. */ + OT_RUNNING, /* Poller thread running normally. */ + OT_CALLBACK, /* Poller thread in the middle of a callback. */ + OT_PLEASE_DIE, /* Poller termination requested. */ +} ot_state_t; + typedef struct { int32_t sock; int32_t idx; - unsigned char connected; // -1 = not connected. 0 = in progress. 1 = connected + /* -1 = not connected. 0 = in progress. 
1 = connected */ + char connected; char bio; char connect_finish_log; char submit_log; @@ -147,36 +207,7 @@ typedef struct { struct ioq *ioq_prev; }; }; - struct { - sp_rpcrecord_state_t record_state; - struct { - char *fragcurrent; - uint32_t bytes_read; - uint32_t remaining_size; - struct iovec vector; - struct iovec *pending_vector; - union { - sp_rpcfrag_request_state_t request; - sp_rpcfrag_vectored_reply_state_t reply; - } call_body; - - sp_rpcfrag_simple_msg_state_t simple_state; - sp_rpcfrag_state_t state; - } frag; - struct iobuf *iobuf; - size_t iobuf_size; - struct iovec vector[2]; - int count; - struct iovec payload_vector; - struct iobref *iobref; - rpc_request_info_t *request_info; - struct iovec *pending_vector; - int pending_count; - uint32_t fraghdr; - char complete_record; - msg_type_t msg_type; - size_t total_bytes_read; - } incoming; + struct gf_sock_incoming incoming; pthread_mutex_t lock; int windowsize; char lowlat; @@ -186,6 +217,22 @@ typedef struct { int keepaliveintvl; uint32_t backlog; gf_boolean_t read_fail_log; + gf_boolean_t ssl_enabled; + gf_boolean_t use_ssl; + SSL_METHOD *ssl_meth; + SSL_CTX *ssl_ctx; + int ssl_session_id; + BIO *ssl_sbio; + SSL *ssl_ssl; + char *ssl_own_cert; + char *ssl_private_key; + char *ssl_ca_list; + pthread_t thread; + int pipe[2]; + gf_boolean_t own_thread; + ot_state_t ot_state; + uint32_t ot_gen; + gf_boolean_t is_server; } socket_private_t; |
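
Editor's note on the own-thread ("private polling thread") mechanism added by this patch: socket_submit_request and socket_submit_reply wake the per-connection poller by writing a single byte to priv->pipe[1], and socket_poller waits on the pipe's read end alongside the data socket, adding POLLOUT only while the ioq is non-empty. The stand-alone sketch below shows that wakeup pattern using only plain POSIX calls; every name in it (wakeup_pipe, q_lock, pending, submit_one, poller) is illustrative and not part of the patch, and unlike the patch it drains the pipe immediately rather than on the next loop iteration.

#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

int              wakeup_pipe[2];   /* [0] read end for the poller, [1] write end for submitters */
pthread_mutex_t  q_lock = PTHREAD_MUTEX_INITIALIZER;
int              pending = 0;      /* stands in for the ioq list */

/* Submit path: queue an entry, then write one byte so the poller wakes up. */
void
submit_one (void)
{
        char a_byte = 'j';

        pthread_mutex_lock (&q_lock);
        pending++;
        pthread_mutex_unlock (&q_lock);

        if (write (wakeup_pipe[1], &a_byte, 1) < 1)
                fprintf (stderr, "write error on pipe\n");
}

/* Poller thread: wait on the pipe for wakeups and on the socket for I/O. */
void *
poller (void *arg)
{
        int           sock = *(int *)arg;
        struct pollfd pfd[2];
        char          buf[64];
        int           to_write = 0;

        for (;;) {
                pthread_mutex_lock (&q_lock);
                to_write = (pending > 0);
                pthread_mutex_unlock (&q_lock);

                /* Watch the pipe for wakeups only while there is nothing
                   to write; otherwise ask for POLLOUT on the socket. */
                pfd[0].fd     = wakeup_pipe[0];
                pfd[0].events = to_write ? 0 : POLLIN;
                pfd[1].fd     = sock;
                pfd[1].events = POLLIN | (to_write ? POLLOUT : 0);

                if (poll (pfd, 2, -1) < 0)
                        break;

                if (pfd[0].revents & POLLIN)
                        (void) read (wakeup_pipe[0], buf, sizeof (buf));

                if (pfd[1].revents & POLLOUT) {
                        /* ...flush one queued entry to the socket here... */
                        pthread_mutex_lock (&q_lock);
                        if (pending > 0)
                                pending--;
                        pthread_mutex_unlock (&q_lock);
                }

                if (pfd[1].revents & (POLLERR | POLLHUP))
                        break;
        }
        return NULL;
}

As in the patch, the pipe has to exist before the thread does: socket_connect and socket_server_event_handler call pipe() on priv->pipe just before socket_spawn, so in this sketch pipe(wakeup_pipe) would likewise be called before pthread_create(..., poller, ...).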

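
Editor's note on the SSL initialization in socket_init: the patch sets up one SSL_CTX per transport from the ssl-cert, ssl-key and ssl-ca-list options. The sketch below isolates that same call sequence (cipher list "HIGH:-SSLv2", certificate chain, private key, CA list, mandatory peer verification). It assumes a pre-1.1.0-style OpenSSL API where SSL_library_init() and TLSv1_method() are available, and setup_ssl_ctx plus its path arguments are placeholders, not the defaults used by the patch; the session-id context and verify-depth handling are omitted.

#include <openssl/ssl.h>
#include <openssl/err.h>
#include <stdio.h>

SSL_CTX *
setup_ssl_ctx (const char *cert, const char *key, const char *ca_list)
{
        SSL_CTX *ctx = NULL;

        SSL_library_init ();
        SSL_load_error_strings ();

        ctx = SSL_CTX_new (TLSv1_method ());
        if (!ctx)
                return NULL;

        /* Same cipher policy as the patch: strong ciphers, no SSLv2. */
        if (SSL_CTX_set_cipher_list (ctx, "HIGH:-SSLv2") == 0)
                goto err;

        if (!SSL_CTX_use_certificate_chain_file (ctx, cert))
                goto err;

        if (!SSL_CTX_use_PrivateKey_file (ctx, key, SSL_FILETYPE_PEM))
                goto err;

        if (!SSL_CTX_load_verify_locations (ctx, ca_list, NULL))
                goto err;

        /* Both client and server verify the peer, so a bad certificate
           fails the handshake instead of surfacing later. */
        SSL_CTX_set_verify (ctx, SSL_VERIFY_PEER, NULL);

        return ctx;

err:
        ERR_print_errors_fp (stderr);
        SSL_CTX_free (ctx);
        return NULL;
}

The per-connection SSL and BIO objects stored in socket_private_t (ssl_ssl, ssl_sbio) would then be created from such a context during the handshake; in the patch that happens in ssl_setup_connection, which these hunks call but do not show.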