Diffstat (limited to 'rpc/rpc-transport/rdma/src/rdma.c')
-rw-r--r--  rpc/rpc-transport/rdma/src/rdma.c  3164
1 file changed, 1394 insertions, 1770 deletions
diff --git a/rpc/rpc-transport/rdma/src/rdma.c b/rpc/rpc-transport/rdma/src/rdma.c
index 04531cda0..6e6099a98 100644
--- a/rpc/rpc-transport/rdma/src/rdma.c
+++ b/rpc/rpc-transport/rdma/src/rdma.c
@@ -1,23 +1,13 @@
/*
- Copyright (c) 2006-2011 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
-
#ifndef _CONFIG_H
#define _CONFIG_H
#include "config.h"
@@ -30,6 +20,7 @@
#include "name.h"
#include "byte-order.h"
#include "xlator.h"
+#include "xdr-rpc.h"
#include <signal.h>
#define GF_RDMA_LOG_NAME "rpc-transport/rdma"
@@ -43,99 +34,29 @@ gf_rdma_post_ref (gf_rdma_post_t *post);
int
gf_rdma_post_unref (gf_rdma_post_t *post);
-int32_t
-gf_resolve_ip6 (const char *hostname,
- uint16_t port,
- int family,
- void **dnscache,
- struct addrinfo **addr_info);
-
-static uint16_t
-gf_rdma_get_local_lid (struct ibv_context *context,
- int32_t port)
-{
- struct ibv_port_attr attr;
-
- if (ibv_query_port (context, port, &attr))
- return 0;
+static void *
+gf_rdma_send_completion_proc (void *data);
- return attr.lid;
-}
+static void *
+gf_rdma_recv_completion_proc (void *data);
-static const char *
-get_port_state_str(enum ibv_port_state pstate)
-{
- switch (pstate) {
- case IBV_PORT_DOWN: return "PORT_DOWN";
- case IBV_PORT_INIT: return "PORT_INIT";
- case IBV_PORT_ARMED: return "PORT_ARMED";
- case IBV_PORT_ACTIVE: return "PORT_ACTIVE";
- case IBV_PORT_ACTIVE_DEFER: return "PORT_ACTIVE_DEFER";
- default: return "invalid state";
- }
-}
+void *
+gf_rdma_async_event_thread (void *context);
static int32_t
-ib_check_active_port (struct ibv_context *ctx, uint8_t port)
-{
- struct ibv_port_attr port_attr = {0, };
- int32_t ret = 0;
- const char *state_str = NULL;
-
- if (!ctx) {
- gf_log_callingfn (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "Error in supplied context");
- return -1;
- }
-
- ret = ibv_query_port (ctx, port, &port_attr);
-
- if (ret) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "Failed to query port %u properties", port);
- return -1;
- }
-
- state_str = get_port_state_str (port_attr.state);
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_TRACE,
- "Infiniband PORT: (%u) STATE: (%s)",
- port, state_str);
-
- if (port_attr.state == IBV_PORT_ACTIVE)
- return 0;
-
- return -1;
-}
+gf_rdma_create_qp (rpc_transport_t *this);
static int32_t
-ib_get_active_port (struct ibv_context *ib_ctx)
-{
- struct ibv_device_attr ib_device_attr = {{0, }, };
- int32_t ret = -1;
- uint8_t ib_port = 0;
+__gf_rdma_teardown (rpc_transport_t *this);
- if (!ib_ctx) {
- gf_log_callingfn (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "Error in supplied context");
- return -1;
- }
- if (ibv_query_device (ib_ctx, &ib_device_attr)) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "Failed to query device properties");
- return -1;
- }
+static int32_t
+gf_rdma_teardown (rpc_transport_t *this);
- for (ib_port = 1; ib_port <= ib_device_attr.phys_port_cnt; ++ib_port) {
- ret = ib_check_active_port (ib_ctx, ib_port);
- if (ret == 0)
- return ib_port;
+static int32_t
+gf_rdma_disconnect (rpc_transport_t *this);
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_TRACE,
- "Port:(%u) not active", ib_port);
- continue;
- }
- return ret;
-}
+static void
+gf_rdma_cm_handle_disconnect (rpc_transport_t *this);
static void
@@ -165,7 +86,7 @@ gf_rdma_put_post (gf_rdma_queue_t *queue, gf_rdma_post_t *post)
static gf_rdma_post_t *
-gf_rdma_new_post (gf_rdma_device_t *device, int32_t len,
+gf_rdma_new_post (rpc_transport_t *this, gf_rdma_device_t *device, int32_t len,
gf_rdma_post_type_t type)
{
gf_rdma_post_t *post = NULL;
@@ -192,8 +113,9 @@ gf_rdma_new_post (gf_rdma_device_t *device, int32_t len,
post->buf_size,
IBV_ACCESS_LOCAL_WRITE);
if (!post->mr) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "memory registration failed");
+ gf_log (this->name, GF_LOG_WARNING,
+ "memory registration failed (%s)",
+ strerror (errno));
goto out;
}
@@ -203,9 +125,7 @@ gf_rdma_new_post (gf_rdma_device_t *device, int32_t len,
ret = 0;
out:
if (ret != 0) {
- if (post->buf != NULL) {
- free (post->buf);
- }
+ free (post->buf);
GF_FREE (post);
post = NULL;
@@ -268,22 +188,6 @@ __gf_rdma_quota_get (gf_rdma_peer_t *peer)
return ret;
}
-/*
- static int32_t
- gf_rdma_quota_get (gf_rdma_peer_t *peer)
- {
- int32_t ret = -1;
- gf_rdma_private_t *priv = peer->trans->private;
-
- pthread_mutex_lock (&priv->write_mutex);
- {
- ret = __gf_rdma_quota_get (peer);
- }
- pthread_mutex_unlock (&priv->write_mutex);
-
- return ret;
- }
-*/
static void
__gf_rdma_ioq_entry_free (gf_rdma_ioq_t *entry)
@@ -299,6 +203,7 @@ __gf_rdma_ioq_entry_free (gf_rdma_ioq_t *entry)
iobref_unref (entry->msg.request.rsp_iobref);
entry->msg.request.rsp_iobref = NULL;
}
+
mem_put (entry);
}
@@ -318,26 +223,898 @@ static int32_t
__gf_rdma_disconnect (rpc_transport_t *this)
{
gf_rdma_private_t *priv = NULL;
- int32_t ret = 0;
priv = this->private;
- if (priv->connected || priv->tcp_connected) {
- fcntl (priv->sock, F_SETFL, O_NONBLOCK);
- if (shutdown (priv->sock, SHUT_RDWR) != 0) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
- "shutdown () - error: %s",
- strerror (errno));
- ret = -errno;
- priv->tcp_connected = 0;
+ if (priv->connected) {
+ rdma_disconnect (priv->peer.cm_id);
+ }
+
+ return 0;
+}
+
+
+static void
+gf_rdma_queue_init (gf_rdma_queue_t *queue)
+{
+ pthread_mutex_init (&queue->lock, NULL);
+
+ queue->active_posts.next = &queue->active_posts;
+ queue->active_posts.prev = &queue->active_posts;
+ queue->passive_posts.next = &queue->passive_posts;
+ queue->passive_posts.prev = &queue->passive_posts;
+}
+
+
+static void
+__gf_rdma_destroy_queue (gf_rdma_post_t *post)
+{
+ gf_rdma_post_t *tmp = NULL;
+
+ while (post->next != post) {
+ tmp = post->next;
+
+ post->next = post->next->next;
+ post->next->prev = post;
+
+ gf_rdma_destroy_post (tmp);
+ }
+}
+
+
+static void
+gf_rdma_destroy_queue (gf_rdma_queue_t *queue)
+{
+ if (queue == NULL) {
+ goto out;
+ }
+
+ pthread_mutex_lock (&queue->lock);
+ {
+ if (queue->passive_count > 0) {
+ __gf_rdma_destroy_queue (&queue->passive_posts);
+ queue->passive_count = 0;
+ }
+
+ if (queue->active_count > 0) {
+ __gf_rdma_destroy_queue (&queue->active_posts);
+ queue->active_count = 0;
+ }
+ }
+ pthread_mutex_unlock (&queue->lock);
+
+out:
+ return;
+}
+
+
+static void
+gf_rdma_destroy_posts (rpc_transport_t *this)
+{
+ gf_rdma_device_t *device = NULL;
+ gf_rdma_private_t *priv = NULL;
+
+ if (this == NULL) {
+ goto out;
+ }
+
+ priv = this->private;
+ device = priv->device;
+
+ gf_rdma_destroy_queue (&device->sendq);
+ gf_rdma_destroy_queue (&device->recvq);
+
+out:
+ return;
+}
+
+
+static int32_t
+__gf_rdma_create_posts (rpc_transport_t *this, int32_t count, int32_t size,
+ gf_rdma_queue_t *q, gf_rdma_post_type_t type)
+{
+ int32_t i = 0;
+ int32_t ret = 0;
+ gf_rdma_private_t *priv = NULL;
+ gf_rdma_device_t *device = NULL;
+
+ priv = this->private;
+ device = priv->device;
+
+ for (i=0 ; i<count ; i++) {
+ gf_rdma_post_t *post = NULL;
+
+ post = gf_rdma_new_post (this, device, size + 2048, type);
+ if (!post) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "post creation failed");
+ ret = -1;
+ break;
+ }
+
+ gf_rdma_put_post (q, post);
+ }
+ return ret;
+}
+
+
+static int32_t
+gf_rdma_post_recv (struct ibv_srq *srq,
+ gf_rdma_post_t *post)
+{
+ struct ibv_sge list = {
+ .addr = (unsigned long) post->buf,
+ .length = post->buf_size,
+ .lkey = post->mr->lkey
+ };
+
+ struct ibv_recv_wr wr = {
+ .wr_id = (unsigned long) post,
+ .sg_list = &list,
+ .num_sge = 1,
+ }, *bad_wr;
+
+ gf_rdma_post_ref (post);
+
+ return ibv_post_srq_recv (srq, &wr, &bad_wr);
+}
+
+
+static int32_t
+gf_rdma_create_posts (rpc_transport_t *this)
+{
+ int32_t i = 0, ret = 0;
+ gf_rdma_post_t *post = NULL;
+ gf_rdma_private_t *priv = NULL;
+ gf_rdma_options_t *options = NULL;
+ gf_rdma_device_t *device = NULL;
+
+ priv = this->private;
+ options = &priv->options;
+ device = priv->device;
+
+ ret = __gf_rdma_create_posts (this, options->send_count,
+ options->send_size,
+ &device->sendq, GF_RDMA_SEND_POST);
+ if (!ret)
+ ret = __gf_rdma_create_posts (this, options->recv_count,
+ options->recv_size,
+ &device->recvq,
+ GF_RDMA_RECV_POST);
+
+ if (!ret) {
+ for (i=0 ; i<options->recv_count ; i++) {
+ post = gf_rdma_get_post (&device->recvq);
+ if (gf_rdma_post_recv (device->srq, post) != 0) {
+ ret = -1;
+ break;
+ }
+ }
+ }
+
+ if (ret)
+ gf_rdma_destroy_posts (this);
+
+ return ret;
+}
+
+
+static void
+gf_rdma_destroy_cq (rpc_transport_t *this)
+{
+ gf_rdma_private_t *priv = NULL;
+ gf_rdma_device_t *device = NULL;
+
+ priv = this->private;
+ device = priv->device;
+
+ if (device->recv_cq)
+ ibv_destroy_cq (device->recv_cq);
+ device->recv_cq = NULL;
+
+ if (device->send_cq)
+ ibv_destroy_cq (device->send_cq);
+ device->send_cq = NULL;
+
+ return;
+}
+
+
+static int32_t
+gf_rdma_create_cq (rpc_transport_t *this)
+{
+ gf_rdma_private_t *priv = NULL;
+ gf_rdma_options_t *options = NULL;
+ gf_rdma_device_t *device = NULL;
+ uint64_t send_cqe = 0;
+ int32_t ret = 0;
+ struct ibv_device_attr device_attr = {{0}, };
+
+ priv = this->private;
+ options = &priv->options;
+ device = priv->device;
+
+ device->recv_cq = ibv_create_cq (priv->device->context,
+ options->recv_count * 2,
+ device,
+ device->recv_chan,
+ 0);
+ if (!device->recv_cq) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "creation of CQ for device %s failed",
+ device->device_name);
+ ret = -1;
+ goto out;
+ } else if (ibv_req_notify_cq (device->recv_cq, 0)) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "ibv_req_notify_cq on recv CQ of device %s failed",
+ device->device_name);
+ ret = -1;
+ goto out;
+ }
+
+ do {
+ ret = ibv_query_device (priv->device->context, &device_attr);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "ibv_query_device on %s returned %d (%s)",
+ priv->device->device_name, ret,
+ (ret > 0) ? strerror (ret) : "");
+ ret = -1;
+ goto out;
+ }
+
+ send_cqe = options->send_count * 128;
+ send_cqe = (send_cqe > device_attr.max_cqe)
+ ? device_attr.max_cqe : send_cqe;
+
+ /* TODO: make send_cq size dynamically adaptive */
+ device->send_cq = ibv_create_cq (priv->device->context,
+ send_cqe, device,
+ device->send_chan, 0);
+ if (!device->send_cq) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "creation of send_cq for device %s failed",
+ device->device_name);
+ ret = -1;
+ goto out;
+ }
+
+ if (ibv_req_notify_cq (device->send_cq, 0)) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "ibv_req_notify_cq on send_cq for device %s"
+ " failed", device->device_name);
+ ret = -1;
+ goto out;
+ }
+ } while (0);
+
+out:
+ if (ret != 0)
+ gf_rdma_destroy_cq (this);
+
+ return ret;
+}
+
+
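The CQs set up above are armed with ibv_req_notify_cq() and tied to the completion channels created in gf_rdma_get_device() below; the send/recv completion threads that actually drain them are only declared in this diff. As a reading aid, a minimal sketch of the standard libibverbs loop such a thread is assumed to run (names and structure are illustrative, not code from this patch):

#include <infiniband/verbs.h>

static void *
completion_loop_sketch (void *arg)
{
        struct ibv_comp_channel *chan   = arg;
        struct ibv_cq           *cq     = NULL;
        struct ibv_wc            wc     = {0, };
        void                    *cq_ctx = NULL;

        while (1) {
                /* sleep until the CQ bound to 'chan' has an event */
                if (ibv_get_cq_event (chan, &cq, &cq_ctx) != 0)
                        break;
                ibv_ack_cq_events (cq, 1);

                /* re-arm notification before draining so no completion
                   slips between the poll below and the next event */
                if (ibv_req_notify_cq (cq, 0) != 0)
                        break;

                while (ibv_poll_cq (cq, 1, &wc) > 0) {
                        /* wc.wr_id holds the gf_rdma_post_t pointer that
                           was stashed in the work request */
                }
        }

        return NULL;
}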
+static gf_rdma_device_t *
+gf_rdma_get_device (rpc_transport_t *this, struct ibv_context *ibctx,
+ char *device_name)
+{
+ glusterfs_ctx_t *ctx = NULL;
+ gf_rdma_private_t *priv = NULL;
+ gf_rdma_options_t *options = NULL;
+ int32_t ret = 0;
+ int32_t i = 0;
+ gf_rdma_device_t *trav = NULL, *device = NULL;
+ gf_rdma_ctx_t *rdma_ctx = NULL;
+
+ priv = this->private;
+ options = &priv->options;
+ ctx = this->ctx;
+ rdma_ctx = ctx->ib;
+
+ trav = rdma_ctx->device;
+
+ while (trav) {
+ if (!strcmp (trav->device_name, device_name))
+ break;
+ trav = trav->next;
+ }
+
+ if (!trav) {
+ trav = GF_CALLOC (1, sizeof (*trav),
+ gf_common_mt_rdma_device_t);
+ if (trav == NULL) {
+ goto out;
+ }
+
+ priv->device = trav;
+ trav->context = ibctx;
+
+ trav->request_ctx_pool
+ = mem_pool_new (gf_rdma_request_context_t,
+ GF_RDMA_POOL_SIZE);
+ if (trav->request_ctx_pool == NULL) {
+ goto out;
+ }
+
+ trav->ioq_pool
+ = mem_pool_new (gf_rdma_ioq_t, GF_RDMA_POOL_SIZE);
+ if (trav->ioq_pool == NULL) {
+ goto out;
+ }
+
+ trav->reply_info_pool = mem_pool_new (gf_rdma_reply_info_t,
+ GF_RDMA_POOL_SIZE);
+ if (trav->reply_info_pool == NULL) {
+ goto out;
+ }
+
+ trav->device_name = gf_strdup (device_name);
+
+ trav->next = rdma_ctx->device;
+ rdma_ctx->device = trav;
+
+ trav->send_chan = ibv_create_comp_channel (trav->context);
+ if (!trav->send_chan) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "could not create send completion channel for "
+ "device (%s)", device_name);
+ goto out;
+ }
+
+ trav->recv_chan = ibv_create_comp_channel (trav->context);
+ if (!trav->recv_chan) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "could not create recv completion channel for "
+ "device (%s)", device_name);
+
+ /* TODO: cleanup current mess */
+ goto out;
+ }
+
+ if (gf_rdma_create_cq (this) < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "could not create CQ for device (%s)",
+ device_name);
+ goto out;
+ }
+
+ /* protection domain */
+ trav->pd = ibv_alloc_pd (trav->context);
+
+ if (!trav->pd) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "could not allocate protection domain for "
+ "device (%s)", device_name);
+ goto out;
+ }
+
+ struct ibv_srq_init_attr attr = {
+ .attr = {
+ .max_wr = options->recv_count,
+ .max_sge = 1,
+ .srq_limit = 10
+ }
+ };
+ trav->srq = ibv_create_srq (trav->pd, &attr);
+
+ if (!trav->srq) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "could not create SRQ for device (%s)",
+ device_name);
+ goto out;
+ }
+
+ /* queue init */
+ gf_rdma_queue_init (&trav->sendq);
+ gf_rdma_queue_init (&trav->recvq);
+
+ if (gf_rdma_create_posts (this) < 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "could not allocate posts for device (%s)",
+ device_name);
+ goto out;
+ }
+
+ /* completion threads */
+ ret = gf_thread_create (&trav->send_thread, NULL,
+ gf_rdma_send_completion_proc,
+ trav->send_chan);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "could not create send completion thread for "
+ "device (%s)", device_name);
+ goto out;
+ }
+
+ ret = gf_thread_create (&trav->recv_thread, NULL,
+ gf_rdma_recv_completion_proc,
+ trav->recv_chan);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "could not create recv completion thread "
+ "for device (%s)", device_name);
+ return NULL;
+ }
+
+ ret = gf_thread_create (&trav->async_event_thread, NULL,
+ gf_rdma_async_event_thread,
+ ibctx);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "could not create async_event_thread");
+ return NULL;
+ }
+
+ /* qpreg */
+ pthread_mutex_init (&trav->qpreg.lock, NULL);
+ for (i=0; i<42; i++) {
+ trav->qpreg.ents[i].next = &trav->qpreg.ents[i];
+ trav->qpreg.ents[i].prev = &trav->qpreg.ents[i];
+ }
+ }
+
+ device = trav;
+ trav = NULL;
+out:
+
+ if (trav != NULL) {
+ gf_rdma_destroy_posts (this);
+ mem_pool_destroy (trav->ioq_pool);
+ mem_pool_destroy (trav->request_ctx_pool);
+ mem_pool_destroy (trav->reply_info_pool);
+ ibv_dealloc_pd (trav->pd);
+ gf_rdma_destroy_cq (this);
+ ibv_destroy_comp_channel (trav->recv_chan);
+ ibv_destroy_comp_channel (trav->send_chan);
+ GF_FREE ((char *)trav->device_name);
+ GF_FREE (trav);
+ }
+
+ return device;
+}
+
+
+static rpc_transport_t *
+gf_rdma_transport_new (rpc_transport_t *listener, struct rdma_cm_id *cm_id)
+{
+ gf_rdma_private_t *listener_priv = NULL, *priv = NULL;
+ rpc_transport_t *this = NULL, *new = NULL;
+ gf_rdma_options_t *options = NULL;
+ char *device_name = NULL;
+
+ listener_priv = listener->private;
+
+ this = GF_CALLOC (1, sizeof (rpc_transport_t),
+ gf_common_mt_rpc_transport_t);
+ if (this == NULL) {
+ goto out;
+ }
+
+ this->listener = listener;
+
+ priv = GF_CALLOC (1, sizeof (gf_rdma_private_t),
+ gf_common_mt_rdma_private_t);
+ if (priv == NULL) {
+ goto out;
+ }
+
+ this->private = priv;
+ priv->options = listener_priv->options;
+
+ priv->listener = listener;
+ priv->entity = GF_RDMA_SERVER;
+
+ options = &priv->options;
+
+ this->ops = listener->ops;
+ this->init = listener->init;
+ this->fini = listener->fini;
+ this->ctx = listener->ctx;
+ this->name = gf_strdup (listener->name);
+ this->notify = listener->notify;
+ this->mydata = listener->mydata;
+
+ this->myinfo.sockaddr_len = sizeof (cm_id->route.addr.src_addr);
+ memcpy (&this->myinfo.sockaddr, &cm_id->route.addr.src_addr,
+ this->myinfo.sockaddr_len);
+
+ this->peerinfo.sockaddr_len = sizeof (cm_id->route.addr.dst_addr);
+ memcpy (&this->peerinfo.sockaddr, &cm_id->route.addr.dst_addr,
+ this->peerinfo.sockaddr_len);
+
+ priv->peer.trans = this;
+ gf_rdma_get_transport_identifiers (this);
+
+ device_name = (char *)ibv_get_device_name (cm_id->verbs->device);
+ if (device_name == NULL) {
+ gf_log (listener->name, GF_LOG_WARNING,
+ "cannot get device name (peer:%s me:%s)",
+ this->peerinfo.identifier, this->myinfo.identifier);
+ goto out;
+ }
+
+ priv->device = gf_rdma_get_device (this, cm_id->verbs,
+ device_name);
+ if (priv->device == NULL) {
+ gf_log (listener->name, GF_LOG_WARNING,
+ "cannot get infiniband device %s (peer:%s me:%s)",
+ device_name, this->peerinfo.identifier,
+ this->myinfo.identifier);
+ goto out;
+ }
+
+ priv->peer.send_count = options->send_count;
+ priv->peer.recv_count = options->recv_count;
+ priv->peer.send_size = options->send_size;
+ priv->peer.recv_size = options->recv_size;
+ priv->peer.cm_id = cm_id;
+ INIT_LIST_HEAD (&priv->peer.ioq);
+
+ pthread_mutex_init (&priv->write_mutex, NULL);
+ pthread_mutex_init (&priv->recv_mutex, NULL);
+
+ cm_id->context = this;
+
+ new = rpc_transport_ref (this);
+ this = NULL;
+out:
+ if (this != NULL) {
+ if (this->private != NULL) {
+ GF_FREE (this->private);
+ }
+
+ if (this->name != NULL) {
+ GF_FREE (this->name);
+ }
+
+ GF_FREE (this);
+ }
+
+ return new;
+}
+
+
+static int
+gf_rdma_cm_handle_connect_request (struct rdma_cm_event *event)
+{
+ int ret = -1;
+ rpc_transport_t *this = NULL, *listener = NULL;
+ struct rdma_cm_id *child_cm_id = NULL, *listener_cm_id = NULL;
+ struct rdma_conn_param conn_param = {0, };
+ gf_rdma_private_t *priv = NULL;
+ gf_rdma_options_t *options = NULL;
+
+ child_cm_id = event->id;
+ listener_cm_id = event->listen_id;
+
+ listener = listener_cm_id->context;
+ priv = listener->private;
+ options = &priv->options;
+
+ this = gf_rdma_transport_new (listener, child_cm_id);
+ if (this == NULL) {
+ gf_log (listener->name, GF_LOG_WARNING,
+ "could not create a transport for incoming connection"
+ " (me.name:%s me.identifier:%s)", listener->name,
+ listener->myinfo.identifier);
+ rdma_destroy_id (child_cm_id);
+ goto out;
+ }
+
+ gf_log (listener->name, GF_LOG_TRACE,
+ "got a connect request (me:%s peer:%s)",
+ listener->myinfo.identifier, this->peerinfo.identifier);
+
+ ret = gf_rdma_create_qp (this);
+ if (ret < 0) {
+ gf_log (listener->name, GF_LOG_WARNING,
+ "could not create QP (peer:%s me:%s)",
+ this->peerinfo.identifier, this->myinfo.identifier);
+ gf_rdma_cm_handle_disconnect (this);
+ goto out;
+ }
+
+ conn_param.responder_resources = 1;
+ conn_param.initiator_depth = 1;
+ conn_param.retry_count = options->attr_retry_cnt;
+ conn_param.rnr_retry_count = options->attr_rnr_retry;
+
+ ret = rdma_accept(child_cm_id, &conn_param);
+ if (ret < 0) {
+ gf_log (listener->name, GF_LOG_WARNING, "rdma_accept failed "
+ "peer:%s me:%s (%s)", this->peerinfo.identifier,
+ this->myinfo.identifier, strerror (errno));
+ gf_rdma_cm_handle_disconnect (this);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+
+static int
+gf_rdma_cm_handle_route_resolved (struct rdma_cm_event *event)
+{
+ struct rdma_conn_param conn_param = {0, };
+ int ret = 0;
+ rpc_transport_t *this = NULL;
+ gf_rdma_private_t *priv = NULL;
+ gf_rdma_peer_t *peer = NULL;
+ gf_rdma_options_t *options = NULL;
+
+ if (event == NULL) {
+ goto out;
+ }
+
+ this = event->id->context;
+
+ priv = this->private;
+ peer = &priv->peer;
+ options = &priv->options;
+
+ ret = gf_rdma_create_qp (this);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "could not create QP (peer:%s me:%s)",
+ this->peerinfo.identifier, this->myinfo.identifier);
+ gf_rdma_cm_handle_disconnect (this);
+ goto out;
+ }
+
+ memset(&conn_param, 0, sizeof conn_param);
+ conn_param.responder_resources = 1;
+ conn_param.initiator_depth = 1;
+ conn_param.retry_count = options->attr_retry_cnt;
+ conn_param.rnr_retry_count = options->attr_rnr_retry;
+
+ ret = rdma_connect(peer->cm_id, &conn_param);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "rdma_connect failed (%s)", strerror (errno));
+ gf_rdma_cm_handle_disconnect (this);
+ goto out;
+ }
+
+ gf_log (this->name, GF_LOG_TRACE, "route resolved (me:%s peer:%s)",
+ this->myinfo.identifier, this->peerinfo.identifier);
+
+ ret = 0;
+out:
+ return ret;
+}
+
+
+static int
+gf_rdma_cm_handle_addr_resolved (struct rdma_cm_event *event)
+{
+ rpc_transport_t *this = NULL;
+ gf_rdma_peer_t *peer = NULL;
+ gf_rdma_private_t *priv = NULL;
+ int ret = 0;
+
+ this = event->id->context;
+
+ priv = this->private;
+ peer = &priv->peer;
+
+ GF_ASSERT (peer->cm_id == event->id);
+
+ this->myinfo.sockaddr_len = sizeof (peer->cm_id->route.addr.src_addr);
+ memcpy (&this->myinfo.sockaddr, &peer->cm_id->route.addr.src_addr,
+ this->myinfo.sockaddr_len);
+
+ this->peerinfo.sockaddr_len = sizeof (peer->cm_id->route.addr.dst_addr);
+ memcpy (&this->peerinfo.sockaddr, &peer->cm_id->route.addr.dst_addr,
+ this->peerinfo.sockaddr_len);
+
+ gf_rdma_get_transport_identifiers (this);
+
+ ret = rdma_resolve_route(peer->cm_id, 2000);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "rdma_resolve_route failed (me:%s peer:%s) (%s)",
+ this->myinfo.identifier, this->peerinfo.identifier,
+ strerror (errno));
+ gf_rdma_cm_handle_disconnect (this);
+ }
+
+ gf_log (this->name, GF_LOG_TRACE, "Address resolved (me:%s peer:%s)",
+ this->myinfo.identifier, this->peerinfo.identifier);
+
+ return ret;
+}
+
+
+static void
+gf_rdma_cm_handle_disconnect (rpc_transport_t *this)
+{
+ gf_rdma_private_t *priv = NULL;
+ char need_unref = 0, connected = 0;
+
+ priv = this->private;
+ gf_log (this->name, GF_LOG_DEBUG,
+ "peer disconnected, cleaning up");
+
+ pthread_mutex_lock (&priv->write_mutex);
+ {
+ if (priv->peer.cm_id != NULL) {
+ need_unref = 1;
+ connected = priv->connected;
priv->connected = 0;
}
+
+ __gf_rdma_teardown (this);
+ }
+ pthread_mutex_unlock (&priv->write_mutex);
+
+ if (connected) {
+ rpc_transport_notify (this, RPC_TRANSPORT_DISCONNECT, this);
}
+ if (need_unref)
+ rpc_transport_unref (this);
+
+}
+
+
+static int
+gf_rdma_cm_handle_event_established (struct rdma_cm_event *event)
+{
+ rpc_transport_t *this = NULL;
+ gf_rdma_private_t *priv = NULL;
+ struct rdma_cm_id *cm_id = NULL;
+ int ret = 0;
+
+ cm_id = event->id;
+ this = cm_id->context;
+ priv = this->private;
+
+ priv->connected = 1;
+
+ pthread_mutex_lock (&priv->write_mutex);
+ {
+ priv->peer.quota = 1;
+ priv->peer.quota_set = 0;
+ }
+ pthread_mutex_unlock (&priv->write_mutex);
+
+ if (priv->entity == GF_RDMA_CLIENT) {
+ ret = rpc_transport_notify (this, RPC_TRANSPORT_CONNECT, this);
+
+ } else if (priv->entity == GF_RDMA_SERVER) {
+ ret = rpc_transport_notify (priv->listener,
+ RPC_TRANSPORT_ACCEPT, this);
+ }
+
+ if (ret < 0) {
+ gf_rdma_disconnect (this);
+ }
+
+ gf_log (this->name, GF_LOG_TRACE,
+ "recieved event RDMA_CM_EVENT_ESTABLISHED (me:%s peer:%s)",
+ this->myinfo.identifier, this->peerinfo.identifier);
+
return ret;
}
+static int
+gf_rdma_cm_handle_event_error (rpc_transport_t *this)
+{
+ gf_rdma_private_t *priv = NULL;
+
+ priv = this->private;
+
+ if (priv->entity != GF_RDMA_SERVER_LISTENER) {
+ gf_rdma_cm_handle_disconnect (this);
+ }
+
+ return 0;
+}
+
+
+static int
+gf_rdma_cm_handle_device_removal (struct rdma_cm_event *event)
+{
+ return 0;
+}
+
+
+static void *
+gf_rdma_cm_event_handler (void *data)
+{
+ struct rdma_cm_event *event = NULL;
+ int ret = 0;
+ rpc_transport_t *this = NULL;
+ struct rdma_event_channel *event_channel = NULL;
+
+ event_channel = data;
+
+ while (1) {
+ ret = rdma_get_cm_event (event_channel, &event);
+ if (ret != 0) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "rdma_cm_get_event failed (%s)",
+ strerror (errno));
+ break;
+ }
+
+ switch (event->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ gf_rdma_cm_handle_addr_resolved (event);
+ break;
+
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ gf_rdma_cm_handle_route_resolved (event);
+ break;
+
+ case RDMA_CM_EVENT_CONNECT_REQUEST:
+ gf_rdma_cm_handle_connect_request (event);
+ break;
+
+ case RDMA_CM_EVENT_ESTABLISHED:
+ gf_rdma_cm_handle_event_established (event);
+ break;
+
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ case RDMA_CM_EVENT_REJECTED:
+ this = event->id->context;
+
+ gf_log (this->name, GF_LOG_WARNING,
+ "cma event %s, error %d (me:%s peer:%s)\n",
+ rdma_event_str(event->event), event->status,
+ this->myinfo.identifier,
+ this->peerinfo.identifier);
+
+ rdma_ack_cm_event (event);
+ event = NULL;
+
+ gf_rdma_cm_handle_event_error (this);
+ continue;
+
+ case RDMA_CM_EVENT_DISCONNECTED:
+ this = event->id->context;
+
+ gf_log (this->name, GF_LOG_DEBUG,
+ "recieved disconnect (me:%s peer:%s)\n",
+ this->myinfo.identifier,
+ this->peerinfo.identifier);
+
+ rdma_ack_cm_event (event);
+ event = NULL;
+
+ gf_rdma_cm_handle_disconnect (this);
+ continue;
+
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "device removed");
+ gf_rdma_cm_handle_device_removal (event);
+ break;
+
+ default:
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "unhandled event: %s, ignoring",
+ rdma_event_str(event->event));
+ break;
+ }
+
+ rdma_ack_cm_event (event);
+ }
+
+ return NULL;
+}
+
+
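The handlers above are driven by librdmacm events; the client-side code that kicks the sequence off falls outside this section, so the following is only an assumed sketch of how a connect is initiated so that ADDR_RESOLVED, ROUTE_RESOLVED and ESTABLISHED arrive on the channel polled by gf_rdma_cm_event_handler (function and parameter names are illustrative):

#include <rdma/rdma_cma.h>

static int
client_connect_sketch (struct rdma_event_channel *chan,
                       struct sockaddr *peer_addr, void *trans)
{
        struct rdma_cm_id *cm_id = NULL;

        /* RDMA_PS_TCP selects the reliable-connected service, i.e. an
           IBV_QPT_RC queue pair; 'trans' becomes event->id->context */
        if (rdma_create_id (chan, &cm_id, trans, RDMA_PS_TCP) != 0)
                return -1;

        /* completes asynchronously: the event thread handles
           RDMA_CM_EVENT_ADDR_RESOLVED, resolves the route, and then calls
           rdma_connect() from the ROUTE_RESOLVED handler */
        return rdma_resolve_addr (cm_id, NULL, peer_addr, 2000);
}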
static int32_t
gf_rdma_post_send (struct ibv_qp *qp, gf_rdma_post_t *post, int32_t len)
{
@@ -363,33 +1140,33 @@ gf_rdma_post_send (struct ibv_qp *qp, gf_rdma_post_t *post, int32_t len)
int
__gf_rdma_encode_error(gf_rdma_peer_t *peer, gf_rdma_reply_info_t *reply_info,
- struct iovec *rpchdr, uint32_t *ptr,
+ struct iovec *rpchdr, gf_rdma_header_t *hdr,
gf_rdma_errcode_t err)
{
- uint32_t *startp = NULL;
struct rpc_msg *rpc_msg = NULL;
- startp = ptr;
if (reply_info != NULL) {
- *ptr++ = hton32(reply_info->rm_xid);
+ hdr->rm_xid = hton32(reply_info->rm_xid);
} else {
rpc_msg = rpchdr[0].iov_base; /* assume rpchdr contains
* only one vector.
* (which is true)
*/
- *ptr++ = rpc_msg->rm_xid;
+ hdr->rm_xid = rpc_msg->rm_xid;
}
- *ptr++ = hton32(GF_RDMA_VERSION);
- *ptr++ = hton32(peer->send_count);
- *ptr++ = hton32(GF_RDMA_ERROR);
- *ptr++ = hton32(err);
+ hdr->rm_vers = hton32(GF_RDMA_VERSION);
+ hdr->rm_credit = hton32(peer->send_count);
+ hdr->rm_type = hton32(GF_RDMA_ERROR);
+ hdr->rm_body.rm_error.rm_type = hton32(err);
if (err == ERR_VERS) {
- *ptr++ = hton32(GF_RDMA_VERSION);
- *ptr++ = hton32(GF_RDMA_VERSION);
+ hdr->rm_body.rm_error.rm_version.gf_rdma_vers_low
+ = hton32(GF_RDMA_VERSION);
+ hdr->rm_body.rm_error.rm_version.gf_rdma_vers_high
+ = hton32(GF_RDMA_VERSION);
}
- return (int)((unsigned long)ptr - (unsigned long)startp);
+ return sizeof (*hdr);
}
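__gf_rdma_encode_error() now fills the typed gf_rdma_header_t rather than hand-packing uint32_t words, which is why it can simply return sizeof (*hdr). The type itself lives in rdma.h and is not part of this diff; the layout implied by the field accesses in this file is roughly the following (a reading aid only, the real definition may differ):

#include <stdint.h>

struct gf_rdma_header_sketch {
        uint32_t rm_xid;     /* xid copied from the RPC record being framed */
        uint32_t rm_vers;    /* GF_RDMA_VERSION */
        uint32_t rm_credit;  /* send credits granted to the peer */
        uint32_t rm_type;    /* GF_RDMA_MSG, GF_RDMA_NOMSG, GF_RDMA_ERROR, ... */
        union {
                uint32_t rm_chunks[3];       /* read/write/reply chunk lists */
                struct {
                        uint32_t rm_type;    /* ERR_VERS or ERR_CHUNK */
                        struct {
                                uint32_t gf_rdma_vers_low;
                                uint32_t gf_rdma_vers_high;
                        } rm_version;
                } rm_error;
        } rm_body;
};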
@@ -401,7 +1178,7 @@ __gf_rdma_send_error (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
int32_t ret = -1, len = 0;
len = __gf_rdma_encode_error (peer, reply_info, entry->rpchdr,
- (uint32_t *)post->buf, err);
+ (gf_rdma_header_t *)post->buf, err);
if (len == -1) {
gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
"encode error returned -1");
@@ -460,8 +1237,10 @@ __gf_rdma_create_read_chunks_from_vector (gf_rdma_peer_t *peer,
vector[i].iov_len,
IBV_ACCESS_REMOTE_READ);
if (!mr) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "memory registration failed");
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "memory registration failed (%s) (peer:%s)",
+ strerror (errno),
+ peer->trans->peerinfo.identifier);
goto out;
}
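Each vector element handed out as a read chunk must be registered so the peer can pull it with RDMA READ, which is what the ibv_reg_mr() call above does and why its failure is now logged with errno. A self-contained sketch of that register/deregister cycle (illustrative only, not code from this patch):

#include <stddef.h>
#include <infiniband/verbs.h>

static int
register_for_remote_read_sketch (struct ibv_pd *pd, void *buf, size_t len)
{
        struct ibv_mr *mr = NULL;

        mr = ibv_reg_mr (pd, buf, len,
                         IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ);
        if (mr == NULL)
                return -1;   /* errno carries the reason, as logged above */

        /* mr->rkey plus the buffer's address and length are what the chunk
           list advertises to the peer; mr->lkey is for local work requests */

        return ibv_dereg_mr (mr);
}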
@@ -510,8 +1289,8 @@ __gf_rdma_create_read_chunks (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
entry->rpchdr_count,
request_ctx);
if (ret == -1) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
- "cannot create read chunks from vector, "
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "cannot create read chunks from vector "
"entry->rpchdr");
goto out;
}
@@ -523,8 +1302,8 @@ __gf_rdma_create_read_chunks (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
entry->proghdr_count,
request_ctx);
if (ret == -1) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
- "cannot create read chunks from vector, "
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "cannot create read chunks from vector "
"entry->proghdr");
}
@@ -536,8 +1315,8 @@ __gf_rdma_create_read_chunks (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
entry->prog_payload_count,
request_ctx);
if (ret == -1) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
- "cannot create read chunks from vector,"
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "cannot create read chunks from vector"
" entry->prog_payload");
}
}
@@ -550,8 +1329,8 @@ __gf_rdma_create_read_chunks (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
entry->prog_payload_count,
request_ctx);
if (ret == -1) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
- "cannot create read chunks from vector, "
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "cannot create read chunks from vector "
"entry->prog_payload");
}
}
@@ -594,8 +1373,10 @@ __gf_rdma_create_write_chunks_from_vector (gf_rdma_peer_t *peer,
IBV_ACCESS_REMOTE_WRITE
| IBV_ACCESS_LOCAL_WRITE);
if (!mr) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "memory registration failed");
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "memory registration failed (%s) (peer:%s)",
+ strerror (errno),
+ peer->trans->peerinfo.identifier);
goto out;
}
@@ -670,7 +1451,7 @@ __gf_rdma_create_write_chunks (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
entry->msg.request.rsp_payload_count,
request_ctx);
if (ret == -1) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
"cannot create write chunks from vector "
"entry->rpc_payload");
goto out;
@@ -700,7 +1481,7 @@ __gf_rdma_create_write_chunks (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
entry->msg.request.rsphdr_count,
request_ctx);
if (ret == -1) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
"cannot create write chunks from vector "
"entry->rpchdr");
goto out;
@@ -716,7 +1497,7 @@ out:
}
-inline void
+static inline void
__gf_rdma_deregister_mr (struct ibv_mr **mr, int count)
{
int i = 0;
@@ -837,28 +1618,6 @@ out:
}
-static int32_t
-gf_rdma_post_recv (struct ibv_srq *srq,
- gf_rdma_post_t *post)
-{
- struct ibv_sge list = {
- .addr = (unsigned long) post->buf,
- .length = post->buf_size,
- .lkey = post->mr->lkey
- };
-
- struct ibv_recv_wr wr = {
- .wr_id = (unsigned long) post,
- .sg_list = &list,
- .num_sge = 1,
- }, *bad_wr;
-
- gf_rdma_post_ref (post);
-
- return ibv_post_srq_recv (srq, &wr, &bad_wr);
-}
-
-
int
gf_rdma_post_unref (gf_rdma_post_t *post)
{
@@ -1044,10 +1803,11 @@ __gf_rdma_ioq_churn_request (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
chunkptr = &hdr->rm_body.rm_chunks[0];
if (rtype != gf_rdma_noch) {
- ret = __gf_rdma_create_read_chunks (peer, entry, rtype, &chunkptr,
+ ret = __gf_rdma_create_read_chunks (peer, entry, rtype,
+ &chunkptr,
request_ctx);
if (ret != 0) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
"creation of read chunks failed");
goto out;
}
@@ -1056,10 +1816,11 @@ __gf_rdma_ioq_churn_request (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
}
if (wtype != gf_rdma_noch) {
- ret = __gf_rdma_create_write_chunks (peer, entry, wtype, &chunkptr,
+ ret = __gf_rdma_create_write_chunks (peer, entry, wtype,
+ &chunkptr,
request_ctx);
if (ret != 0) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
"creation of write/reply chunk failed");
goto out;
}
@@ -1117,7 +1878,7 @@ out:
}
-inline void
+static inline void
__gf_rdma_fill_reply_header (gf_rdma_header_t *header, struct iovec *rpchdr,
gf_rdma_reply_info_t *reply_info, int credits)
{
@@ -1147,11 +1908,12 @@ __gf_rdma_fill_reply_header (gf_rdma_header_t *header, struct iovec *rpchdr,
int32_t
__gf_rdma_send_reply_inline (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
- gf_rdma_post_t *post, gf_rdma_reply_info_t *reply_info)
+ gf_rdma_post_t *post,
+ gf_rdma_reply_info_t *reply_info)
{
- gf_rdma_header_t *header = NULL;
- int32_t send_size = 0, ret = 0;
- char *buf = NULL;
+ gf_rdma_header_t *header = NULL;
+ int32_t send_size = 0, ret = 0;
+ char *buf = NULL;
send_size = iov_length (entry->rpchdr, entry->rpchdr_count)
+ iov_length (entry->proghdr, entry->proghdr_count)
@@ -1164,6 +1926,10 @@ __gf_rdma_send_reply_inline (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
if (send_size > GLUSTERFS_RDMA_INLINE_THRESHOLD) {
ret = __gf_rdma_send_error (peer, entry, post, reply_info,
ERR_CHUNK);
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "msg size (%d) is greater than maximum size "
+ "of msg that can be sent inlined (%d)",
+ send_size, GLUSTERFS_RDMA_INLINE_THRESHOLD);
goto out;
}
@@ -1198,7 +1964,7 @@ __gf_rdma_send_reply_inline (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
ret = send_size;
} else {
gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
- "gf_rdma_post_send (to %s) failed with ret = %d (%s)",
+ "posting send (to %s) failed with ret = %d (%s)",
peer->trans->peerinfo.identifier, ret,
(ret > 0) ? strerror (ret) : "");
gf_rdma_post_unref (post);
@@ -1271,9 +2037,8 @@ __gf_rdma_register_local_mr_for_rdma (gf_rdma_peer_t *peer,
gf_rdma_private_t *priv = NULL;
gf_rdma_device_t *device = NULL;
- if ((ctx == NULL) || (vector == NULL)) {
- goto out;
- }
+ GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, ctx, out);
+ GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, vector, out);
priv = peer->trans->private;
device = priv->device;
@@ -1294,6 +2059,9 @@ __gf_rdma_register_local_mr_for_rdma (gf_rdma_peer_t *peer,
vector[i].iov_len,
IBV_ACCESS_LOCAL_WRITE);
if (ctx->mr[ctx->mr_count] == NULL) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "registering memory for IBV_ACCESS_LOCAL_WRITE "
+ "failed (%s)", strerror (errno));
goto out;
}
@@ -1364,7 +2132,8 @@ __gf_rdma_write (gf_rdma_peer_t *peer, gf_rdma_post_t *post, struct iovec *vec,
ret = ibv_post_send(peer->qp, &wr, &bad_wr);
if (ret) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, "rdma write to "
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "rdma write to "
"client (%s) failed with ret = %d (%s)",
peer->trans->peerinfo.identifier, ret,
(ret > 0) ? strerror (ret) : "");
@@ -1399,6 +2168,8 @@ __gf_rdma_do_gf_rdma_write (gf_rdma_peer_t *peer, gf_rdma_post_t *post,
ret = __gf_rdma_register_local_mr_for_rdma (peer, vector, count,
&post->ctx);
if (ret == -1) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "registering memory region for rdma failed");
goto out;
}
@@ -1410,9 +2181,13 @@ __gf_rdma_do_gf_rdma_write (gf_rdma_peer_t *peer, gf_rdma_post_t *post,
xfer_len = min (payload_size,
reply_info->wc_array->wc_array[i].wc_target.rs_length);
- ret = __gf_rdma_write (peer, post, vector, xfer_len, &payload_idx,
+ ret = __gf_rdma_write (peer, post, vector, xfer_len,
+ &payload_idx,
&reply_info->wc_array->wc_array[i]);
if (ret == -1) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "rdma write to client (%s) failed",
+ peer->trans->peerinfo.identifier);
goto out;
}
@@ -1431,12 +2206,12 @@ __gf_rdma_send_reply_type_nomsg (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
gf_rdma_post_t *post,
gf_rdma_reply_info_t *reply_info)
{
- gf_rdma_header_t *header = NULL;
- char *buf = NULL;
- uint32_t payload_size = 0;
- int count = 0, i = 0;
- int32_t ret = 0;
- struct iovec vector[MAX_IOVEC];
+ gf_rdma_header_t *header = NULL;
+ char *buf = NULL;
+ uint32_t payload_size = 0;
+ int count = 0, i = 0;
+ int32_t ret = 0;
+ struct iovec vector[MAX_IOVEC];
header = (gf_rdma_header_t *)post->buf;
@@ -1451,9 +2226,10 @@ __gf_rdma_send_reply_type_nomsg (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
/* encode reply chunklist */
buf = (char *)&header->rm_body.rm_chunks[2];
ret = __gf_rdma_reply_encode_write_chunks (peer, payload_size, post,
- reply_info, (uint32_t **)&buf);
+ reply_info,
+ (uint32_t **)&buf);
if (ret == -1) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
"encoding write chunks failed");
ret = __gf_rdma_send_error (peer, entry, post, reply_info,
ERR_CHUNK);
@@ -1473,6 +2249,9 @@ __gf_rdma_send_reply_type_nomsg (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
ret = __gf_rdma_do_gf_rdma_write (peer, post, vector, count,
entry->iobref, reply_info);
if (ret == -1) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "rdma write to peer (%s) failed",
+ peer->trans->peerinfo.identifier);
gf_rdma_post_unref (post);
goto out;
}
@@ -1480,7 +2259,7 @@ __gf_rdma_send_reply_type_nomsg (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
ret = gf_rdma_post_send (peer->qp, post, (buf - post->buf));
if (ret) {
gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
- "gf_rdma_post_send to client (%s) failed with "
+ "posting a send request to client (%s) failed with "
"ret = %d (%s)", peer->trans->peerinfo.identifier, ret,
(ret > 0) ? strerror (ret) : "");
ret = -1;
@@ -1496,12 +2275,13 @@ out:
int32_t
__gf_rdma_send_reply_type_msg (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
- gf_rdma_post_t *post, gf_rdma_reply_info_t *reply_info)
+ gf_rdma_post_t *post,
+ gf_rdma_reply_info_t *reply_info)
{
- gf_rdma_header_t *header = NULL;
- int32_t send_size = 0, ret = 0;
- char *ptr = NULL;
- uint32_t payload_size = 0;
+ gf_rdma_header_t *header = NULL;
+ int32_t send_size = 0, ret = 0;
+ char *ptr = NULL;
+ uint32_t payload_size = 0;
send_size = iov_length (entry->rpchdr, entry->rpchdr_count)
+ iov_length (entry->proghdr, entry->proghdr_count)
@@ -1533,7 +2313,7 @@ __gf_rdma_send_reply_type_msg (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
reply_info,
(uint32_t **)&ptr);
if (ret == -1) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
"encoding write chunks failed");
ret = __gf_rdma_send_error (peer, entry, post, reply_info,
ERR_CHUNK);
@@ -1549,6 +2329,8 @@ __gf_rdma_send_reply_type_msg (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
entry->prog_payload_count,
entry->iobref, reply_info);
if (ret == -1) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING, "rdma write to peer "
+ "(%s) failed", peer->trans->peerinfo.identifier);
gf_rdma_post_unref (post);
goto out;
}
@@ -1623,9 +2405,9 @@ __gf_rdma_ioq_churn_reply (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
int32_t ret = -1;
gf_rdma_chunktype_t type = gf_rdma_noch;
- if ((peer == NULL) || (entry == NULL) || (post == NULL)) {
- goto out;
- }
+ GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, peer, out);
+ GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, entry, out);
+ GF_VALIDATE_OR_GOTO (GF_RDMA_LOG_NAME, post, out);
reply_info = entry->msg.reply_info;
if (reply_info != NULL) {
@@ -1636,22 +2418,39 @@ __gf_rdma_ioq_churn_reply (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry,
case gf_rdma_noch:
ret = __gf_rdma_send_reply_inline (peer, entry, post,
reply_info);
+ if (ret < 0) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "failed to send reply to peer (%s) as an "
+ "inlined rdma msg",
+ peer->trans->peerinfo.identifier);
+ }
break;
case gf_rdma_replych:
ret = __gf_rdma_send_reply_type_nomsg (peer, entry, post,
reply_info);
+ if (ret < 0) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "failed to send reply to peer (%s) as "
+ "RDMA_NOMSG", peer->trans->peerinfo.identifier);
+ }
break;
case gf_rdma_writech:
ret = __gf_rdma_send_reply_type_msg (peer, entry, post,
reply_info);
+ if (ret < 0) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "failed to send reply with write chunks "
+ "to peer (%s)",
+ peer->trans->peerinfo.identifier);
+ }
break;
default:
gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
- "invalid chunktype (%d) specified for sending reply",
- type);
+ "invalid chunktype (%d) specified for sending reply "
+ " (peer:%s)", type, peer->trans->peerinfo.identifier);
break;
}
@@ -1680,20 +2479,34 @@ __gf_rdma_ioq_churn_entry (gf_rdma_peer_t *peer, gf_rdma_ioq_t *entry)
if (quota > 0) {
post = gf_rdma_get_post (&device->sendq);
if (post == NULL) {
- post = gf_rdma_new_post (device,
+ post = gf_rdma_new_post (peer->trans, device,
(options->send_size + 2048),
GF_RDMA_SEND_POST);
}
if (post == NULL) {
ret = -1;
+ gf_log_callingfn (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "not able to get a post to send msg");
goto out;
}
if (entry->is_request) {
ret = __gf_rdma_ioq_churn_request (peer, entry, post);
+ if (ret < 0) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "failed to process request ioq entry "
+ "to peer(%s)",
+ peer->trans->peerinfo.identifier);
+ }
} else {
ret = __gf_rdma_ioq_churn_reply (peer, entry, post);
+ if (ret < 0) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "failed to process reply ioq entry "
+ "to peer (%s)",
+ peer->trans->peerinfo.identifier);
+ }
}
if (ret != 0) {
@@ -1749,9 +2562,9 @@ gf_rdma_writev (rpc_transport_t *this, gf_rdma_ioq_t *entry)
pthread_mutex_lock (&priv->write_mutex);
{
if (!priv->connected) {
- gf_log (this->name, GF_LOG_DEBUG,
- "rdma is not connected to post a "
- "send request");
+ gf_log (this->name, GF_LOG_WARNING,
+ "rdma is not connected to peer (%s)",
+ this->peerinfo.identifier);
ret = -1;
goto unlock;
}
@@ -1761,6 +2574,13 @@ gf_rdma_writev (rpc_transport_t *this, gf_rdma_ioq_t *entry)
ret = __gf_rdma_ioq_churn_entry (peer, entry);
if (ret != 0) {
need_append = 0;
+
+ if (ret < 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "processing ioq entry destined "
+ "to (%s) failed",
+ this->peerinfo.identifier);
+ }
}
}
@@ -1880,6 +2700,9 @@ gf_rdma_submit_request (rpc_transport_t *this, rpc_transport_req_t *req)
entry = gf_rdma_ioq_new (this, &data);
if (entry == NULL) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "getting a new ioq entry failed (peer:%s)",
+ this->peerinfo.identifier);
goto out;
}
@@ -1888,6 +2711,9 @@ gf_rdma_submit_request (rpc_transport_t *this, rpc_transport_req_t *req)
if (ret > 0) {
ret = 0;
} else if (ret < 0) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "sending request to peer (%s) failed",
+ this->peerinfo.identifier);
rpc_transport_disconnect (this);
}
@@ -1910,6 +2736,9 @@ gf_rdma_submit_reply (rpc_transport_t *this, rpc_transport_reply_t *reply)
entry = gf_rdma_ioq_new (this, &data);
if (entry == NULL) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "getting a new ioq entry failed (peer:%s)",
+ this->peerinfo.identifier);
goto out;
}
@@ -1917,6 +2746,9 @@ gf_rdma_submit_reply (rpc_transport_t *this, rpc_transport_reply_t *reply)
if (ret > 0) {
ret = 0;
} else if (ret < 0) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "sending request to peer (%s) failed",
+ this->peerinfo.identifier);
rpc_transport_disconnect (this);
}
@@ -1924,189 +2756,6 @@ out:
return ret;
}
-#if 0
-static int
-gf_rdma_receive (rpc_transport_t *this, char **hdr_p, size_t *hdrlen_p,
- struct iobuf **iobuf_p)
-{
- gf_rdma_private_t *priv = this->private;
- /* TODO: return error if !priv->connected, check with locks */
- /* TODO: boundry checks for data_ptr/offset */
- char *copy_from = NULL;
- gf_rdma_header_t *header = NULL;
- uint32_t size1, size2, data_len = 0;
- char *hdr = NULL;
- struct iobuf *iobuf = NULL;
- int32_t ret = 0;
-
- pthread_mutex_lock (&priv->recv_mutex);
- {
-/*
- while (!priv->data_ptr)
- pthread_cond_wait (&priv->recv_cond, &priv->recv_mutex);
-*/
-
- copy_from = priv->data_ptr + priv->data_offset;
-
- priv->data_ptr = NULL;
- data_len = priv->data_len;
- pthread_cond_broadcast (&priv->recv_cond);
- }
- pthread_mutex_unlock (&priv->recv_mutex);
-
- header = (gf_rdma_header_t *)copy_from;
- if (strcmp (header->colonO, ":O")) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
- "%s: corrupt header received", this->name);
- ret = -1;
- goto err;
- }
-
- size1 = ntoh32 (header->size1);
- size2 = ntoh32 (header->size2);
-
- if (data_len != (size1 + size2 + sizeof (*header))) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
- "%s: sizeof data read from transport is not equal "
- "to the size specified in the header",
- this->name);
- ret = -1;
- goto err;
- }
-
- copy_from += sizeof (*header);
-
- if (size1) {
- hdr = GF_CALLOC (1, size1, gf_common_mt_char);
- if (!hdr) {
- gf_log (this->name, GF_LOG_ERROR,
- "unable to allocate header for peer %s",
- this->peerinfo.identifier);
- ret = -ENOMEM;
- goto err;
- }
- memcpy (hdr, copy_from, size1);
- copy_from += size1;
- *hdr_p = hdr;
- }
- *hdrlen_p = size1;
-
- if (size2) {
- iobuf = iobuf_get2 (this->ctx->iobuf_pool, size2);
- if (!iobuf) {
- gf_log (this->name, GF_LOG_ERROR,
- "unable to allocate IO buffer for peer %s",
- this->peerinfo.identifier);
- ret = -ENOMEM;
- goto err;
- }
- memcpy (iobuf->ptr, copy_from, size2);
- *iobuf_p = iobuf;
- }
-
-err:
- return ret;
-}
-#endif
-
-
-static void
-gf_rdma_destroy_cq (rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = NULL;
- gf_rdma_device_t *device = NULL;
-
- priv = this->private;
- device = priv->device;
-
- if (device->recv_cq)
- ibv_destroy_cq (device->recv_cq);
- device->recv_cq = NULL;
-
- if (device->send_cq)
- ibv_destroy_cq (device->send_cq);
- device->send_cq = NULL;
-
- return;
-}
-
-
-static int32_t
-gf_rdma_create_cq (rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = NULL;
- gf_rdma_options_t *options = NULL;
- gf_rdma_device_t *device = NULL;
- uint64_t send_cqe = 0;
- int32_t ret = 0;
- struct ibv_device_attr device_attr = {{0}, };
-
- priv = this->private;
- options = &priv->options;
- device = priv->device;
-
- device->recv_cq = ibv_create_cq (priv->device->context,
- options->recv_count * 2,
- device,
- device->recv_chan,
- 0);
- if (!device->recv_cq) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: creation of CQ for device %s failed",
- this->name, device->device_name);
- ret = -1;
- goto out;
- } else if (ibv_req_notify_cq (device->recv_cq, 0)) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: ibv_req_notify_cq on recv CQ of device %s failed",
- this->name, device->device_name);
- ret = -1;
- goto out;
- }
-
- do {
- ret = ibv_query_device (priv->device->context, &device_attr);
- if (ret != 0) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: ibv_query_device on %s returned %d (%s)",
- this->name, priv->device->device_name, ret,
- (ret > 0) ? strerror (ret) : "");
- ret = -1;
- goto out;
- }
-
- send_cqe = options->send_count * 128;
- send_cqe = (send_cqe > device_attr.max_cqe)
- ? device_attr.max_cqe : send_cqe;
-
- /* TODO: make send_cq size dynamically adaptive */
- device->send_cq = ibv_create_cq (priv->device->context,
- send_cqe, device,
- device->send_chan, 0);
- if (!device->send_cq) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: creation of send_cq for device %s failed",
- this->name, device->device_name);
- ret = -1;
- goto out;
- }
-
- if (ibv_req_notify_cq (device->send_cq, 0)) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: ibv_req_notify_cq on send_cq for device %s"
- " failed", this->name, device->device_name);
- ret = -1;
- goto out;
- }
- } while (0);
-
-out:
- if (ret != 0)
- gf_rdma_destroy_cq (this);
-
- return ret;
-}
-
static int
gf_rdma_register_peer (gf_rdma_device_t *device, int32_t qp_num,
@@ -2205,25 +2854,6 @@ __gf_rdma_lookup_peer (gf_rdma_device_t *device, int32_t qp_num)
return peer;
}
-/*
- static gf_rdma_peer_t *
- gf_rdma_lookup_peer (gf_rdma_device_t *device,
- int32_t qp_num)
- {
- gf_rdma_qpreg_t *qpreg = NULL;
- gf_rdma_peer_t *peer = NULL;
-
- qpreg = &device->qpreg;
- pthread_mutex_lock (&qpreg->lock);
- {
- peer = __gf_rdma_lookup_peer (device, qp_num);
- }
- pthread_mutex_unlock (&qpreg->lock);
-
- return peer;
- }
-*/
-
static void
__gf_rdma_destroy_qp (rpc_transport_t *this)
@@ -2233,7 +2863,7 @@ __gf_rdma_destroy_qp (rpc_transport_t *this)
priv = this->private;
if (priv->peer.qp) {
gf_rdma_unregister_peer (priv->device, priv->peer.qp->qp_num);
- ibv_destroy_qp (priv->peer.qp);
+ rdma_destroy_qp (priv->peer.cm_id);
}
priv->peer.qp = NULL;
@@ -2244,18 +2874,36 @@ __gf_rdma_destroy_qp (rpc_transport_t *this)
static int32_t
gf_rdma_create_qp (rpc_transport_t *this)
{
- gf_rdma_private_t *priv = NULL;
- gf_rdma_options_t *options = NULL;
- gf_rdma_device_t *device = NULL;
- int32_t ret = 0;
- gf_rdma_peer_t *peer = NULL;
+ gf_rdma_private_t *priv = NULL;
+ gf_rdma_device_t *device = NULL;
+ int32_t ret = 0;
+ gf_rdma_peer_t *peer = NULL;
+ char *device_name = NULL;
priv = this->private;
- options = &priv->options;
- device = priv->device;
peer = &priv->peer;
+ device_name = (char *)ibv_get_device_name (peer->cm_id->verbs->device);
+ if (device_name == NULL) {
+ ret = -1;
+ gf_log (this->name, GF_LOG_WARNING, "cannot get device_name");
+ goto out;
+ }
+
+ device = gf_rdma_get_device (this, peer->cm_id->verbs,
+ device_name);
+ if (device == NULL) {
+ ret = -1;
+ gf_log (this->name, GF_LOG_WARNING, "cannot get device for "
+ "device %s", device_name);
+ goto out;
+ }
+
+ if (priv->device == NULL) {
+ priv->device = device;
+ }
+
struct ibv_qp_init_attr init_attr = {
.send_cq = device->send_cq,
.recv_cq = device->recv_cq,
@@ -2269,39 +2917,16 @@ gf_rdma_create_qp (rpc_transport_t *this)
.qp_type = IBV_QPT_RC
};
- struct ibv_qp_attr attr = {
- .qp_state = IBV_QPS_INIT,
- .pkey_index = 0,
- .port_num = options->port,
- .qp_access_flags
- = IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE
- };
-
- peer->qp = ibv_create_qp (device->pd, &init_attr);
- if (!peer->qp) {
- gf_log (GF_RDMA_LOG_NAME,
- GF_LOG_CRITICAL,
- "%s: could not create QP",
- this->name);
- ret = -1;
- goto out;
- } else if (ibv_modify_qp (peer->qp, &attr,
- IBV_QP_STATE |
- IBV_QP_PKEY_INDEX |
- IBV_QP_PORT |
- IBV_QP_ACCESS_FLAGS)) {
- gf_log (GF_RDMA_LOG_NAME,
- GF_LOG_ERROR,
- "%s: failed to modify QP to INIT state",
- this->name);
+ ret = rdma_create_qp(peer->cm_id, device->pd, &init_attr);
+ if (ret != 0) {
+ gf_log (peer->trans->name, GF_LOG_CRITICAL,
+ "%s: could not create QP (%s)", this->name,
+ strerror (errno));
ret = -1;
goto out;
}
- peer->local_lid = gf_rdma_get_local_lid (device->context,
- options->port);
- peer->local_qpn = peer->qp->qp_num;
- peer->local_psn = lrand48 () & 0xffffff;
+ peer->qp = peer->cm_id->qp;
ret = gf_rdma_register_peer (device, peer->qp->qp_num, peer);
@@ -2313,300 +2938,52 @@ out:
}
-static void
-gf_rdma_destroy_posts (rpc_transport_t *this)
-{
-
-}
-
-
static int32_t
-__gf_rdma_create_posts (rpc_transport_t *this, int32_t count, int32_t size,
- gf_rdma_queue_t *q, gf_rdma_post_type_t type)
-{
- int32_t i = 0;
- int32_t ret = 0;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_device_t *device = NULL;
-
- priv = this->private;
- device = priv->device;
-
- for (i=0 ; i<count ; i++) {
- gf_rdma_post_t *post = NULL;
-
- post = gf_rdma_new_post (device, size + 2048, type);
- if (!post) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: post creation failed",
- this->name);
- ret = -1;
- break;
- }
-
- gf_rdma_put_post (q, post);
- }
- return ret;
-}
-
-
-static int32_t
-gf_rdma_create_posts (rpc_transport_t *this)
+__gf_rdma_teardown (rpc_transport_t *this)
{
- int32_t i = 0, ret = 0;
- gf_rdma_post_t *post = NULL;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_options_t *options = NULL;
- gf_rdma_device_t *device = NULL;
+ gf_rdma_private_t *priv = NULL;
+ gf_rdma_peer_t *peer = NULL;
priv = this->private;
- options = &priv->options;
- device = priv->device;
-
- ret = __gf_rdma_create_posts (this, options->send_count,
- options->send_size,
- &device->sendq, GF_RDMA_SEND_POST);
- if (!ret)
- ret = __gf_rdma_create_posts (this, options->recv_count,
- options->recv_size,
- &device->recvq,
- GF_RDMA_RECV_POST);
+ peer = &priv->peer;
- if (!ret) {
- for (i=0 ; i<options->recv_count ; i++) {
- post = gf_rdma_get_post (&device->recvq);
- if (gf_rdma_post_recv (device->srq, post) != 0) {
- ret = -1;
- break;
- }
- }
+ if (peer->cm_id->qp != NULL) {
+ __gf_rdma_destroy_qp (this);
}
- if (ret)
- gf_rdma_destroy_posts (this);
-
- return ret;
-}
-
-
-static int32_t
-gf_rdma_connect_qp (rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = this->private;
- gf_rdma_options_t *options = &priv->options;
- struct ibv_qp_attr attr = {
- .qp_state = IBV_QPS_RTR,
- .path_mtu = options->mtu,
- .dest_qp_num = priv->peer.remote_qpn,
- .rq_psn = priv->peer.remote_psn,
- .max_dest_rd_atomic = 1,
- .min_rnr_timer = 12,
- .qp_access_flags
- = IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE,
- .ah_attr = {
- .is_global = 0,
- .dlid = priv->peer.remote_lid,
- .sl = 0,
- .src_path_bits = 0,
- .port_num = options->port
- }
- };
- if (ibv_modify_qp (priv->peer.qp, &attr,
- IBV_QP_STATE |
- IBV_QP_AV |
- IBV_QP_PATH_MTU |
- IBV_QP_DEST_QPN |
- IBV_QP_RQ_PSN |
- IBV_QP_MAX_DEST_RD_ATOMIC |
- IBV_QP_MIN_RNR_TIMER)) {
- gf_log (GF_RDMA_LOG_NAME,
- GF_LOG_CRITICAL,
- "Failed to modify QP to RTR\n");
- return -1;
+ if (!list_empty (&priv->peer.ioq)) {
+ __gf_rdma_ioq_flush (peer);
}
- attr.qp_state = IBV_QPS_RTS;
- attr.timeout = options->attr_timeout;
- attr.retry_cnt = options->attr_retry_cnt;
- attr.rnr_retry = options->attr_rnr_retry;
- attr.sq_psn = priv->peer.local_psn;
- attr.max_rd_atomic = 1;
- if (ibv_modify_qp (priv->peer.qp, &attr,
- IBV_QP_STATE |
- IBV_QP_TIMEOUT |
- IBV_QP_RETRY_CNT |
- IBV_QP_RNR_RETRY |
- IBV_QP_SQ_PSN |
- IBV_QP_MAX_QP_RD_ATOMIC)) {
- gf_log (GF_RDMA_LOG_NAME,
- GF_LOG_CRITICAL,
- "Failed to modify QP to RTS\n");
- return -1;
+ if (peer->cm_id != NULL) {
+ rdma_destroy_id (peer->cm_id);
+ peer->cm_id = NULL;
}
+ /* TODO: decrement cq size */
return 0;
}
+
static int32_t
-__gf_rdma_teardown (rpc_transport_t *this)
+gf_rdma_teardown (rpc_transport_t *this)
{
+ int32_t ret = 0;
gf_rdma_private_t *priv = NULL;
- priv = this->private;
- __gf_rdma_destroy_qp (this);
-
- if (!list_empty (&priv->peer.ioq)) {
- __gf_rdma_ioq_flush (&priv->peer);
+ if (this == NULL) {
+ goto out;
}
- /* TODO: decrement cq size */
- return 0;
-}
-
-/*
- * return value:
- * 0 = success (completed)
- * -1 = error
- * > 0 = incomplete
- */
-
-static int
-__tcp_rwv (rpc_transport_t *this, struct iovec *vector, int count,
- struct iovec **pending_vector, int *pending_count,
- int write)
-{
- gf_rdma_private_t *priv = NULL;
- int sock = -1;
- int ret = -1;
- struct iovec *opvector = NULL;
- int opcount = 0;
- int moved = 0;
-
priv = this->private;
- sock = priv->sock;
- opvector = vector;
- opcount = count;
- while (opcount)
+ pthread_mutex_lock (&priv->write_mutex);
{
- if (write)
- {
- ret = writev (sock, opvector, opcount);
-
- if (ret == 0 || (ret == -1 && errno == EAGAIN))
- {
- /* done for now */
- break;
- }
- }
- else
- {
- ret = readv (sock, opvector, opcount);
-
- if (ret == -1 && errno == EAGAIN)
- {
- /* done for now */
- break;
- }
- }
-
- if (ret == 0)
- {
- gf_log (this->name, GF_LOG_DEBUG,
- "EOF from peer %s", this->peerinfo.identifier);
- opcount = -1;
- errno = ENOTCONN;
- break;
- }
-
- if (ret == -1)
- {
- if (errno == EINTR)
- continue;
-
- gf_log (this->name, GF_LOG_DEBUG,
- "%s failed (%s)", write ? "writev" : "readv",
- strerror (errno));
- if (write && !priv->connected &&
- (errno == ECONNREFUSED))
- gf_log (this->name, GF_LOG_ERROR,
- "possible mismatch of 'rpc-transport-type'"
- " in protocol server and client. "
- "check volume file");
- opcount = -1;
- break;
- }
-
- moved = 0;
-
- while (moved < ret)
- {
- if ((ret - moved) >= opvector[0].iov_len)
- {
- moved += opvector[0].iov_len;
- opvector++;
- opcount--;
- }
- else
- {
- opvector[0].iov_len -= (ret - moved);
- opvector[0].iov_base += (ret - moved);
- moved += (ret - moved);
- }
- while (opcount && !opvector[0].iov_len)
- {
- opvector++;
- opcount--;
- }
- }
- }
-
- if (pending_vector)
- *pending_vector = opvector;
-
- if (pending_count)
- *pending_count = opcount;
-
- return opcount;
-}
-
-
-static int
-__tcp_readv (rpc_transport_t *this, struct iovec *vector, int count,
- struct iovec **pending_vector, int *pending_count)
-{
- int ret = -1;
-
- ret = __tcp_rwv (this, vector, count,
- pending_vector, pending_count, 0);
-
- return ret;
-}
-
-
-static int
-__tcp_writev (rpc_transport_t *this, struct iovec *vector, int count,
- struct iovec **pending_vector, int *pending_count)
-{
- int ret = -1;
- gf_rdma_private_t *priv = NULL;
-
- priv = this->private;
-
- ret = __tcp_rwv (this, vector, count, pending_vector,
- pending_count, 1);
-
- if (ret > 0) {
- /* TODO: Avoid multiple calls when socket is already
- registered for POLLOUT */
- priv->idx = event_select_on (this->ctx->event_pool,
- priv->sock, priv->idx, -1, 1);
- } else if (ret == 0) {
- priv->idx = event_select_on (this->ctx->event_pool,
- priv->sock,
- priv->idx, -1, 0);
+ ret = __gf_rdma_teardown (this);
}
+ pthread_mutex_unlock (&priv->write_mutex);
+out:
return ret;
}
@@ -2701,10 +3078,11 @@ inline int32_t
gf_rdma_decode_error_msg (gf_rdma_peer_t *peer, gf_rdma_post_t *post,
size_t bytes_in_post)
{
- gf_rdma_header_t *header = NULL;
- struct iobuf *iobuf = NULL;
- struct iobref *iobref = NULL;
- int32_t ret = -1;
+ gf_rdma_header_t *header = NULL;
+ struct iobuf *iobuf = NULL;
+ struct iobref *iobref = NULL;
+ int32_t ret = -1;
+ struct rpc_msg rpc_msg = {0, };
header = (gf_rdma_header_t *)post->buf;
header->rm_body.rm_error.rm_type
@@ -2716,6 +3094,10 @@ gf_rdma_decode_error_msg (gf_rdma_peer_t *peer, gf_rdma_post_t *post,
ntoh32 (header->rm_body.rm_error.rm_version.gf_rdma_vers_high);
}
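+ /* Synthesize a minimal, well-formed REPLY with rp_stat = MSG_DENIED
+ * for the offending xid; it is serialized via rpc_reply_to_xdr()
+ * below and handed to the rpc layer in place of the raw rdma error
+ * frame.
+ */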
+ rpc_msg.rm_xid = header->rm_xid;
+ rpc_msg.rm_direction = REPLY;
+ rpc_msg.rm_reply.rp_stat = MSG_DENIED;
+
iobuf = iobuf_get2 (peer->trans->ctx->iobuf_pool, bytes_in_post);
if (iobuf == NULL) {
ret = -1;
@@ -2730,15 +3112,15 @@ gf_rdma_decode_error_msg (gf_rdma_peer_t *peer, gf_rdma_post_t *post,
iobref_add (iobref, iobuf);
iobuf_unref (iobuf);
- /*
- * FIXME: construct an appropriate rpc-msg here, what is being sent
- * to rpc is not correct.
- */
- post->ctx.vector[0].iov_base = iobuf_ptr (iobuf);
- post->ctx.vector[0].iov_len = bytes_in_post;
-
- memcpy (post->ctx.vector[0].iov_base, (char *)post->buf,
- post->ctx.vector[0].iov_len);
+
+ ret = rpc_reply_to_xdr (&rpc_msg, iobuf_ptr (iobuf),
+ iobuf_pagesize (iobuf), &post->ctx.vector[0]);
+ if (ret == -1) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "Failed to create RPC reply");
+ goto out;
+ }
+
post->ctx.count = 1;
iobuf = NULL;
@@ -2776,6 +3158,8 @@ gf_rdma_decode_msg (gf_rdma_peer_t *peer, gf_rdma_post_t *post,
ret = gf_rdma_get_read_chunklist (&ptr, readch);
if (ret == -1) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "cannot get read chunklist from msg");
goto out;
}
@@ -2784,6 +3168,8 @@ gf_rdma_decode_msg (gf_rdma_peer_t *peer, gf_rdma_post_t *post,
ret = gf_rdma_get_write_chunklist (&ptr, &write_ary);
if (ret == -1) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "cannot get write chunklist from msg");
goto out;
}
@@ -2793,6 +3179,8 @@ gf_rdma_decode_msg (gf_rdma_peer_t *peer, gf_rdma_post_t *post,
if (write_ary != NULL) {
reply_info = gf_rdma_reply_info_alloc (peer);
if (reply_info == NULL) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "reply_info_alloc failed");
ret = -1;
goto out;
}
@@ -2803,12 +3191,16 @@ gf_rdma_decode_msg (gf_rdma_peer_t *peer, gf_rdma_post_t *post,
} else {
ret = gf_rdma_get_write_chunklist (&ptr, &write_ary);
if (ret == -1) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "cannot get reply chunklist from msg");
goto out;
}
if (write_ary != NULL) {
reply_info = gf_rdma_reply_info_alloc (peer);
if (reply_info == NULL) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "reply_info_alloc_failed");
ret = -1;
goto out;
}
@@ -2846,9 +3238,7 @@ out:
*readch = NULL;
}
- if (write_ary != NULL) {
- GF_FREE (write_ary);
- }
+ GF_FREE (write_ary);
}
return ret;
@@ -2874,28 +3264,36 @@ gf_rdma_decode_header (gf_rdma_peer_t *peer, gf_rdma_post_t *post,
case GF_RDMA_MSG:
case GF_RDMA_NOMSG:
ret = gf_rdma_decode_msg (peer, post, readch, bytes_in_post);
+ if (ret < 0) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "cannot decode msg of type (%d)",
+ header->rm_type);
+ }
+
break;
case GF_RDMA_MSGP:
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
"rdma msg of msg-type GF_RDMA_MSGP should not have "
"been received");
ret = -1;
break;
case GF_RDMA_DONE:
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
"rdma msg of msg-type GF_RDMA_DONE should not have "
"been received");
ret = -1;
break;
case GF_RDMA_ERROR:
- /* ret = gf_rdma_decode_error_msg (peer, post, bytes_in_post); */
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "received a msg of type RDMA_ERROR");
+ ret = gf_rdma_decode_error_msg (peer, post, bytes_in_post);
break;
default:
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
"unknown rdma msg-type (%d)", header->rm_type);
}
@@ -2913,6 +3311,8 @@ __gf_rdma_read (gf_rdma_peer_t *peer, gf_rdma_post_t *post, struct iovec *to,
ret = __gf_rdma_register_local_mr_for_rdma (peer, to, 1, &post->ctx);
if (ret == -1) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "registering local memory for rdma read failed");
goto out;
}
@@ -2930,7 +3330,8 @@ __gf_rdma_read (gf_rdma_peer_t *peer, gf_rdma_post_t *post, struct iovec *to,
ret = ibv_post_send (peer->qp, &wr, &bad_wr);
if (ret) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG, "rdma read from client "
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "rdma read from client "
"(%s) failed with ret = %d (%s)",
peer->trans->peerinfo.identifier,
ret, (ret > 0) ? strerror (ret) : "");
@@ -2959,7 +3360,7 @@ gf_rdma_do_reads (gf_rdma_peer_t *peer, gf_rdma_post_t *post,
}
if (i == 0) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
"message type specified as rdma-read but there are no "
"rdma read-chunks present");
goto out;
@@ -2989,6 +3390,10 @@ gf_rdma_do_reads (gf_rdma_peer_t *peer, gf_rdma_post_t *post,
pthread_mutex_lock (&priv->write_mutex);
{
if (!priv->connected) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "transport not connected to peer (%s), "
+ "not doing rdma reads",
+ peer->trans->peerinfo.identifier);
goto unlock;
}
@@ -3002,6 +3407,9 @@ gf_rdma_do_reads (gf_rdma_peer_t *peer, gf_rdma_post_t *post,
&post->ctx.vector[count],
&readch[i]);
if (ret == -1) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "rdma read from peer (%s) failed",
+ peer->trans->peerinfo.identifier);
goto unlock;
}
@@ -3108,6 +3516,10 @@ gf_rdma_pollin_notify (gf_rdma_peer_t *peer, gf_rdma_post_t *post)
ret = rpc_transport_notify (peer->trans, RPC_TRANSPORT_MSG_RECEIVED,
pollin);
+ if (ret < 0) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "transport_notify failed");
+ }
out:
if (pollin != NULL) {
@@ -3168,15 +3580,15 @@ gf_rdma_recv_reply (gf_rdma_peer_t *peer, gf_rdma_post_t *post)
RPC_TRANSPORT_MAP_XID_REQUEST,
&request_info);
if (ret == -1) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
- "cannot get request information from rpc "
- "layer");
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "cannot get request information (peer:%s) from rpc "
+ "layer", peer->trans->peerinfo.identifier);
goto out;
}
rpc_req = request_info.rpc_req;
if (rpc_req == NULL) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
"rpc request structure not found");
ret = -1;
goto out;
@@ -3194,6 +3606,10 @@ gf_rdma_recv_reply (gf_rdma_peer_t *peer, gf_rdma_post_t *post)
out:
if (ret == 0) {
ret = gf_rdma_pollin_notify (peer, post);
+ if (ret < 0) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "pollin notify failed");
+ }
}
return ret;
@@ -3208,10 +3624,15 @@ gf_rdma_recv_request (gf_rdma_peer_t *peer, gf_rdma_post_t *post,
if (readch != NULL) {
ret = gf_rdma_do_reads (peer, post, readch);
+ if (ret < 0) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "rdma read from peer (%s) failed",
+ peer->trans->peerinfo.identifier);
+ }
} else {
ret = gf_rdma_pollin_notify (peer, post);
if (ret == -1) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
"pollin notification failed");
}
}
@@ -3228,23 +3649,44 @@ gf_rdma_process_recv (gf_rdma_peer_t *peer, struct ibv_wc *wc)
uint32_t *ptr = NULL;
enum msg_type msg_type = 0;
gf_rdma_header_t *header = NULL;
+ gf_rdma_private_t *priv = NULL;
post = (gf_rdma_post_t *) (long) wc->wr_id;
if (post == NULL) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
"no post found in successful work completion element");
goto out;
}
ret = gf_rdma_decode_header (peer, post, &readch, wc->byte_len);
if (ret == -1) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
"decoding of header failed");
goto out;
}
header = (gf_rdma_header_t *)post->buf;
+ priv = peer->trans->private;
+
+ pthread_mutex_lock (&priv->write_mutex);
+ {
+ if (!priv->peer.quota_set) {
+ priv->peer.quota_set = 1;
+
+ /* Initially peer.quota is set to 1 as per RFC 5666. We
+ * have to account for the quota used while sending
+ * first msg (which may or may not be returned to pool
+ * at this point) while deriving peer.quota from
+ * header->rm_credit. Hence the arithmetic below,
+ * instead of directly setting it to header->rm_credit.
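+ * For example (illustrative numbers only): with rm_credit = 32,
+ * if the quota used by the first send has not yet been returned
+ * (peer.quota == 0), this yields 32 - (1 - 0) = 31; once it has
+ * been returned (peer.quota == 1), it yields the full 32.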
+ */
+ priv->peer.quota = header->rm_credit
+ - ( 1 - priv->peer.quota);
+ }
+ }
+ pthread_mutex_unlock (&priv->write_mutex);
+
switch (header->rm_type) {
case GF_RDMA_MSG:
ptr = (uint32_t *)post->ctx.vector[0].iov_base;
@@ -3260,30 +3702,48 @@ gf_rdma_process_recv (gf_rdma_peer_t *peer, struct ibv_wc *wc)
break;
case GF_RDMA_ERROR:
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "an error has happened while transmission of msg, "
- "disconnecting the transport");
- rpc_transport_disconnect (peer->trans);
- goto out;
-
-/* ret = gf_rdma_pollin_notify (peer, post);
- if (ret == -1) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
- "pollin notification failed");
- }
- goto out;
-*/
+ if (header->rm_body.rm_error.rm_type == ERR_CHUNK) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "peer (%s), couldn't encode or decode the msg "
+ "properly or write chunks were not provided "
+ "for replies that were bigger than "
+ "RDMA_INLINE_THRESHOLD (%d)",
+ peer->trans->peerinfo.identifier,
+ GLUSTERFS_RDMA_INLINE_THRESHOLD);
+ ret = gf_rdma_pollin_notify (peer, post);
+ if (ret == -1) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
+ "pollin notification failed");
+ }
+ goto out;
+ } else {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
+ "an error has happened while transmission of "
+ "msg, disconnecting the transport");
+ ret = -1;
+ goto out;
+ }
default:
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
"invalid rdma msg-type (%d)", header->rm_type);
- break;
+ goto out;
}
if (msg_type == CALL) {
ret = gf_rdma_recv_request (peer, post, readch);
+ if (ret < 0) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "receiving a request from peer (%s) failed",
+ peer->trans->peerinfo.identifier);
+ }
} else {
ret = gf_rdma_recv_reply (peer, post);
+ if (ret < 0) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "receiving a reply from peer (%s) failed",
+ peer->trans->peerinfo.identifier);
+ }
}
out:
@@ -3294,6 +3754,42 @@ out:
return;
}
+void *
+gf_rdma_async_event_thread (void *context)
+{
+ struct ibv_async_event event;
+ int ret;
+
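+ /* Drain the device's asynchronous event queue. ibv_get_async_event()
+ * blocks until an event arrives, and every event obtained must be
+ * acknowledged with ibv_ack_async_event(), otherwise later destruction
+ * of the associated verbs objects would block waiting for it.
+ */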
+ while (1) {
+ do {
+ ret = ibv_get_async_event((struct ibv_context *)context,
+ &event);
+
+ if (ret && errno != EINTR) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "Error getting event (%s)",
+ strerror (errno));
+ }
+ } while (ret && errno == EINTR);
+
+ switch (event.event_type) {
+ case IBV_EVENT_SRQ_LIMIT_REACHED:
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "recieved srq_limit reached");
+ break;
+
+ default:
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
+ "event (%d) recieved", event.event_type);
+ break;
+ }
+
+ ibv_ack_async_event(&event);
+ }
+
+ return 0;
+}
+
static void *
gf_rdma_recv_completion_proc (void *data)
@@ -3412,7 +3908,7 @@ gf_rdma_handle_failed_send_completion (gf_rdma_peer_t *peer, struct ibv_wc *wc)
post = (gf_rdma_post_t *) (long) wc->wr_id;
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
"send work request on `%s' returned error "
"wc.status = %d, wc.vendor_err = %d, post->buf = %p, "
"wc.byte_len = %d, post->reused = %d",
@@ -3679,913 +4175,162 @@ gf_rdma_options_init (rpc_transport_t *this)
return;
}
-static void
-gf_rdma_queue_init (gf_rdma_queue_t *queue)
-{
- pthread_mutex_init (&queue->lock, NULL);
-
- queue->active_posts.next = &queue->active_posts;
- queue->active_posts.prev = &queue->active_posts;
- queue->passive_posts.next = &queue->passive_posts;
- queue->passive_posts.prev = &queue->passive_posts;
-}
-
-static gf_rdma_device_t *
-gf_rdma_get_device (rpc_transport_t *this, struct ibv_context *ibctx)
+gf_rdma_ctx_t *
+__gf_rdma_ctx_create (void)
{
- glusterfs_ctx_t *ctx = NULL;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_options_t *options = NULL;
- char *device_name = NULL;
- uint32_t port = 0;
- uint8_t active_port = 0;
- int32_t ret = 0;
- int32_t i = 0;
- gf_rdma_device_t *trav = NULL;
+ gf_rdma_ctx_t *rdma_ctx = NULL;
+ int ret = -1;
- priv = this->private;
- options = &priv->options;
- device_name = priv->options.device_name;
- ctx = this->ctx;
- trav = ctx->ib;
- port = priv->options.port;
-
- while (trav) {
- if ((!strcmp (trav->device_name, device_name)) &&
- (trav->port == port))
- break;
- trav = trav->next;
+ rdma_ctx = GF_CALLOC (1, sizeof (*rdma_ctx), gf_common_mt_char);
+ if (rdma_ctx == NULL) {
+ goto out;
}
- if (!trav) {
-
- trav = GF_CALLOC (1, sizeof (*trav),
- gf_common_mt_rdma_device_t);
- if (trav == NULL) {
- return NULL;
- }
-
- priv->device = trav;
-
- trav->context = ibctx;
-
- ret = ib_get_active_port (trav->context);
-
- if (ret < 0) {
- if (!port) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "Failed to find any active ports and "
- "none specified in volume file,"
- " exiting");
- GF_FREE (trav);
- return NULL;
- }
- }
-
- trav->request_ctx_pool = mem_pool_new (gf_rdma_request_context_t,
- GF_RDMA_POOL_SIZE);
- if (trav->request_ctx_pool == NULL) {
- return NULL;
- }
-
- trav->ioq_pool = mem_pool_new (gf_rdma_ioq_t, GF_RDMA_POOL_SIZE);
- if (trav->ioq_pool == NULL) {
- mem_pool_destroy (trav->request_ctx_pool);
- return NULL;
- }
-
- trav->reply_info_pool = mem_pool_new (gf_rdma_reply_info_t,
- GF_RDMA_POOL_SIZE);
- if (trav->reply_info_pool == NULL) {
- mem_pool_destroy (trav->request_ctx_pool);
- mem_pool_destroy (trav->ioq_pool);
- return NULL;
- }
-
-
- active_port = ret;
-
- if (port) {
- ret = ib_check_active_port (trav->context, port);
- if (ret < 0) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
- "On device %s: provided port:%u is "
- "found to be offline, continuing to "
- "use the same port", device_name, port);
- }
- } else {
- priv->options.port = active_port;
- port = active_port;
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_TRACE,
- "Port unspecified in volume file using active "
- "port: %u", port);
- }
-
- trav->device_name = gf_strdup (device_name);
- trav->port = port;
-
- trav->next = ctx->ib;
- ctx->ib = trav;
-
- trav->send_chan = ibv_create_comp_channel (trav->context);
- if (!trav->send_chan) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: could not create send completion channel",
- device_name);
- mem_pool_destroy (trav->ioq_pool);
- mem_pool_destroy (trav->request_ctx_pool);
- mem_pool_destroy (trav->reply_info_pool);
- GF_FREE ((char *)trav->device_name);
- GF_FREE (trav);
- return NULL;
- }
-
- trav->recv_chan = ibv_create_comp_channel (trav->context);
- if (!trav->recv_chan) {
- mem_pool_destroy (trav->ioq_pool);
- mem_pool_destroy (trav->request_ctx_pool);
- mem_pool_destroy (trav->reply_info_pool);
- ibv_destroy_comp_channel (trav->send_chan);
- GF_FREE ((char *)trav->device_name);
- GF_FREE (trav);
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "could not create recv completion channel");
- /* TODO: cleanup current mess */
- return NULL;
- }
-
- if (gf_rdma_create_cq (this) < 0) {
- mem_pool_destroy (trav->ioq_pool);
- mem_pool_destroy (trav->request_ctx_pool);
- mem_pool_destroy (trav->reply_info_pool);
- ibv_destroy_comp_channel (trav->recv_chan);
- ibv_destroy_comp_channel (trav->send_chan);
- GF_FREE ((char *)trav->device_name);
- GF_FREE (trav);
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: could not create CQ",
- this->name);
- return NULL;
- }
-
- /* protection domain */
- trav->pd = ibv_alloc_pd (trav->context);
-
- if (!trav->pd) {
- mem_pool_destroy (trav->ioq_pool);
- mem_pool_destroy (trav->request_ctx_pool);
- mem_pool_destroy (trav->reply_info_pool);
- gf_rdma_destroy_cq (this);
- ibv_destroy_comp_channel (trav->recv_chan);
- ibv_destroy_comp_channel (trav->send_chan);
- GF_FREE ((char *)trav->device_name);
- GF_FREE (trav);
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: could not allocate protection domain",
- this->name);
- return NULL;
- }
-
- struct ibv_srq_init_attr attr = {
- .attr = {
- .max_wr = options->recv_count,
- .max_sge = 1
- }
- };
- trav->srq = ibv_create_srq (trav->pd, &attr);
-
- if (!trav->srq) {
- mem_pool_destroy (trav->ioq_pool);
- mem_pool_destroy (trav->request_ctx_pool);
- mem_pool_destroy (trav->reply_info_pool);
- ibv_dealloc_pd (trav->pd);
- gf_rdma_destroy_cq (this);
- ibv_destroy_comp_channel (trav->recv_chan);
- ibv_destroy_comp_channel (trav->send_chan);
- GF_FREE ((char *)trav->device_name);
- GF_FREE (trav);
-
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: could not create SRQ",
- this->name);
- return NULL;
- }
-
- /* queue init */
- gf_rdma_queue_init (&trav->sendq);
- gf_rdma_queue_init (&trav->recvq);
-
- if (gf_rdma_create_posts (this) < 0) {
- mem_pool_destroy (trav->ioq_pool);
- mem_pool_destroy (trav->request_ctx_pool);
- mem_pool_destroy (trav->reply_info_pool);
- ibv_dealloc_pd (trav->pd);
- gf_rdma_destroy_cq (this);
- ibv_destroy_comp_channel (trav->recv_chan);
- ibv_destroy_comp_channel (trav->send_chan);
- GF_FREE ((char *)trav->device_name);
- GF_FREE (trav);
-
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: could not allocate posts",
- this->name);
- return NULL;
- }
-
- /* completion threads */
- ret = pthread_create (&trav->send_thread,
- NULL,
- gf_rdma_send_completion_proc,
- trav->send_chan);
- if (ret) {
- gf_rdma_destroy_posts (this);
- mem_pool_destroy (trav->ioq_pool);
- mem_pool_destroy (trav->request_ctx_pool);
- mem_pool_destroy (trav->reply_info_pool);
- ibv_dealloc_pd (trav->pd);
- gf_rdma_destroy_cq (this);
- ibv_destroy_comp_channel (trav->recv_chan);
- ibv_destroy_comp_channel (trav->send_chan);
- GF_FREE ((char *)trav->device_name);
- GF_FREE (trav);
+ rdma_ctx->rdma_cm_event_channel = rdma_create_event_channel ();
+ if (rdma_ctx->rdma_cm_event_channel == NULL) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "rdma_cm event channel creation failed (%s)",
+ strerror (errno));
+ goto out;
+ }
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "could not create send completion thread");
- return NULL;
- }
+ ret = gf_thread_create (&rdma_ctx->rdma_cm_thread, NULL,
+ gf_rdma_cm_event_handler,
+ rdma_ctx->rdma_cm_event_channel);
+ if (ret != 0) {
+ gf_log (GF_RDMA_LOG_NAME, GF_LOG_WARNING,
+ "creation of thread to handle rdma-cm events "
+ "failed (%s)", strerror (ret));
+ goto out;
+ }
- ret = pthread_create (&trav->recv_thread,
- NULL,
- gf_rdma_recv_completion_proc,
- trav->recv_chan);
- if (ret) {
- gf_rdma_destroy_posts (this);
- mem_pool_destroy (trav->ioq_pool);
- mem_pool_destroy (trav->request_ctx_pool);
- mem_pool_destroy (trav->reply_info_pool);
- ibv_dealloc_pd (trav->pd);
- gf_rdma_destroy_cq (this);
- ibv_destroy_comp_channel (trav->recv_chan);
- ibv_destroy_comp_channel (trav->send_chan);
- GF_FREE ((char *)trav->device_name);
- GF_FREE (trav);
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "could not create recv completion thread");
- return NULL;
+out:
+ if (ret != 0 && rdma_ctx != NULL) {
+ if (rdma_ctx->rdma_cm_event_channel != NULL) {
+ rdma_destroy_event_channel (rdma_ctx->rdma_cm_event_channel);
}
- /* qpreg */
- pthread_mutex_init (&trav->qpreg.lock, NULL);
- for (i=0; i<42; i++) {
- trav->qpreg.ents[i].next = &trav->qpreg.ents[i];
- trav->qpreg.ents[i].prev = &trav->qpreg.ents[i];
- }
+ GF_FREE (rdma_ctx);
+ rdma_ctx = NULL;
}
- return trav;
+
+ return rdma_ctx;
}
static int32_t
gf_rdma_init (rpc_transport_t *this)
{
gf_rdma_private_t *priv = NULL;
- gf_rdma_options_t *options = NULL;
- struct ibv_device **dev_list;
- struct ibv_context *ib_ctx = NULL;
int32_t ret = 0;
+ glusterfs_ctx_t *ctx = NULL;
+ gf_rdma_options_t *options = NULL;
+
+ ctx = this->ctx;
priv = this->private;
- options = &priv->options;
ibv_fork_init ();
gf_rdma_options_init (this);
- {
- dev_list = ibv_get_device_list (NULL);
-
- if (!dev_list) {
- gf_log (GF_RDMA_LOG_NAME,
- GF_LOG_CRITICAL,
- "Failed to get IB devices");
- ret = -1;
- goto cleanup;
- }
-
- if (!*dev_list) {
- gf_log (GF_RDMA_LOG_NAME,
- GF_LOG_CRITICAL,
- "No IB devices found");
- ret = -1;
- goto cleanup;
- }
-
- if (!options->device_name) {
- if (*dev_list) {
- options->device_name =
- gf_strdup (ibv_get_device_name (*dev_list));
- } else {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_CRITICAL,
- "IB device list is empty. Check for "
- "'ib_uverbs' module");
- return -1;
- goto cleanup;
- }
- }
-
- while (*dev_list) {
- if (!strcmp (ibv_get_device_name (*dev_list),
- options->device_name)) {
- ib_ctx = ibv_open_device (*dev_list);
-
- if (!ib_ctx) {
- gf_log (GF_RDMA_LOG_NAME,
- GF_LOG_ERROR,
- "Failed to get infiniband"
- "device context");
- ret = -1;
- goto cleanup;
- }
- break;
- }
- ++dev_list;
- }
-
- priv->device = gf_rdma_get_device (this, ib_ctx);
-
- if (!priv->device) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "could not create rdma device for %s",
- options->device_name);
- ret = -1;
- goto cleanup;
- }
- }
+ options = &priv->options;
+ priv->peer.send_count = options->send_count;
+ priv->peer.recv_count = options->recv_count;
+ priv->peer.send_size = options->send_size;
+ priv->peer.recv_size = options->recv_size;
priv->peer.trans = this;
INIT_LIST_HEAD (&priv->peer.ioq);
- pthread_mutex_init (&priv->read_mutex, NULL);
pthread_mutex_init (&priv->write_mutex, NULL);
pthread_mutex_init (&priv->recv_mutex, NULL);
pthread_cond_init (&priv->recv_cond, NULL);
-cleanup:
- if (-1 == ret) {
- if (ib_ctx)
- ibv_close_device (ib_ctx);
- }
-
- if (dev_list)
- ibv_free_device_list (dev_list);
-
- return ret;
-}
-
-
-static int32_t
-gf_rdma_disconnect (rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = NULL;
- int32_t ret = 0;
-
- priv = this->private;
- pthread_mutex_lock (&priv->write_mutex);
- {
- ret = __gf_rdma_disconnect (this);
- }
- pthread_mutex_unlock (&priv->write_mutex);
-
- return ret;
-}
-
-
-static int32_t
-__tcp_connect_finish (int fd)
-{
- int ret = -1;
- int optval = 0;
- socklen_t optlen = sizeof (int);
-
- ret = getsockopt (fd, SOL_SOCKET, SO_ERROR,
- (void *)&optval, &optlen);
-
- if (ret == 0 && optval)
- {
- errno = optval;
- ret = -1;
- }
-
- return ret;
-}
-
-static inline void
-gf_rdma_fill_handshake_data (char *buf, struct gf_rdma_nbio *nbio,
- gf_rdma_private_t *priv)
-{
- sprintf (buf,
- "QP1:RECV_BLKSIZE=%08x:SEND_BLKSIZE=%08x\n"
- "QP1:LID=%04x:QPN=%06x:PSN=%06x\n",
- priv->peer.recv_size,
- priv->peer.send_size,
- priv->peer.local_lid,
- priv->peer.local_qpn,
- priv->peer.local_psn);
-
- nbio->vector.iov_base = buf;
- nbio->vector.iov_len = strlen (buf) + 1;
- nbio->count = 1;
- return;
-}
-
-static inline void
-gf_rdma_fill_handshake_ack (char *buf, struct gf_rdma_nbio *nbio)
-{
- sprintf (buf, "DONE\n");
- nbio->vector.iov_base = buf;
- nbio->vector.iov_len = strlen (buf) + 1;
- nbio->count = 1;
- return;
-}
-
-static int
-gf_rdma_handshake_pollin (rpc_transport_t *this)
-{
- int ret = 0;
- gf_rdma_private_t *priv = NULL;
- char *buf = NULL;
- int32_t recv_buf_size = 0, send_buf_size;
- socklen_t sock_len = 0;
-
- priv = this->private;
- buf = priv->handshake.incoming.buf;
-
- if (priv->handshake.incoming.state == GF_RDMA_HANDSHAKE_COMPLETE) {
- return -1;
- }
-
- pthread_mutex_lock (&priv->write_mutex);
+ pthread_mutex_lock (&ctx->lock);
{
- while (priv->handshake.incoming.state != GF_RDMA_HANDSHAKE_COMPLETE)
- {
- switch (priv->handshake.incoming.state)
- {
- case GF_RDMA_HANDSHAKE_START:
- buf = priv->handshake.incoming.buf = GF_CALLOC (1, 256, gf_common_mt_char);
- gf_rdma_fill_handshake_data (buf, &priv->handshake.incoming, priv);
- buf[0] = 0;
- priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_RECEIVING_DATA;
- break;
-
- case GF_RDMA_HANDSHAKE_RECEIVING_DATA:
- ret = __tcp_readv (this,
- &priv->handshake.incoming.vector,
- priv->handshake.incoming.count,
- &priv->handshake.incoming.pending_vector,
- &priv->handshake.incoming.pending_count);
- if (ret == -1) {
- goto unlock;
- }
-
- if (ret > 0) {
- gf_log (this->name, GF_LOG_TRACE,
- "partial header read on NB socket. continue later");
- goto unlock;
- }
-
- if (!ret) {
- priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_RECEIVED_DATA;
- }
- break;
-
- case GF_RDMA_HANDSHAKE_RECEIVED_DATA:
- ret = sscanf (buf,
- "QP1:RECV_BLKSIZE=%08x:SEND_BLKSIZE=%08x\n"
- "QP1:LID=%04x:QPN=%06x:PSN=%06x\n",
- &recv_buf_size,
- &send_buf_size,
- &priv->peer.remote_lid,
- &priv->peer.remote_qpn,
- &priv->peer.remote_psn);
-
- if ((ret != 5) && (strncmp (buf, "QP1:", 4))) {
- gf_log (GF_RDMA_LOG_NAME,
- GF_LOG_CRITICAL,
- "%s: remote-host(%s)'s "
- "transport type is different",
- this->name,
- this->peerinfo.identifier);
- ret = -1;
- goto unlock;
- }
-
- if (recv_buf_size < priv->peer.recv_size)
- priv->peer.recv_size = recv_buf_size;
- if (send_buf_size < priv->peer.send_size)
- priv->peer.send_size = send_buf_size;
-
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_TRACE,
- "%s: transacted recv_size=%d "
- "send_size=%d",
- this->name, priv->peer.recv_size,
- priv->peer.send_size);
-
- priv->peer.quota = priv->peer.send_count;
-
- if (gf_rdma_connect_qp (this)) {
- gf_log (GF_RDMA_LOG_NAME,
- GF_LOG_ERROR,
- "%s: failed to connect with "
- "remote QP", this->name);
- ret = -1;
- goto unlock;
- }
- gf_rdma_fill_handshake_ack (buf, &priv->handshake.incoming);
- buf[0] = 0;
- priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_RECEIVING_ACK;
- break;
-
- case GF_RDMA_HANDSHAKE_RECEIVING_ACK:
- ret = __tcp_readv (this,
- &priv->handshake.incoming.vector,
- priv->handshake.incoming.count,
- &priv->handshake.incoming.pending_vector,
- &priv->handshake.incoming.pending_count);
- if (ret == -1) {
- goto unlock;
- }
-
- if (ret > 0) {
- gf_log (this->name, GF_LOG_TRACE,
- "partial header read on NB "
- "socket. continue later");
- goto unlock;
- }
-
- if (!ret) {
- priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_RECEIVED_ACK;
- }
- break;
-
- case GF_RDMA_HANDSHAKE_RECEIVED_ACK:
- if (strncmp (buf, "DONE", 4)) {
- gf_log (GF_RDMA_LOG_NAME,
- GF_LOG_DEBUG,
- "%s: handshake-3 did not "
- "return 'DONE' (%s)",
- this->name, buf);
- ret = -1;
- goto unlock;
- }
- ret = 0;
- priv->connected = 1;
- sock_len = sizeof (struct sockaddr_storage);
- getpeername (priv->sock,
- (struct sockaddr *) &this->peerinfo.sockaddr,
- &sock_len);
-
- GF_FREE (priv->handshake.incoming.buf);
- priv->handshake.incoming.buf = NULL;
- priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_COMPLETE;
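+ /* The process-wide rdma context (rdma-cm event channel plus its
+ * handler thread) is created lazily here under ctx->lock and is
+ * shared by every rdma transport in this process.
+ */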
+ if (ctx->ib == NULL) {
+ ctx->ib = __gf_rdma_ctx_create ();
+ if (ctx->ib == NULL) {
+ ret = -1;
}
}
}
-unlock:
- pthread_mutex_unlock (&priv->write_mutex);
-
- if (ret == -1) {
- rpc_transport_disconnect (this);
- } else {
- ret = 0;
- }
-
-
- if (!ret && priv->connected) {
- if (priv->is_server) {
- ret = rpc_transport_notify (priv->listener,
- RPC_TRANSPORT_ACCEPT,
- this);
- } else {
- ret = rpc_transport_notify (this, RPC_TRANSPORT_CONNECT,
- this);
- }
- }
+ pthread_mutex_unlock (&ctx->lock);
return ret;
}
-static int
-gf_rdma_handshake_pollout (rpc_transport_t *this)
+
+static int32_t
+gf_rdma_disconnect (rpc_transport_t *this)
{
gf_rdma_private_t *priv = NULL;
- char *buf = NULL;
int32_t ret = 0;
priv = this->private;
- buf = priv->handshake.outgoing.buf;
-
- if (priv->handshake.outgoing.state == GF_RDMA_HANDSHAKE_COMPLETE) {
- return 0;
- }
-
- pthread_mutex_unlock (&priv->write_mutex);
- {
- while (priv->handshake.outgoing.state
- != GF_RDMA_HANDSHAKE_COMPLETE)
- {
- switch (priv->handshake.outgoing.state)
- {
- case GF_RDMA_HANDSHAKE_START:
- buf = priv->handshake.outgoing.buf
- = GF_CALLOC (1, 256, gf_common_mt_char);
- gf_rdma_fill_handshake_data (buf,
- &priv->handshake.outgoing, priv);
- priv->handshake.outgoing.state
- = GF_RDMA_HANDSHAKE_SENDING_DATA;
- break;
-
- case GF_RDMA_HANDSHAKE_SENDING_DATA:
- ret = __tcp_writev (this,
- &priv->handshake.outgoing.vector,
- priv->handshake.outgoing.count,
- &priv->handshake.outgoing.pending_vector,
- &priv->handshake.outgoing.pending_count);
- if (ret == -1) {
- goto unlock;
- }
-
- if (ret > 0) {
- gf_log (this->name, GF_LOG_TRACE,
- "partial header read on NB "
- "socket. continue later");
- goto unlock;
- }
-
- if (!ret) {
- priv->handshake.outgoing.state
- = GF_RDMA_HANDSHAKE_SENT_DATA;
- }
- break;
-
- case GF_RDMA_HANDSHAKE_SENT_DATA:
- gf_rdma_fill_handshake_ack (buf,
- &priv->handshake.outgoing);
- priv->handshake.outgoing.state
- = GF_RDMA_HANDSHAKE_SENDING_ACK;
- break;
-
- case GF_RDMA_HANDSHAKE_SENDING_ACK:
- ret = __tcp_writev (this,
- &priv->handshake.outgoing.vector,
- priv->handshake.outgoing.count,
- &priv->handshake.outgoing.pending_vector,
- &priv->handshake.outgoing.pending_count);
-
- if (ret == -1) {
- goto unlock;
- }
-
- if (ret > 0) {
- gf_log (this->name, GF_LOG_TRACE,
- "partial header read on NB "
- "socket. continue later");
- goto unlock;
- }
-
- if (!ret) {
- GF_FREE (priv->handshake.outgoing.buf);
- priv->handshake.outgoing.buf = NULL;
- priv->handshake.outgoing.state
- = GF_RDMA_HANDSHAKE_COMPLETE;
- }
- break;
- }
- }
- }
-unlock:
- pthread_mutex_unlock (&priv->write_mutex);
-
- if (ret == -1) {
- rpc_transport_disconnect (this);
- } else {
- ret = 0;
- }
-
- return ret;
-}
-
-static int
-gf_rdma_handshake_pollerr (rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = this->private;
- char need_unref = 0, connected = 0;
-
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_DEBUG,
- "%s: peer disconnected, cleaning up",
- this->name);
+ gf_log_callingfn (this->name, GF_LOG_WARNING,
+ "disconnect called (peer:%s)",
+ this->peerinfo.identifier);
pthread_mutex_lock (&priv->write_mutex);
{
- __gf_rdma_teardown (this);
-
- connected = priv->connected;
- if (priv->sock != -1) {
- event_unregister (this->ctx->event_pool,
- priv->sock, priv->idx);
- need_unref = 1;
-
- if (close (priv->sock) != 0) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "close () - error: %s",
- strerror (errno));
- }
- priv->tcp_connected = priv->connected = 0;
- priv->sock = -1;
- }
-
- if (priv->handshake.incoming.buf) {
- GF_FREE (priv->handshake.incoming.buf);
- priv->handshake.incoming.buf = NULL;
- }
-
- priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_START;
-
- if (priv->handshake.outgoing.buf) {
- GF_FREE (priv->handshake.outgoing.buf);
- priv->handshake.outgoing.buf = NULL;
- }
-
- priv->handshake.outgoing.state = GF_RDMA_HANDSHAKE_START;
- }
- pthread_mutex_unlock (&priv->write_mutex);
-
- if (connected) {
- rpc_transport_notify (this, RPC_TRANSPORT_DISCONNECT, this);
- }
-
- if (need_unref)
- rpc_transport_unref (this);
-
- return 0;
-}
-
-
-static int
-tcp_connect_finish (rpc_transport_t *this)
-{
- gf_rdma_private_t *priv = NULL;
- int error = 0, ret = 0;
-
- priv = this->private;
- pthread_mutex_lock (&priv->write_mutex);
- {
- ret = __tcp_connect_finish (priv->sock);
-
- if (!ret) {
- this->myinfo.sockaddr_len =
- sizeof (this->myinfo.sockaddr);
- ret = getsockname (priv->sock,
- (struct sockaddr *)&this->myinfo.sockaddr,
- &this->myinfo.sockaddr_len);
- if (ret == -1)
- {
- gf_log (this->name, GF_LOG_ERROR,
- "getsockname on new client-socket %d "
- "failed (%s)",
- priv->sock, strerror (errno));
- close (priv->sock);
- error = 1;
- goto unlock;
- }
-
- gf_rdma_get_transport_identifiers (this);
- priv->tcp_connected = 1;
- }
-
- if (ret == -1 && errno != EINPROGRESS) {
- gf_log (this->name, GF_LOG_ERROR,
- "tcp connect to %s failed (%s)",
- this->peerinfo.identifier, strerror (errno));
- error = 1;
- }
+ ret = __gf_rdma_disconnect (this);
}
-unlock:
pthread_mutex_unlock (&priv->write_mutex);
- if (error) {
- rpc_transport_disconnect (this);
- }
-
return ret;
}
-static int
-gf_rdma_event_handler (int fd, int idx, void *data,
- int poll_in, int poll_out, int poll_err)
-{
- rpc_transport_t *this = NULL;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_options_t *options = NULL;
- int ret = 0;
-
- this = data;
- priv = this->private;
- if (!priv->tcp_connected) {
- ret = tcp_connect_finish (this);
- if (priv->tcp_connected) {
- options = &priv->options;
-
- priv->peer.send_count = options->send_count;
- priv->peer.recv_count = options->recv_count;
- priv->peer.send_size = options->send_size;
- priv->peer.recv_size = options->recv_size;
-
- if ((ret = gf_rdma_create_qp (this)) < 0) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: could not create QP",
- this->name);
- rpc_transport_disconnect (this);
- }
- }
- }
-
- if (!ret && poll_out && priv->tcp_connected) {
- ret = gf_rdma_handshake_pollout (this);
- }
-
- if (!ret && !poll_err && poll_in && priv->tcp_connected) {
- if (priv->handshake.incoming.state
- == GF_RDMA_HANDSHAKE_COMPLETE) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: pollin received on tcp socket (peer: %s) "
- "after handshake is complete",
- this->name, this->peerinfo.identifier);
- gf_rdma_handshake_pollerr (this);
- return 0;
- }
- ret = gf_rdma_handshake_pollin (this);
- }
-
- if (ret < 0 || poll_err) {
- ret = gf_rdma_handshake_pollerr (this);
- }
-
- return 0;
-}
-
-static int
-__tcp_nonblock (int fd)
-{
- int flags = 0;
- int ret = -1;
-
- flags = fcntl (fd, F_GETFL);
-
- if (flags != -1)
- ret = fcntl (fd, F_SETFL, flags | O_NONBLOCK);
-
- return ret;
-}
static int32_t
gf_rdma_connect (struct rpc_transport *this, int port)
{
gf_rdma_private_t *priv = NULL;
int32_t ret = 0;
- gf_boolean_t non_blocking = 1;
union gf_sock_union sock_union = {{0, }, };
socklen_t sockaddr_len = 0;
+ gf_rdma_peer_t *peer = NULL;
+ gf_rdma_ctx_t *rdma_ctx = NULL;
+ gf_boolean_t connected = _gf_false;
priv = this->private;
+ peer = &priv->peer;
+
+ rpc_transport_ref (this);
+
ret = gf_rdma_client_get_remote_sockaddr (this,
&sock_union.sa,
&sockaddr_len, port);
if (ret != 0) {
gf_log (this->name, GF_LOG_DEBUG,
"cannot get remote address to connect");
- return ret;
+ goto out;
}
+ rdma_ctx = this->ctx->ib;
pthread_mutex_lock (&priv->write_mutex);
{
- if (priv->sock != -1) {
- ret = 0;
+ if (peer->cm_id != NULL) {
+ ret = -1;
+ errno = EINPROGRESS;
+ connected = _gf_true;
goto unlock;
}
- priv->sock = socket (sock_union.sa.sa_family, SOCK_STREAM, 0);
+ priv->entity = GF_RDMA_CLIENT;
- if (priv->sock == -1) {
+ ret = rdma_create_id (rdma_ctx->rdma_cm_event_channel,
+ &peer->cm_id, this, RDMA_PS_TCP);
+ if (ret != 0) {
gf_log (this->name, GF_LOG_ERROR,
- "socket () - error: %s", strerror (errno));
+ "creation of rdma_cm_id failed (%s)",
+ strerror (errno));
ret = -errno;
goto unlock;
}
- gf_log (this->name, GF_LOG_TRACE,
- "socket fd = %d", priv->sock);
-
memcpy (&this->peerinfo.sockaddr, &sock_union.storage,
sockaddr_len);
this->peerinfo.sockaddr_len = sockaddr_len;
@@ -4596,201 +4341,84 @@ gf_rdma_connect (struct rpc_transport *this, int port)
((struct sockaddr *) &this->myinfo.sockaddr)->sa_family =
((struct sockaddr *)&this->peerinfo.sockaddr)->sa_family;
- if (non_blocking)
- {
- ret = __tcp_nonblock (priv->sock);
-
- if (ret == -1)
- {
- gf_log (this->name, GF_LOG_ERROR,
- "could not set socket %d to non "
- "blocking mode (%s)",
- priv->sock, strerror (errno));
- close (priv->sock);
- priv->sock = -1;
- goto unlock;
- }
- }
-
ret = gf_rdma_client_bind (this,
(struct sockaddr *)&this->myinfo.sockaddr,
&this->myinfo.sockaddr_len,
- priv->sock);
- if (ret == -1)
- {
+ peer->cm_id);
+ if (ret != 0) {
gf_log (this->name, GF_LOG_WARNING,
"client bind failed: %s", strerror (errno));
- close (priv->sock);
- priv->sock = -1;
goto unlock;
}
- ret = connect (priv->sock,
- (struct sockaddr *)&this->peerinfo.sockaddr,
- this->peerinfo.sockaddr_len);
- if (ret == -1 && errno != EINPROGRESS)
- {
- gf_log (this->name, GF_LOG_ERROR,
- "connection attempt failed (%s)",
+ ret = rdma_resolve_addr (peer->cm_id, NULL, &sock_union.sa,
+ 2000);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "rdma_resolve_addr failed (%s)",
strerror (errno));
- close (priv->sock);
- priv->sock = -1;
goto unlock;
}
- priv->tcp_connected = priv->connected = 0;
-
- rpc_transport_ref (this);
-
- priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_START;
- priv->handshake.outgoing.state = GF_RDMA_HANDSHAKE_START;
-
- priv->idx = event_register (this->ctx->event_pool,
- priv->sock, gf_rdma_event_handler,
- this, 1, 1);
+ priv->connected = 0;
}
unlock:
pthread_mutex_unlock (&priv->write_mutex);
- return ret;
-}
-
-static int
-gf_rdma_server_event_handler (int fd, int idx, void *data,
- int poll_in, int poll_out, int poll_err)
-{
- int32_t main_sock = -1;
- rpc_transport_t *this = NULL, *trans = NULL;
- gf_rdma_private_t *priv = NULL;
- gf_rdma_private_t *trans_priv = NULL;
- gf_rdma_options_t *options = NULL;
-
- if (!poll_in) {
- return 0;
- }
-
- trans = data;
- trans_priv = (gf_rdma_private_t *) trans->private;
-
- this = GF_CALLOC (1, sizeof (rpc_transport_t),
- gf_common_mt_rpc_transport_t);
- if (this == NULL) {
- return -1;
- }
-
- this->listener = trans;
-
- priv = GF_CALLOC (1, sizeof (gf_rdma_private_t),
- gf_common_mt_rdma_private_t);
- if (priv == NULL) {
- GF_FREE (priv);
- return -1;
- }
- this->private = priv;
- /* Copy all the rdma related values in priv, from trans_priv
- as other than QP, all the values remain same */
- priv->device = trans_priv->device;
- priv->options = trans_priv->options;
- priv->is_server = 1;
- priv->listener = trans;
-
- options = &priv->options;
-
- this->ops = trans->ops;
- this->init = trans->init;
- this->fini = trans->fini;
- this->ctx = trans->ctx;
- this->name = gf_strdup (trans->name);
- this->notify = trans->notify;
- this->mydata = trans->mydata;
-
- memcpy (&this->myinfo.sockaddr, &trans->myinfo.sockaddr,
- trans->myinfo.sockaddr_len);
- this->myinfo.sockaddr_len = trans->myinfo.sockaddr_len;
-
- main_sock = (trans_priv)->sock;
- this->peerinfo.sockaddr_len = sizeof (this->peerinfo.sockaddr);
- priv->sock = accept (main_sock,
- (struct sockaddr *)&this->peerinfo.sockaddr,
- &this->peerinfo.sockaddr_len);
- if (priv->sock == -1) {
- gf_log ("rdma/server", GF_LOG_ERROR,
- "accept() failed: %s",
- strerror (errno));
- GF_FREE (this->private);
- GF_FREE (this);
- return -1;
- }
-
- priv->peer.trans = this;
- rpc_transport_ref (this);
-
- gf_rdma_get_transport_identifiers (this);
-
- priv->tcp_connected = 1;
- priv->handshake.incoming.state = GF_RDMA_HANDSHAKE_START;
- priv->handshake.outgoing.state = GF_RDMA_HANDSHAKE_START;
-
- priv->peer.send_count = options->send_count;
- priv->peer.recv_count = options->recv_count;
- priv->peer.send_size = options->send_size;
- priv->peer.recv_size = options->recv_size;
- INIT_LIST_HEAD (&priv->peer.ioq);
+out:
+ if (ret != 0) {
+ if (!connected) {
+ gf_rdma_teardown (this);
+ }
- if (gf_rdma_create_qp (this) < 0) {
- gf_log (GF_RDMA_LOG_NAME, GF_LOG_ERROR,
- "%s: could not create QP",
- this->name);
- rpc_transport_disconnect (this);
- return -1;
+ rpc_transport_unref (this);
}
- priv->idx = event_register (this->ctx->event_pool, priv->sock,
- gf_rdma_event_handler, this, 1, 1);
-
- pthread_mutex_init (&priv->read_mutex, NULL);
- pthread_mutex_init (&priv->write_mutex, NULL);
- pthread_mutex_init (&priv->recv_mutex, NULL);
- /* pthread_cond_init (&priv->recv_cond, NULL); */
- return 0;
+ return ret;
}
+
static int32_t
gf_rdma_listen (rpc_transport_t *this)
{
union gf_sock_union sock_union = {{0, }, };
socklen_t sockaddr_len = 0;
gf_rdma_private_t *priv = NULL;
- int opt = 1, ret = 0;
+ gf_rdma_peer_t *peer = NULL;
+ int ret = 0;
+ gf_rdma_ctx_t *rdma_ctx = NULL;
char service[NI_MAXSERV], host[NI_MAXHOST];
priv = this->private;
- memset (&sock_union, 0, sizeof (sock_union));
- ret = gf_rdma_server_get_local_sockaddr (this,
- &sock_union.sa,
+ peer = &priv->peer;
+
+ priv->entity = GF_RDMA_SERVER_LISTENER;
+
+ rdma_ctx = this->ctx->ib;
+
+ ret = gf_rdma_server_get_local_sockaddr (this, &sock_union.sa,
&sockaddr_len);
if (ret != 0) {
- gf_log (this->name, GF_LOG_DEBUG,
+ gf_log (this->name, GF_LOG_WARNING,
"cannot find network address of server to bind to");
goto err;
}
- priv->sock = socket (sock_union.sa.sa_family, SOCK_STREAM, 0);
- if (priv->sock == -1) {
- gf_log ("rdma/server", GF_LOG_CRITICAL,
- "init: failed to create socket, error: %s",
+ ret = rdma_create_id (rdma_ctx->rdma_cm_event_channel,
+ &peer->cm_id, this, RDMA_PS_TCP);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "creation of rdma_cm_id failed (%s)",
strerror (errno));
- GF_FREE (this->private);
- ret = -1;
goto err;
}
- memcpy (&this->myinfo.sockaddr, &sock_union.storage, sockaddr_len);
+ memcpy (&this->myinfo.sockaddr, &sock_union.storage,
+ sockaddr_len);
this->myinfo.sockaddr_len = sockaddr_len;
ret = getnameinfo ((struct sockaddr *)&this->myinfo.sockaddr,
- this->myinfo.sockaddr_len,
- host, sizeof (host),
+ this->myinfo.sockaddr_len, host, sizeof (host),
service, sizeof (service),
NI_NUMERICHOST);
if (ret != 0) {
@@ -4798,34 +4426,38 @@ gf_rdma_listen (rpc_transport_t *this)
"getnameinfo failed (%s)", gai_strerror (ret));
goto err;
}
+
sprintf (this->myinfo.identifier, "%s:%s", host, service);
- setsockopt (priv->sock, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof (opt));
- if (bind (priv->sock, &sock_union.sa, sockaddr_len) != 0) {
- ret = -1;
- gf_log ("rdma/server", GF_LOG_ERROR,
- "init: failed to bind to socket for %s (%s)",
- this->myinfo.identifier, strerror (errno));
+ ret = rdma_bind_addr (peer->cm_id, &sock_union.sa);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "rdma_bind_addr failed (%s)", strerror (errno));
goto err;
}
- if (listen (priv->sock, 10) != 0) {
- gf_log ("rdma/server", GF_LOG_ERROR,
- "init: listen () failed on socket for %s (%s)",
- this->myinfo.identifier, strerror (errno));
- ret = -1;
+ ret = rdma_listen (peer->cm_id, 10);
+ if (ret != 0) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "rdma_listen failed (%s)", strerror (errno));
goto err;
}
- /* Register the main socket */
- priv->idx = event_register (this->ctx->event_pool, priv->sock,
- gf_rdma_server_event_handler,
- rpc_transport_ref (this), 1, 0);
+ rpc_transport_ref (this);
+ ret = 0;
err:
+ if (ret < 0) {
+ if (peer->cm_id != NULL) {
+ rdma_destroy_id (peer->cm_id);
+ peer->cm_id = NULL;
+ }
+ }
+
return ret;
}
+
struct rpc_transport_ops tops = {
.submit_request = gf_rdma_submit_request,
.submit_reply = gf_rdma_submit_reply,
@@ -4844,7 +4476,6 @@ init (rpc_transport_t *this)
return -1;
this->private = priv;
- priv->sock = -1;
if (gf_rdma_init (this)) {
gf_log (this->name, GF_LOG_ERROR,
@@ -4868,13 +4499,6 @@ fini (struct rpc_transport *this)
if (priv) {
pthread_mutex_destroy (&priv->recv_mutex);
pthread_mutex_destroy (&priv->write_mutex);
- pthread_mutex_destroy (&priv->read_mutex);
-
- /* pthread_cond_destroy (&priv->recv_cond); */
- if (priv->sock != -1) {
- event_unregister (this->ctx->event_pool,
- priv->sock, priv->idx);
- }
gf_log (this->name, GF_LOG_TRACE,
"called fini on transport: %p", this);